diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 12958e86251246600f73ca4e873d39726374efa2..dae01cbb8c1b33e47b1dc4ac3d329b200e21aee5 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -78,11 +78,11 @@ module.exports = { `] ], locales: { - // '/en/': { - // lang: 'en-US', - // title: 'Spring Docs', - // description: '' - // }, + '/en/': { + lang: 'en-US', + title: 'Spring Docs', + description: '' + }, "/": { lang: "zh-CN", title: "中文文档", @@ -122,237 +122,626 @@ module.exports = { sidebarDepth: 3, activeHeaderLinks: true, locales: { - // '/en/': { - // // 多语言下拉菜单的标题 - // selectText: 'Language', - // // 该语言在下拉菜单中的标签 - // label: 'English', - // lastUpdated: 'Last Updated', - // lang: 'en-US', - // // 编辑链接文字 - // editLinkText: 'Edit this page on GitCode', - // algolia: {}, - // nav: [ - // { - // text: 'Spring', - // link: '/en/' - // }, - // { - // text: 'Spring Boot', - // link: '/en/spring-boot/' - // }, - // { - // text: 'Spring Framework', - // link: '/en/spring-framework/' - // }, - // { - // text: 'Spring Data', - // link: '/en/spring-data/' - // }, - // { - // text: 'Spring Cloud', - // link: '/en/spring-cloud/' - // }, - // { - // text: 'More', - // ariaLabel: 'Others', - // items: [ - // { text: 'Spring Cloud Data Flow', link: '/en/spring-cloud-data-flow/'}, - // { text: 'Spring Security', link: '/en/spring-security/'}, - // { text: 'Spring for GraphQL', link: '/en/spring-for-graphql/'}, - // { text: 'Spring Session', link: '/en/spring-session/'}, - // { text: 'Spring Integration', link: '/en/spring-integration/'}, - // { text: 'Spring HATEOAS', link: '/en/spring-hateoas/'}, - // { text: 'Spring REST Docs', link: '/en/spring-rest-docs/'}, - // { text: 'Spring Batch', link: '/en/spring-batch/'}, - // { text: 'Spring AMQP', link: '/en/spring-amqp/'}, - // { text: 'Spring CredHub', link: '/en/spring-credhub/'}, - // { text: 'Spring Flo', link: '/en/spring-flo/'}, - // { text: 'Spring for Apache Kafka', link: 
'/en/spring-for-apache-kafka/'}, - // { text: 'Spring LDAP', link: '/en/spring-ldap/'}, - // { text: 'Spring Shell', link: '/en/spring-shell/'}, - // { text: 'Spring Statemachine', link: '/en/spring-statemachine/'}, - // { text: 'Spring Vault', link: '/en/spring-vault/'}, - // { text: 'Spring Web Flow', link: '/en/spring-web-flow/'}, - // { text: 'Spring Web Services', link: '/en/spring-web-services/'} - // ] - // } - // ], - // sidebar: { - // '/en/spring-boot/': [ - // { - // title: 'Spring Boot', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/spring-boot/getting-help.md", - // "/en/spring-boot/documentation.md", - // "/en/spring-boot/getting-started.md", - // "/en/spring-boot/upgrading.md", - // "/en/spring-boot/using.md", - // "/en/spring-boot/features.md", - // "/en/spring-boot/web.md", - // "/en/spring-boot/data.md", - // "/en/spring-boot/io.md", - // "/en/spring-boot/messaging.md", - // "/en/spring-boot/container-images.md", - // "/en/spring-boot/actuator.md", - // "/en/spring-boot/deployment.md", - // "/en/spring-boot/cli.md", - // "/en/spring-boot/build-tool-plugins.md", - // "/en/spring-boot/howto.md" - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // '/en/spring-framework/': [ - // { - // title: 'Spring Framework', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/spring-framework/overview.md", - // "/en/spring-framework/core.md", - // "/en/spring-framework/testing.md", - // "/en/spring-framework/data-access.md", - // "/en/spring-framework/web-servlet.md", - // "/en/spring-framework/web-reactive.md", - // "/en/spring-framework/integration.md", - // "/en/spring-framework/languages.md" - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // '/en/spring-data/': [ - // { - // title: 'Spring Data', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/spring-data/spring-data.md" - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // 
'/en/spring-cloud/': [ - // { - // title: 'Spring Cloud', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/spring-cloud/documentation-overview.md", - // "/en/spring-cloud/spring-cloud-build.md", - // "/en/spring-cloud/spring-cloud-bus.md", - // "/en/spring-cloud/spring-cloud-circuitbreaker.md", - // "/en/spring-cloud/spring-cloud-cli.md", - // "/en/spring-cloud/spring-cloud-cloudfoundry.md", - // "/en/spring-cloud/spring-cloud-commons.md", - // "/en/spring-cloud/spring-cloud-config.md", - // "/en/spring-cloud/spring-cloud-consul.md", - // "/en/spring-cloud/spring-cloud-contract.md", - // "/en/spring-cloud/spring-cloud-function.md", - // "/en/spring-cloud/spring-cloud-gateway.md", - // "/en/spring-cloud/spring-cloud-kubernetes.md", - // "/en/spring-cloud/spring-cloud-netflix.md", - // "/en/spring-cloud/spring-cloud-openfeign.md", - // "/en/spring-cloud/spring-cloud-sleuth.md", - // "/en/spring-cloud/spring-cloud-stream.md", - // "/en/spring-cloud/spring-cloud-task.md", - // "/en/spring-cloud/spring-cloud-vault.md", - // "/en/spring-cloud/spring-cloud-zookeeper.md", - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // '/en/spring-cloud-data-flow/': [ - // { - // title: 'Spring Cloud Data Flow', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/spring-cloud-data-flow/spring-cloud-dataflow.md", - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // '/en/spring-security/': [ - // { - // title: 'Spring Security', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/spring-security/index.md", - // "/en/spring-security/prerequisites.md", - // "/en/spring-security/community.md", - // "/en/spring-security/whats-new.md", - // "/en/spring-security/getting-spring-security.md", - // "/en/spring-security/features.md", - // "/en/spring-security/modules.md", - // "/en/spring-security/samples.md", - // "/en/spring-security/servlet.md", - // "/en/spring-security/reactive.md" - 
// ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], + '/en/': { + // 多语言下拉菜单的标题 + selectText: 'Language', + // 该语言在下拉菜单中的标签 + label: 'English', + lastUpdated: 'Last Updated', + lang: 'en-US', + // 编辑链接文字 + editLinkText: 'Edit this page on GitCode', + algolia: {}, + nav: [ + { + text: "Spring", + link: '/en/spring/why-spring.html' + }, + { + text: "Spring Boot", + link: '/en/spring-boot/getting-help.html' + }, + { + text: "Spring Framework", + link: '/en/spring-framework/overview.html' + }, + { + text: "Spring Data", + link: '/en/spring-data/spring-data.html' + }, + { + text: "Spring Cloud", + link: '/en/spring-cloud/documentation-overview.html' + }, + { + text: "more", + ariaLabel: "Others", + items: [ + { text: 'Spring Cloud Data Flow', link: '/en/spring-cloud-data-flow/spring-cloud-dataflow.html' }, + { text: 'Spring Security', link: '/en/spring-security/overview.html' }, + { text: 'Spring for GraphQL', link: '/en/spring-for-graphql/spring-graphql.html' }, + { text: 'Spring Session', link: '/en/spring-session/_index.html' }, + { text: 'Spring Integration', link: '/en/spring-integration/preface.html' }, + { text: 'Spring HATEOAS', link: '/en/spring-hateoas/spring-hateoas.html' }, + { text: 'Spring REST Docs', link: '/en/spring-rest-docs/spring-restdocs.html' }, + { text: 'Spring Batch', link: '/en/spring-batch/spring-batch-intro.html' }, + { text: 'Spring AMQP', link: '/en/spring-amqp/spring-amqp.html' }, + { text: 'Spring CredHub', link: '/en/spring-credhub/spring-credhub.html' }, + { text: 'Spring Flo', link: '/en/spring-flo/spring-flo.html' }, + { text: 'Spring for Apache Kafka', link: '/en/spring-for-apache-kafka/spring-kafka.html' }, + { text: 'Spring LDAP', link: '/en/spring-ldap/spring-ldap.html' }, + { text: 'Spring Shell', link: '/en/spring-shell/spring-shell.html' }, + { text: 'Spring Statemachine', link: '/en/spring-statemachine/spring-statemachine.html' }, + { text: 'Spring Vault', link: '/en/spring-vault/spring-vault.html' }, + { text: 
'Spring Web Flow', link: '/en/spring-web-flow/preface.html' }, + { text: 'Spring Web Services', link: '/en/spring-web-services/spring-web-service.html' } + ] + } + ], + sidebar: { + '/en/spring-boot/': [ + { + title: 'Spring Boot', + sidebarDepth: 2, + collapsable: false, + children: [ + "getting-help.md", + "documentation.md", + "getting-started.md", + "upgrading.md", + "using.md", + "features.md", + "web.md", + "data.md", + "io.md", + "messaging.md", + "container-images.md", + "actuator.md", + "deployment.md", + "cli.md", + "build-tool-plugins.md", + "howto.md" + ], + initialOpenGroupIndex: 0 + } + ], + '/en/spring-framework/': [ + { + title: 'Spring Framework', + sidebarDepth: 2, + collapsable: false, + children: [ + "overview.md", + "core.md", + "testing.md", + "data-access.md", + "web-servlet.md", + "web-reactive.md", + "integration.md", + "languages.md" + ], + initialOpenGroupIndex: 0 + } + ], + '/en/spring-data/': [ + { + title: 'Spring Data', + sidebarDepth: 2, + collapsable: false, + children: [ + "spring-data.md" + ], + initialOpenGroupIndex: 0 + } + ], + '/en/spring-cloud/': [ + { + title: 'Spring Cloud', + sidebarDepth: 2, + collapsable: false, + children: [ + "documentation-overview.md", + "spring-cloud-build.md", + "spring-cloud-bus.md", + "spring-cloud-circuitbreaker.md", + "spring-cloud-cli.md", + "spring-cloud-cloudfoundry.md", + "spring-cloud-commons.md", + "spring-cloud-config.md", + "spring-cloud-consul.md", + "spring-cloud-contract.md", + "spring-cloud-function.md", + "spring-cloud-gateway.md", + "spring-cloud-kubernetes.md", + "spring-cloud-netflix.md", + "spring-cloud-openfeign.md", + "spring-cloud-sleuth.md", + "spring-cloud-stream.md", + "spring-cloud-task.md", + "spring-cloud-vault.md", + "spring-cloud-zookeeper.md" + ], + initialOpenGroupIndex: 0 + } + ], + '/en/spring-cloud-data-flow/': [ + { + title: 'Spring Cloud Data Flow', + sidebarDepth: 2, + collapsable: false, + children: [ + "spring-cloud-dataflow.md", + ], + 
initialOpenGroupIndex: 0 + } + ], + '/en/spring-security/': [ + { + title: 'Spring Security', + sidebarDepth: 2, + collapsable: false, + children: [ + "overview.md", + "prerequisites.md", + "community.md", + "whats-new.md", + "getting-spring-security.md", + "features.md", + "features-authentication.md", + "features-authentication-password-storage.md", + "features-exploits.md", + "features-exploits-csrf.md", + "features-exploits-headers.md", + "features-exploits-http.md", + "features-integrations.md", + "features-integrations-cryptography.md", + "features-integrations-data.md", + "features-integrations-concurrency.md", + "features-integrations-jackson.md", + "features-integrations-localization.md", + "modules.md", + "samples.md", + "servlet.md", + "servlet-getting-started.md", + "servlet-architecture.md", + "servlet-authentication.md", + "servlet-authentication-architecture.md", + "servlet-authentication-passwords.md", + "servlet-authentication-passwords-input.md", + "servlet-authentication-passwords-form.md", + "servlet-authentication-passwords-basic.md", + "servlet-authentication-passwords-digest.md", + "servlet-authentication-passwords-storage.md", + "servlet-authentication-passwords-storage-in-memory.md", + "servlet-authentication-passwords-storage-jdbc.md", + "servlet-authentication-passwords-storage-user-details.md", + "servlet-authentication-passwords-storage-user-details-service.md", + "servlet-authentication-passwords-storage-password-encoder.md", + "servlet-authentication-passwords-storage-dao-authentication-provider.md", + "servlet-authentication-passwords-storage-ldap.md", + "servlet-authentication-session-management.md", + "servlet-authentication-rememberme.md", + "servlet-authentication-openid.md", + "servlet-authentication-anonymous.md", + "servlet-authentication-preauth.md", + "servlet-authentication-jaas.md", + "servlet-authentication-cas.md", + "servlet-authentication-x509.md", + "servlet-authentication-runas.md", + 
"servlet-authentication-logout.md", + "servlet-authentication-events.md", + "servlet-authorization-.md", + "servlet-authorization-architecture.md", + "servlet-authorization-authorize-http-requests.md", + "servlet-authorization-authorize-requests.md", + "servlet-authorization-expression-based.md", + "servlet-authorization-secure-objects.md", + "servlet-authorization-method-security.md", + "servlet-authorization-acls.md", + "servlet-oauth2-.md", + "servlet-oauth2-login.md", + "servlet-oauth2-login-core.md", + "servlet-oauth2-login-advanced.md", + "servlet-oauth2-client.md", + "servlet-oauth2-client-core.md", + "servlet-oauth2-client-authorization-grants.md", + "servlet-oauth2-client-client-authentication.md", + "servlet-oauth2-client-authorized-clients.md", + "servlet-oauth2-resource-server.md", + "servlet-oauth2-resource-server-jwt.md", + "servlet-oauth2-resource-server-opaque-token.md", + "servlet-oauth2-resource-server-multitenancy.md", + "servlet-oauth2-resource-server-bearer-tokens.md", + "servlet-saml2.md", + "servlet-saml2-login.md", + "servlet-saml2-login-overview.md", + "servlet-saml2-login-authentication-requests.md", + "servlet-saml2-login-authentication.md", + "servlet-saml2-logout.md", + "servlet-saml2-metadata.md", + "servlet-exploits.md", + "servlet-exploits-csrf.md", + "servlet-exploits-headers.md", + "servlet-exploits-http.md", + "servlet-exploits-firewall.md", + "servlet-integrations.md", + "servlet-integrations-concurrency.md", + "servlet-integrations-jackson.md", + "servlet-integrations-localization.md", + "servlet-integrations-servlet-api.md", + "servlet-integrations-data.md", + "servlet-integrations-mvc.md", + "servlet-integrations-websocket.md", + "servlet-integrations-cors.md", + "servlet-integrations-jsp-taglibs.md", + "servlet-configuration-java.md", + "servlet-configuration-kotlin.md", + "servlet-configuration-xml-namespace.md", + "servlet-test.md", + "servlet-test-method.md", + "servlet-test-mockmvc.md", + "servlet-test-mockmvc-setup.md", 
+ "servlet-test-mockmvc-request-post-processors.md", + "servlet-test-mockmvc-authentication.md", + "servlet-test-mockmvc-csrf.md", + "servlet-test-mockmvc-form-login.md", + "servlet-test-mockmvc-http-basic.md", + "servlet-test-mockmvc-oauth2.md", + "servlet-test-mockmvc-logout.md", + "servlet-test-mockmvc-request-builders.md", + "servlet-test-mockmvc-result-matchers.md", + "servlet-test-mockmvc-result-handlers.md", + "servlet-appendix.md", + "servlet-appendix-database-schema.md", + "servlet-appendix-namespace.md", + "servlet-appendix-namespace-authentication-manager.md", + "servlet-appendix-namespace-http.md", + "servlet-appendix-namespace-method-security.md", + "servlet-appendix-namespace-ldap.md", + "servlet-appendix-namespace-websocket.md", + "servlet-appendix-faq.md", + "reactive.md", + "reactive-getting-started.md", + "reactive-authentication-x509.md", + "reactive-authentication-logout.md", + "reactive-authorization-authorize-http-requests.md", + "reactive-authorization-method.md", + "reactive-oauth2.md", + "reactive-oauth2-login.md", + "reactive-oauth2-login-core.md", + "reactive-oauth2-login-advanced.md", + "reactive-oauth2-client.md", + "reactive-oauth2-client-core.md", + "reactive-oauth2-client-authorization-grants.md", + "reactive-oauth2-client-client-authentication.md", + "reactive-oauth2-client-authorized-clients.md", + "reactive-oauth2-resource-server.md", + "reactive-oauth2-resource-server-jwt.md", + "reactive-oauth2-resource-server-opaque-token.md", + "reactive-oauth2-resource-server-multitenancy.md", + "reactive-oauth2-resource-server-bearer-tokens.md", + "reactive-exploits.md", + "reactive-exploits-csrf.md", + "reactive-exploits-headers.md", + "reactive-exploits-http.md", + "reactive-integrations-cors.md", + "reactive-integrations-rsocket.md", + "reactive-test.md", + "reactive-test-method.md", + "reactive-test-web.md", + "reactive-test-web-setup.md", + "reactive-test-web-authentication.md", + "reactive-test-web-csrf.md", + 
"reactive-test-web-oauth2.md", + "reactive-configuration-webflux.md" + ], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-for-graphql/": [ + { + title: "Spring For Graphql", + sidebarDepth: 2, + collapsable: false, + children: ["spring-graphql.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-session/": [ + { + title: "Spring Session", + sidebarDepth: 2, + collapsable: false, + children: [ + "_index.md", + "whats-new.md", + "samples.md", + "bootSamples/HttpSession/mongo.md", + "bootSamples/HttpSession/jdbc.md", + "bootSamples/HttpSession/Redis/boot-redis.md", + "bootSamples/boot-findbyusername.md", + "bootSamples/boot-websocket.md", + "webFlux/boot-webflux-custom-cookie.md", + "modules.md", + "http-session.md", + "web-socket.md", + "web-session.md", + "spring-security.md", + "api.md", + "upgrading.md" + ], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-integration/": [ + { + title: "Spring Integration", + sidebarDepth: 2, + collapsable: false, + children: [ + "preface.md", + "whats-new.md", + "overview.md", + "core.md", + "message.md", + "message-routing.md", + "message-transformation.md", + "messaging-endpoints.md", + "dsl.md", + "kotlin-dsl.md", + "system-management.md", + "reactive-streams.md", + "endpoint-summary.md", + "amqp.md", + "event.md", + "feed.md", + "file.md", + "ftp.md", + "gemfire.md", + "http.md", + "jdbc.md", + "jpa.md", + "jms.md", + "jmx.md", + "kafka.md", + "mail.md", + "mongodb.md", + "mqtt.md", + "r2dbc.md", + "redis.md", + "resource.md", + "rmi.md", + "rsocket.md", + "sftp.md", + "stomp.md", + "stream.md", + "syslog.md", + "ip.md", + "webflux.md", + "web-sockets.md", + "ws.md", + "xml.md", + "xmpp.md", + "zeromq.md", + "zookeeper.md", + "error-handling.md", + "spel.md", + "message-publishing.md", + "transactions.md", + "security.md", + "configuration.md", + "testing.md", + "samples.md", + "resources.md", + "history.md" + ], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-hateoas/": [ + { + title: "Spring HATEOAS", + 
sidebarDepth: 2, + collapsable: false, + children: ["spring-hateoas.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-rest-docs/": [ + { + title: "Spring REST Docs", + sidebarDepth: 2, + collapsable: false, + children: ["spring-restdocs.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-batch/": [ + { + title: "Spring Batch", + sidebarDepth: 2, + collapsable: false, + children: [ + "spring-batch-intro.md", + "whatsnew.md", + "domain.md", + "job.md", + "step.md", + "readersAndWriters.md", + "processor.md", + "scalability.md", + "repeat.md", + "retry.md", + "testing.md", + "common-patterns.md", + "jsr-352.md", + "spring-batch-integration.md", + "monitoring-and-metrics.md", + "appendix.md", + "schema-appendix.md", + "transaction-appendix.md", + "glossary.md" + ], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-amqp/": [ + { + title: "Spring AMQP", + sidebarDepth: 2, + collapsable: false, + children: ["spring-amqp.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-credhub/": [ + { + title: "Spring CredHub", + sidebarDepth: 2, + collapsable: false, + children: ["spring-credhub.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-flo/": [ + { + title: "Spring Flo", + sidebarDepth: 2, + collapsable: false, + children: ["spring-flo.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-for-apache-kafka/": [ + { + title: "Spring for Apache Kafka", + sidebarDepth: 2, + collapsable: false, + children: ["spring-kafka.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-ldap/": [ + { + title: "Spring LDAP", + sidebarDepth: 2, + collapsable: false, + children: ["spring-ldap.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-shell/": [ + { + title: "Spring Shell", + sidebarDepth: 2, + collapsable: false, + children: ["spring-shell.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-statemachine/": [ + { + title: "Spring Statemachine", + sidebarDepth: 2, + collapsable: false, + children: ["spring-statemachine.md"], + 
initialOpenGroupIndex: 0 + } + ], + "/en/spring-vault/": [ + { + title: "Spring Vault", + sidebarDepth: 2, + collapsable: false, + children: ["spring-vault.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-web-flow/": [ + { + title: "Spring Web Flow", + sidebarDepth: 2, + collapsable: false, + children: [ + "preface.md", + "introduction.md", + "whatsnew.md", + "defining-flows.md", + "el.md", + "views.md", + "actions.md", + "flow-managed-persistence.md", + "flow-security.md", + "flow-inheritance.md", + "system-setup.md", + "spring-mvc.md", + "spring-js.md", + "spring-faces.md", + "testing.md", + "field-mappings.md" + ], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-web-services/": [ + { + title: "Spring Web Services", + sidebarDepth: 2, + collapsable: false, + children: ["spring-web-service.md"], + initialOpenGroupIndex: 0 + } + ], - // // fallback - // '/en/': - // [{ - // title: 'Spring Doc', // 必要的 - // // path: '/', // 可选的, 标题的跳转链接,应为绝对路径且必须存在 - // collapsable: false, // 可选的, 默认值是 true, - // sidebarDepth: 1, // 可选的, 默认值是 1 - // children: [ - // '' - // ] - // }, - // { - // title: 'INTRO', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/why-spring.md", - // "/en/introducing-spring-boot.md", - // "/en/quickstart.md" - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // }, - // { - // title: 'GUIDE', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/system-requirements.md", - // "/en/installing.md", - // "/en/initializr.md" - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // }, - // { - // title: 'IDE', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/vscode_java.md", - // "/en/intellij_idea.md" - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // }, - // { - // title: 'DEMO', - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/en/getting-started_first-application.md", - // "/en/rest-service.md", - // "/en/consuming-rest.md" - // ], - // 
initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ] - // } - // }, + // fallback + '/en/spring/': + [ + { + title: 'INTRO', + sidebarDepth: 2, + collapsable: false, + children: [ + "why-spring.md", + "introducing-spring-boot.md", + "quickstart.md" + ], + initialOpenGroupIndex: 0 + }, + { + title: 'GUIDE', + sidebarDepth: 2, + collapsable: false, + children: [ + "system-requirements.md", + "installing.md", + "initializr.md" + ], + initialOpenGroupIndex: 0 + }, + { + title: 'IDE', + sidebarDepth: 2, + collapsable: false, + children: [ + "vscode_java.md", + "intellij_idea.md" + ], + initialOpenGroupIndex: 0 + }, + { + title: 'DEMO', + sidebarDepth: 2, + collapsable: false, + children: [ + "getting-started_first-application.md", + "rest-service.md", + "consuming-rest.md" + ], + initialOpenGroupIndex: 0 + } + ] + } + }, "/": { selectText: "选择语言", label: "简体中文", @@ -414,24 +803,24 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: [ - "/spring-boot/getting-help.md", - "/spring-boot/documentation.md", - "/spring-boot/getting-started.md", - "/spring-boot/upgrading.md", - "/spring-boot/using.md", - "/spring-boot/features.md", - "/spring-boot/web.md", - "/spring-boot/data.md", - "/spring-boot/io.md", - "/spring-boot/messaging.md", - "/spring-boot/container-images.md", - "/spring-boot/actuator.md", - "/spring-boot/deployment.md", - "/spring-boot/cli.md", - "/spring-boot/build-tool-plugins.md", - "/spring-boot/howto.md" + "getting-help.md", + "documentation.md", + "getting-started.md", + "upgrading.md", + "using.md", + "features.md", + "web.md", + "data.md", + "io.md", + "messaging.md", + "container-images.md", + "actuator.md", + "deployment.md", + "cli.md", + "build-tool-plugins.md", + "howto.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-framework/": [ @@ -440,16 +829,16 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: [ - "/spring-framework/overview.md", - "/spring-framework/core.md", - 
"/spring-framework/testing.md", - "/spring-framework/data-access.md", - "/spring-framework/web-servlet.md", - "/spring-framework/web-reactive.md", - "/spring-framework/integration.md", - "/spring-framework/languages.md" + "overview.md", + "core.md", + "testing.md", + "data-access.md", + "web-servlet.md", + "web-reactive.md", + "integration.md", + "languages.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-data/": [ @@ -458,9 +847,9 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: [ - "/spring-data/spring-data.md" + "spring-data.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-cloud/": [ @@ -469,28 +858,28 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: [ - "/spring-cloud/documentation-overview.md", - "/spring-cloud/spring-cloud-build.md", - "/spring-cloud/spring-cloud-bus.md", - "/spring-cloud/spring-cloud-circuitbreaker.md", - "/spring-cloud/spring-cloud-cli.md", - "/spring-cloud/spring-cloud-cloudfoundry.md", - "/spring-cloud/spring-cloud-commons.md", - "/spring-cloud/spring-cloud-config.md", - "/spring-cloud/spring-cloud-consul.md", - "/spring-cloud/spring-cloud-contract.md", - "/spring-cloud/spring-cloud-function.md", - "/spring-cloud/spring-cloud-gateway.md", - "/spring-cloud/spring-cloud-kubernetes.md", - "/spring-cloud/spring-cloud-netflix.md", - "/spring-cloud/spring-cloud-openfeign.md", - "/spring-cloud/spring-cloud-sleuth.md", - "/spring-cloud/spring-cloud-stream.md", - "/spring-cloud/spring-cloud-task.md", - "/spring-cloud/spring-cloud-vault.md", - "/spring-cloud/spring-cloud-zookeeper.md" + "documentation-overview.md", + "spring-cloud-build.md", + "spring-cloud-bus.md", + "spring-cloud-circuitbreaker.md", + "spring-cloud-cli.md", + "spring-cloud-cloudfoundry.md", + "spring-cloud-commons.md", + "spring-cloud-config.md", + "spring-cloud-consul.md", + "spring-cloud-contract.md", + "spring-cloud-function.md", + 
"spring-cloud-gateway.md", + "spring-cloud-kubernetes.md", + "spring-cloud-netflix.md", + "spring-cloud-openfeign.md", + "spring-cloud-sleuth.md", + "spring-cloud-stream.md", + "spring-cloud-task.md", + "spring-cloud-vault.md", + "spring-cloud-zookeeper.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-cloud-data-flow/": [ @@ -498,8 +887,8 @@ module.exports = { title: "Spring Cloud Data Flow 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-cloud-data-flow/spring-cloud-dataflow.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-cloud-dataflow.md"], + initialOpenGroupIndex: 0 } ], "/spring-security/": [ @@ -508,161 +897,161 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: [ - "/spring-security/overview.md", - "/spring-security/prerequisites.md", - "/spring-security/community.md", - "/spring-security/whats-new.md", - "/spring-security/getting-spring-security.md", - "/spring-security/features.md", - "/spring-security/features-authentication.md", - "/spring-security/features-authentication-password-storage.md", - "/spring-security/features-exploits.md", - "/spring-security/features-exploits-csrf.md", - "/spring-security/features-exploits-headers.md", - "/spring-security/features-exploits-http.md", - "/spring-security/features-integrations.md", - "/spring-security/features-integrations-cryptography.md", - "/spring-security/features-integrations-data.md", - "/spring-security/features-integrations-concurrency.md", - "/spring-security/features-integrations-jackson.md", - "/spring-security/features-integrations-localization.md", - "/spring-security/modules.md", - "/spring-security/samples.md", - "/spring-security/servlet.md", - "/spring-security/servlet-getting-started.md", - "/spring-security/servlet-architecture.md", - "/spring-security/servlet-authentication.md", - "/spring-security/servlet-authentication-architecture.md", - "/spring-security/servlet-authentication-passwords.md", 
- "/spring-security/servlet-authentication-passwords-input.md", - "/spring-security/servlet-authentication-passwords-form.md", - "/spring-security/servlet-authentication-passwords-basic.md", - "/spring-security/servlet-authentication-passwords-digest.md", - "/spring-security/servlet-authentication-passwords-storage.md", - "/spring-security/servlet-authentication-passwords-storage-in-memory.md", - "/spring-security/servlet-authentication-passwords-storage-jdbc.md", - "/spring-security/servlet-authentication-passwords-storage-user-details.md", - "/spring-security/servlet-authentication-passwords-storage-user-details-service.md", - "/spring-security/servlet-authentication-passwords-storage-password-encoder.md", - "/spring-security/servlet-authentication-passwords-storage-dao-authentication-provider.md", - "/spring-security/servlet-authentication-passwords-storage-ldap.md", - "/spring-security/servlet-authentication-session-management.md", - "/spring-security/servlet-authentication-rememberme.md", - "/spring-security/servlet-authentication-openid.md", - "/spring-security/servlet-authentication-anonymous.md", - "/spring-security/servlet-authentication-preauth.md", - "/spring-security/servlet-authentication-jaas.md", - "/spring-security/servlet-authentication-cas.md", - "/spring-security/servlet-authentication-x509.md", - "/spring-security/servlet-authentication-runas.md", - "/spring-security/servlet-authentication-logout.md", - "/spring-security/servlet-authentication-events.md", - "/spring-security/servlet-authorization-.md", - "/spring-security/servlet-authorization-architecture.md", - "/spring-security/servlet-authorization-authorize-http-requests.md", - "/spring-security/servlet-authorization-authorize-requests.md", - "/spring-security/servlet-authorization-expression-based.md", - "/spring-security/servlet-authorization-secure-objects.md", - "/spring-security/servlet-authorization-method-security.md", - "/spring-security/servlet-authorization-acls.md", - 
"/spring-security/servlet-oauth2-.md", - "/spring-security/servlet-oauth2-login.md", - "/spring-security/servlet-oauth2-login-core.md", - "/spring-security/servlet-oauth2-login-advanced.md", - "/spring-security/servlet-oauth2-client.md", - "/spring-security/servlet-oauth2-client-core.md", - "/spring-security/servlet-oauth2-client-authorization-grants.md", - "/spring-security/servlet-oauth2-client-client-authentication.md", - "/spring-security/servlet-oauth2-client-authorized-clients.md", - "/spring-security/servlet-oauth2-resource-server.md", - "/spring-security/servlet-oauth2-resource-server-jwt.md", - "/spring-security/servlet-oauth2-resource-server-opaque-token.md", - "/spring-security/servlet-oauth2-resource-server-multitenancy.md", - "/spring-security/servlet-oauth2-resource-server-bearer-tokens.md", - "/spring-security/servlet-saml2.md", - "/spring-security/servlet-saml2-login.md", - "/spring-security/servlet-saml2-login-overview.md", - "/spring-security/servlet-saml2-login-authentication-requests.md", - "/spring-security/servlet-saml2-login-authentication.md", - "/spring-security/servlet-saml2-logout.md", - "/spring-security/servlet-saml2-metadata.md", - "/spring-security/servlet-exploits.md", - "/spring-security/servlet-exploits-csrf.md", - "/spring-security/servlet-exploits-headers.md", - "/spring-security/servlet-exploits-http.md", - "/spring-security/servlet-exploits-firewall.md", - "/spring-security/servlet-integrations.md", - "/spring-security/servlet-integrations-concurrency.md", - "/spring-security/servlet-integrations-jackson.md", - "/spring-security/servlet-integrations-localization.md", - "/spring-security/servlet-integrations-servlet-api.md", - "/spring-security/servlet-integrations-data.md", - "/spring-security/servlet-integrations-mvc.md", - "/spring-security/servlet-integrations-websocket.md", - "/spring-security/servlet-integrations-cors.md", - "/spring-security/servlet-integrations-jsp-taglibs.md", - 
"/spring-security/servlet-configuration-java.md", - "/spring-security/servlet-configuration-kotlin.md", - "/spring-security/servlet-configuration-xml-namespace.md", - "/spring-security/servlet-test.md", - "/spring-security/servlet-test-method.md", - "/spring-security/servlet-test-mockmvc.md", - "/spring-security/servlet-test-mockmvc-setup.md", - "/spring-security/servlet-test-mockmvc-request-post-processors.md", - "/spring-security/servlet-test-mockmvc-authentication.md", - "/spring-security/servlet-test-mockmvc-csrf.md", - "/spring-security/servlet-test-mockmvc-form-login.md", - "/spring-security/servlet-test-mockmvc-http-basic.md", - "/spring-security/servlet-test-mockmvc-oauth2.md", - "/spring-security/servlet-test-mockmvc-logout.md", - "/spring-security/servlet-test-mockmvc-request-builders.md", - "/spring-security/servlet-test-mockmvc-result-matchers.md", - "/spring-security/servlet-test-mockmvc-result-handlers.md", - "/spring-security/servlet-appendix.md", - "/spring-security/servlet-appendix-database-schema.md", - "/spring-security/servlet-appendix-namespace.md", - "/spring-security/servlet-appendix-namespace-authentication-manager.md", - "/spring-security/servlet-appendix-namespace-http.md", - "/spring-security/servlet-appendix-namespace-method-security.md", - "/spring-security/servlet-appendix-namespace-ldap.md", - "/spring-security/servlet-appendix-namespace-websocket.md", - "/spring-security/servlet-appendix-faq.md", - "/spring-security/reactive.md", - "/spring-security/reactive-getting-started.md", - "/spring-security/reactive-authentication-x509.md", - "/spring-security/reactive-authentication-logout.md", - "/spring-security/reactive-authorization-authorize-http-requests.md", - "/spring-security/reactive-authorization-method.md", - "/spring-security/reactive-oauth2.md", - "/spring-security/reactive-oauth2-login.md", - "/spring-security/reactive-oauth2-login-core.md", - "/spring-security/reactive-oauth2-login-advanced.md", - 
"/spring-security/reactive-oauth2-client.md", - "/spring-security/reactive-oauth2-client-core.md", - "/spring-security/reactive-oauth2-client-authorization-grants.md", - "/spring-security/reactive-oauth2-client-client-authentication.md", - "/spring-security/reactive-oauth2-client-authorized-clients.md", - "/spring-security/reactive-oauth2-resource-server.md", - "/spring-security/reactive-oauth2-resource-server-jwt.md", - "/spring-security/reactive-oauth2-resource-server-opaque-token.md", - "/spring-security/reactive-oauth2-resource-server-multitenancy.md", - "/spring-security/reactive-oauth2-resource-server-bearer-tokens.md", - "/spring-security/reactive-exploits.md", - "/spring-security/reactive-exploits-csrf.md", - "/spring-security/reactive-exploits-headers.md", - "/spring-security/reactive-exploits-http.md", - "/spring-security/reactive-integrations-cors.md", - "/spring-security/reactive-integrations-rsocket.md", - "/spring-security/reactive-test.md", - "/spring-security/reactive-test-method.md", - "/spring-security/reactive-test-web.md", - "/spring-security/reactive-test-web-setup.md", - "/spring-security/reactive-test-web-authentication.md", - "/spring-security/reactive-test-web-csrf.md", - "/spring-security/reactive-test-web-oauth2.md", - "/spring-security/reactive-configuration-webflux.md" + "overview.md", + "prerequisites.md", + "community.md", + "whats-new.md", + "getting-spring-security.md", + "features.md", + "features-authentication.md", + "features-authentication-password-storage.md", + "features-exploits.md", + "features-exploits-csrf.md", + "features-exploits-headers.md", + "features-exploits-http.md", + "features-integrations.md", + "features-integrations-cryptography.md", + "features-integrations-data.md", + "features-integrations-concurrency.md", + "features-integrations-jackson.md", + "features-integrations-localization.md", + "modules.md", + "samples.md", + "servlet.md", + "servlet-getting-started.md", + "servlet-architecture.md", + 
"servlet-authentication.md", + "servlet-authentication-architecture.md", + "servlet-authentication-passwords.md", + "servlet-authentication-passwords-input.md", + "servlet-authentication-passwords-form.md", + "servlet-authentication-passwords-basic.md", + "servlet-authentication-passwords-digest.md", + "servlet-authentication-passwords-storage.md", + "servlet-authentication-passwords-storage-in-memory.md", + "servlet-authentication-passwords-storage-jdbc.md", + "servlet-authentication-passwords-storage-user-details.md", + "servlet-authentication-passwords-storage-user-details-service.md", + "servlet-authentication-passwords-storage-password-encoder.md", + "servlet-authentication-passwords-storage-dao-authentication-provider.md", + "servlet-authentication-passwords-storage-ldap.md", + "servlet-authentication-session-management.md", + "servlet-authentication-rememberme.md", + "servlet-authentication-openid.md", + "servlet-authentication-anonymous.md", + "servlet-authentication-preauth.md", + "servlet-authentication-jaas.md", + "servlet-authentication-cas.md", + "servlet-authentication-x509.md", + "servlet-authentication-runas.md", + "servlet-authentication-logout.md", + "servlet-authentication-events.md", + "servlet-authorization-.md", + "servlet-authorization-architecture.md", + "servlet-authorization-authorize-http-requests.md", + "servlet-authorization-authorize-requests.md", + "servlet-authorization-expression-based.md", + "servlet-authorization-secure-objects.md", + "servlet-authorization-method-security.md", + "servlet-authorization-acls.md", + "servlet-oauth2-.md", + "servlet-oauth2-login.md", + "servlet-oauth2-login-core.md", + "servlet-oauth2-login-advanced.md", + "servlet-oauth2-client.md", + "servlet-oauth2-client-core.md", + "servlet-oauth2-client-authorization-grants.md", + "servlet-oauth2-client-client-authentication.md", + "servlet-oauth2-client-authorized-clients.md", + "servlet-oauth2-resource-server.md", + "servlet-oauth2-resource-server-jwt.md", + 
"servlet-oauth2-resource-server-opaque-token.md", + "servlet-oauth2-resource-server-multitenancy.md", + "servlet-oauth2-resource-server-bearer-tokens.md", + "servlet-saml2.md", + "servlet-saml2-login.md", + "servlet-saml2-login-overview.md", + "servlet-saml2-login-authentication-requests.md", + "servlet-saml2-login-authentication.md", + "servlet-saml2-logout.md", + "servlet-saml2-metadata.md", + "servlet-exploits.md", + "servlet-exploits-csrf.md", + "servlet-exploits-headers.md", + "servlet-exploits-http.md", + "servlet-exploits-firewall.md", + "servlet-integrations.md", + "servlet-integrations-concurrency.md", + "servlet-integrations-jackson.md", + "servlet-integrations-localization.md", + "servlet-integrations-servlet-api.md", + "servlet-integrations-data.md", + "servlet-integrations-mvc.md", + "servlet-integrations-websocket.md", + "servlet-integrations-cors.md", + "servlet-integrations-jsp-taglibs.md", + "servlet-configuration-java.md", + "servlet-configuration-kotlin.md", + "servlet-configuration-xml-namespace.md", + "servlet-test.md", + "servlet-test-method.md", + "servlet-test-mockmvc.md", + "servlet-test-mockmvc-setup.md", + "servlet-test-mockmvc-request-post-processors.md", + "servlet-test-mockmvc-authentication.md", + "servlet-test-mockmvc-csrf.md", + "servlet-test-mockmvc-form-login.md", + "servlet-test-mockmvc-http-basic.md", + "servlet-test-mockmvc-oauth2.md", + "servlet-test-mockmvc-logout.md", + "servlet-test-mockmvc-request-builders.md", + "servlet-test-mockmvc-result-matchers.md", + "servlet-test-mockmvc-result-handlers.md", + "servlet-appendix.md", + "servlet-appendix-database-schema.md", + "servlet-appendix-namespace.md", + "servlet-appendix-namespace-authentication-manager.md", + "servlet-appendix-namespace-http.md", + "servlet-appendix-namespace-method-security.md", + "servlet-appendix-namespace-ldap.md", + "servlet-appendix-namespace-websocket.md", + "servlet-appendix-faq.md", + "reactive.md", + "reactive-getting-started.md", + 
"reactive-authentication-x509.md", + "reactive-authentication-logout.md", + "reactive-authorization-authorize-http-requests.md", + "reactive-authorization-method.md", + "reactive-oauth2.md", + "reactive-oauth2-login.md", + "reactive-oauth2-login-core.md", + "reactive-oauth2-login-advanced.md", + "reactive-oauth2-client.md", + "reactive-oauth2-client-core.md", + "reactive-oauth2-client-authorization-grants.md", + "reactive-oauth2-client-client-authentication.md", + "reactive-oauth2-client-authorized-clients.md", + "reactive-oauth2-resource-server.md", + "reactive-oauth2-resource-server-jwt.md", + "reactive-oauth2-resource-server-opaque-token.md", + "reactive-oauth2-resource-server-multitenancy.md", + "reactive-oauth2-resource-server-bearer-tokens.md", + "reactive-exploits.md", + "reactive-exploits-csrf.md", + "reactive-exploits-headers.md", + "reactive-exploits-http.md", + "reactive-integrations-cors.md", + "reactive-integrations-rsocket.md", + "reactive-test.md", + "reactive-test-method.md", + "reactive-test-web.md", + "reactive-test-web-setup.md", + "reactive-test-web-authentication.md", + "reactive-test-web-csrf.md", + "reactive-test-web-oauth2.md", + "reactive-configuration-webflux.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], @@ -671,8 +1060,8 @@ module.exports = { title: "Spring For Graphql 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-for-graphql/spring-graphql.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-graphql.md"], + initialOpenGroupIndex: 0 } ], "/spring-session/": [ @@ -681,33 +1070,24 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: [ - "/spring-session/_index.md", - "/spring-session/whats-new.md", - "/spring-session/samples.md", - "/spring-session/bootSamples/HttpSession/mongo.md", - "/spring-session/bootSamples/HttpSession/jdbc.md", - "/spring-session/bootSamples/HttpSession/Redis/boot-redis.md", - "/spring-session/bootSamples/boot-findbyusername.md", - 
"/spring-session/bootSamples/boot-websocket.md", - "/spring-session/webFlux/boot-webflux-custom-cookie.md", - "/spring-session/modules.md", - "/spring-session/http-session.md", - "/spring-session/web-socket.md", - "/spring-session/web-session.md", - "/spring-session/spring-security.md", - "/spring-session/api.md", - "/spring-session/upgrading.md" + "_index.md", + "whats-new.md", + "samples.md", + "bootSamples/HttpSession/mongo.md", + "bootSamples/HttpSession/jdbc.md", + "bootSamples/HttpSession/Redis/boot-redis.md", + "bootSamples/boot-findbyusername.md", + "bootSamples/boot-websocket.md", + "webFlux/boot-webflux-custom-cookie.md", + "modules.md", + "http-session.md", + "web-socket.md", + "web-session.md", + "spring-security.md", + "api.md", + "upgrading.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - } - ], - "/spring-session/sample/": [ - { - title: "Spring Session 文档3", - sidebarDepth: 2, - collapsable: false, - children: ["/spring-session/sample/samples.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-integration/": [ @@ -716,63 +1096,63 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: [ - "/spring-integration/preface.md", - "/spring-integration/whats-new.md", - "/spring-integration/overview.md", - "/spring-integration/core.md", - "/spring-integration/message.md", - "/spring-integration/message-routing.md", - "/spring-integration/message-transformation.md", - "/spring-integration/messaging-endpoints.md", - "/spring-integration/dsl.md", - "/spring-integration/kotlin-dsl.md", - "/spring-integration/system-management.md", - "/spring-integration/reactive-streams.md", - "/spring-integration/endpoint-summary.md", - "/spring-integration/amqp.md", - "/spring-integration/event.md", - "/spring-integration/feed.md", - "/spring-integration/file.md", - "/spring-integration/ftp.md", - "/spring-integration/gemfire.md", - "/spring-integration/http.md", - "/spring-integration/jdbc.md", - 
"/spring-integration/jpa.md", - "/spring-integration/jms.md", - "/spring-integration/jmx.md", - "/spring-integration/kafka.md", - "/spring-integration/mail.md", - "/spring-integration/mongodb.md", - "/spring-integration/mqtt.md", - "/spring-integration/r2dbc.md", - "/spring-integration/redis.md", - "/spring-integration/resource.md", - "/spring-integration/rmi.md", - "/spring-integration/rsocket.md", - "/spring-integration/sftp.md", - "/spring-integration/stomp.md", - "/spring-integration/stream.md", - "/spring-integration/syslog.md", - "/spring-integration/ip.md", - "/spring-integration/webflux.md", - "/spring-integration/web-sockets.md", - "/spring-integration/ws.md", - "/spring-integration/xml.md", - "/spring-integration/xmpp.md", - "/spring-integration/zeromq.md", - "/spring-integration/zookeeper.md", - "/spring-integration/error-handling.md", - "/spring-integration/spel.md", - "/spring-integration/message-publishing.md", - "/spring-integration/transactions.md", - "/spring-integration/security.md", - "/spring-integration/configuration.md", - "/spring-integration/testing.md", - "/spring-integration/samples.md", - "/spring-integration/resources.md", - "/spring-integration/history.md" + "preface.md", + "whats-new.md", + "overview.md", + "core.md", + "message.md", + "message-routing.md", + "message-transformation.md", + "messaging-endpoints.md", + "dsl.md", + "kotlin-dsl.md", + "system-management.md", + "reactive-streams.md", + "endpoint-summary.md", + "amqp.md", + "event.md", + "feed.md", + "file.md", + "ftp.md", + "gemfire.md", + "http.md", + "jdbc.md", + "jpa.md", + "jms.md", + "jmx.md", + "kafka.md", + "mail.md", + "mongodb.md", + "mqtt.md", + "r2dbc.md", + "redis.md", + "resource.md", + "rmi.md", + "rsocket.md", + "sftp.md", + "stomp.md", + "stream.md", + "syslog.md", + "ip.md", + "webflux.md", + "web-sockets.md", + "ws.md", + "xml.md", + "xmpp.md", + "zeromq.md", + "zookeeper.md", + "error-handling.md", + "spel.md", + "message-publishing.md", + 
"transactions.md", + "security.md", + "configuration.md", + "testing.md", + "samples.md", + "resources.md", + "history.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-hateoas/": [ @@ -780,8 +1160,8 @@ module.exports = { title: "Spring HATEOAS 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-hateoas/spring-hateoas.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-hateoas.md"], + initialOpenGroupIndex: 0 } ], "/spring-rest-docs/": [ @@ -789,8 +1169,8 @@ module.exports = { title: "Spring HATEOAS 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-rest-docs/spring-restdocs.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-restdocs.md"], + initialOpenGroupIndex: 0 } ], "/spring-batch/": [ @@ -799,27 +1179,27 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: [ - "/spring-batch/spring-batch-intro.md", - "/spring-batch/whatsnew.md", - "/spring-batch/domain.md", - "/spring-batch/job.md", - "/spring-batch/step.md", - "/spring-batch/readersAndWriters.md", - "/spring-batch/processor.md", - "/spring-batch/scalability.md", - "/spring-batch/repeat.md", - "/spring-batch/retry.md", - "/spring-batch/testing.md", - "/spring-batch/common-patterns.md", - "/spring-batch/jsr-352.md", - "/spring-batch/spring-batch-integration.md", - "/spring-batch/monitoring-and-metrics.md", - "/spring-batch/appendix.md", - "/spring-batch/schema-appendix.md", - "/spring-batch/transaction-appendix.md", - "/spring-batch/glossary.md" + "spring-batch-intro.md", + "whatsnew.md", + "domain.md", + "job.md", + "step.md", + "readersAndWriters.md", + "processor.md", + "scalability.md", + "repeat.md", + "retry.md", + "testing.md", + "common-patterns.md", + "jsr-352.md", + "spring-batch-integration.md", + "monitoring-and-metrics.md", + "appendix.md", + "schema-appendix.md", + "transaction-appendix.md", + "glossary.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], 
"/spring-amqp/": [ @@ -827,8 +1207,8 @@ module.exports = { title: "Spring AMQP 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-amqp/spring-amqp.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-amqp.md"], + initialOpenGroupIndex: 0 } ], "/spring-credhub/": [ @@ -836,8 +1216,8 @@ module.exports = { title: "Spring CredHub 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-credhub/spring-credhub.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-credhub.md"], + initialOpenGroupIndex: 0 } ], "/spring-flo/": [ @@ -845,8 +1225,8 @@ module.exports = { title: "Spring Flo 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-flo/spring-flo.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-flo.md"], + initialOpenGroupIndex: 0 } ], "/spring-for-apache-kafka/": [ @@ -854,8 +1234,8 @@ module.exports = { title: "Spring for Apache Kafka 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-for-apache-kafka/spring-kafka.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-kafka.md"], + initialOpenGroupIndex: 0 } ], "/spring-ldap/": [ @@ -863,8 +1243,8 @@ module.exports = { title: "Spring for Apache Kafka 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-ldap/spring-ldap.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-ldap.md"], + initialOpenGroupIndex: 0 } ], "/spring-shell/": [ @@ -872,8 +1252,8 @@ module.exports = { title: "Spring Shell 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-shell/spring-shell.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-shell.md"], + initialOpenGroupIndex: 0 } ], "/spring-statemachine/": [ @@ -881,8 +1261,8 @@ module.exports = { title: "Spring Statemachine 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-statemachine/spring-statemachine.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-statemachine.md"], + 
initialOpenGroupIndex: 0 } ], "/spring-vault/": [ @@ -890,8 +1270,8 @@ module.exports = { title: "Spring Vault 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-vault/spring-vault.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-vault.md"], + initialOpenGroupIndex: 0 } ], "/spring-web-flow/": [ @@ -900,24 +1280,24 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: [ - "/spring-web-flow/preface.md", - "/spring-web-flow/introduction.md", - "/spring-web-flow/whatsnew.md", - "/spring-web-flow/defining-flows.md", - "/spring-web-flow/el.md", - "/spring-web-flow/views.md", - "/spring-web-flow/actions.md", - "/spring-web-flow/flow-managed-persistence.md", - "/spring-web-flow/flow-security.md", - "/spring-web-flow/flow-inheritance.md", - "/spring-web-flow/system-setup.md", - "/spring-web-flow/spring-mvc.md", - "/spring-web-flow/spring-js.md", - "/spring-web-flow/spring-faces.md", - "/spring-web-flow/testing.md", - "/spring-web-flow/field-mappings.md" + "preface.md", + "introduction.md", + "whatsnew.md", + "defining-flows.md", + "el.md", + "views.md", + "actions.md", + "flow-managed-persistence.md", + "flow-security.md", + "flow-inheritance.md", + "system-setup.md", + "spring-mvc.md", + "spring-js.md", + "spring-faces.md", + "testing.md", + "field-mappings.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-web-services/": [ @@ -925,8 +1305,8 @@ module.exports = { title: "Spring Web Services 文档", sidebarDepth: 2, collapsable: false, - children: ["/spring-web-services/spring-web-service.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + children: ["spring-web-service.md"], + initialOpenGroupIndex: 0 } ], // fallback @@ -947,7 +1327,7 @@ module.exports = { "introducing-spring-boot.md", "quickstart.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 }, { title: "教程", @@ -958,7 +1338,7 @@ module.exports = { "installing.md", "initializr.md" ], - initialOpenGroupIndex: 
0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 }, { title: "编辑器", @@ -968,7 +1348,7 @@ module.exports = { "vscode_java.md", "intellij_idea.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 }, { title: "代码案例", @@ -979,7 +1359,7 @@ module.exports = { "rest-service.md", "consuming-rest.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ] } diff --git a/docs/en/README.md b/docs/en/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a8c782c354cdd7b0cea8900b10e47b7759d185fb --- /dev/null +++ b/docs/en/README.md @@ -0,0 +1,73 @@ +# Spring 中文文档社区 + +从实操的角度整理翻译`Spring`相关文档,包括`快速开始`、`安装指南`、`开发工具配置`、`代码案例`等。 + +## 声明 +本站中的文章内容来源于 [Spring.io](https://spring.io/),原始版权归属于 [Spring.io](https://spring.io/)。本站对相关文章进行了翻译及整理。本站文章可供个人学习、研究或者欣赏之用,未经本站事先书面许可,不得进行任何转载、商用或与之相关的行为。 +商标声明:Spring 是 Pivotal Software, Inc. 在美国以及其他国家的商标。 + +## 文档列表 + +- [Spring](/why-spring.html) +- [Spring Boot](/spring-boot/getting-help.html) +- [Spring Framework](/spring-framework/overview.html) +- [Spring Data](/spring-data/spring-data.html) +- [Spring Cloud](/spring-cloud/documentation-overview.html) +- [Spring Cloud Data Flow](/spring-cloud-data-flow/spring-cloud-dataflow.html) +- [Spring Security](/spring-security/overview.html) +- [Spring for GraphQL](/spring-for-graphql/spring-graphql.html) +- [Spring Session](/spring-session/_index.html) +- [Spring Integration](/spring-integration/preface.html) +- [Spring HATEOAS](/spring-hateoas/spring-hateoas.html) +- [Spring REST Docs](/spring-rest-docs/spring-restdocs.html) +- [Spring Batch](/spring-batch/spring-batch-intro.html) +- [Spring AMQP](/spring-amqp/spring-amqp.html) +- [Spring CredHub](/spring-credhub/spring-credhub.html) +- [Spring Flo](/spring-flo/spring-flo.html) +- [Spring for Apache Kafka](/spring-for-apache-kafka/spring-kafka.html) +- [Spring LDAP](/spring-ldap/spring-ldap.html) +- [Spring Shell](/spring-shell/spring-shell.html) +- [Spring 
Statemachine](/spring-statemachine/spring-statemachine.html) +- [Spring Vault](/spring-vault/spring-vault.html) +- [Spring Web Flow](/spring-web-flow/preface.html) +- [Spring Web Services](/spring-web-services/spring-web-service.html) + + +## 参与贡献流程 + +所有 **`Java Spring 熟练使用者`** 可以参与到`Spring 中文文档社区`的建设中来,选择自己感兴趣题目,可以直接撰写文章,也可以翻译 [Spring 官方](https://spring.io/) 上面的内容,也可以校对别人翻译的文章。具体贡献流程如下。 +![](./readme/readme-1.png) + +### 1. 阅读文档帮助改善 + +在[`Spring 中文文档社区`](https://spring.gitcode.net)上浏览某一篇文档时,发现有不准确的地方,可以`随时`在该页面的左下方点击`在 GitCode 上编辑此页`。 +![](./readme/readme-2.png) + +### 2. 在 GitCode 校对/创作 + +进入GitCode之后,会自动定位到你想要修改的文件,修改文件内容。 + +#### 2-1. 仓库的成员 + +如果是仓库的成员,点击`“编辑”按钮,会直接进入可编辑的状态,供你修改文件内容。 + +![](./readme/readme-3.png) + +![](./readme/readme-4.png) + +#### 2-2. 非仓库的成员 + +如果是非仓库的成员,点击`“编辑”`,GitCode 会提醒你没有权限编辑,可以点击`Fork`按钮,将该项目克隆到你的 GitCode 账户下。 + +![](./readme/readme-5.png) + + +### 3. 内容编辑完成提交PR + +内容编辑完成者向[此仓库](https://gitcode.net/dev-cloud/spring-docs)提交 PR(Pull Request)。 + +### 4. 审核 +[主仓库](https://gitcode.net/dev-cloud/spring-docs) 管理者会 Review,符合要求的,即会 Merge 到[主仓库](https://gitcode.net/dev-cloud/spring-docs)中。 + +### 5. 
查看更新 +Merge 成功之后,稍等片刻就可以刷新页面查看更新。 diff --git a/docs/en/readme/readme-1.png b/docs/en/readme/readme-1.png new file mode 100644 index 0000000000000000000000000000000000000000..e3dec2b802776dbefd4fb5833815c7c4fafe77a2 Binary files /dev/null and b/docs/en/readme/readme-1.png differ diff --git a/docs/en/readme/readme-2.png b/docs/en/readme/readme-2.png new file mode 100644 index 0000000000000000000000000000000000000000..3d854fd894eed22ac3440ef5da850f672f7a58e5 Binary files /dev/null and b/docs/en/readme/readme-2.png differ diff --git a/docs/en/readme/readme-3.png b/docs/en/readme/readme-3.png new file mode 100644 index 0000000000000000000000000000000000000000..14acdf6db343c6725c8c3c119810013814671aa7 Binary files /dev/null and b/docs/en/readme/readme-3.png differ diff --git a/docs/en/readme/readme-4.png b/docs/en/readme/readme-4.png new file mode 100644 index 0000000000000000000000000000000000000000..b6a370a9f98fe2331cd62a54286b718ebf7513cd Binary files /dev/null and b/docs/en/readme/readme-4.png differ diff --git a/docs/en/readme/readme-5.png b/docs/en/readme/readme-5.png new file mode 100644 index 0000000000000000000000000000000000000000..bc98671d5b0069225c279474abf5d1781df24e4e Binary files /dev/null and b/docs/en/readme/readme-5.png differ diff --git a/docs/en/spring-amqp/README.md b/docs/en/spring-amqp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-amqp/spring-amqp.md b/docs/en/spring-amqp/spring-amqp.md new file mode 100644 index 0000000000000000000000000000000000000000..631c572daba81673cd6ca452814bced763f4c341 --- /dev/null +++ b/docs/en/spring-amqp/spring-amqp.md @@ -0,0 +1,8287 @@ +# Spring AMQP + +## 1. Preface + +The Spring AMQP project applies core Spring concepts to the development of AMQP-based messaging solutions. +We provide a “template” as a high-level abstraction for sending and receiving messages. +We also provide support for message-driven POJOs. 
+These libraries facilitate management of AMQP resources while promoting the use of dependency injection and declarative configuration. +In all of these cases, you can see similarities to the JMS support in the Spring Framework. +For other project-related information, visit the Spring AMQP project [homepage](https://projects.spring.io/spring-amqp/). + +## 2. What’s New + +### 2.1. Changes in 2.4 Since 2.3 + +This section describes the changes between version 2.4 and version 2.4. +See [Change History](#change-history) for changes in previous versions. + +#### 2.1.1. `@RabbitListener` Changes + +`MessageProperties` is now available for argument matching. +See [Annotated Endpoint Method Signature](#async-annotation-driven-enable-signature) for more information. + +#### 2.1.2. `RabbitAdmin` Changes + +A new property `recoverManualDeclarations` allows recovery of manually declared queues/exchanges/bindings. +See [Recovering Auto-Delete Declarations](#declarable-recovery) for more information. + +#### 2.1.3. Remoting Support + +Support remoting using Spring Framework’s RMI support is deprecated and will be removed in 3.0. +See [Spring Remoting with AMQP](#remoting) for more information. + +## 3. Introduction + +This first part of the reference documentation is a high-level overview of Spring AMQP and the underlying concepts. +It includes some code snippets to get you up and running as quickly as possible. + +### 3.1. Quick Tour for the impatient + +#### 3.1.1. Introduction + +This is the five-minute tour to get started with Spring AMQP. + +Prerequisites: Install and run the RabbitMQ broker ([https://www.rabbitmq.com/download.html](https://www.rabbitmq.com/download.html)). +Then grab the spring-rabbit JAR and all its dependencies - the easiest way to do so is to declare a dependency in your build tool. 
+For example, for Maven, you can do something resembling the following: + +``` + + org.springframework.amqp + spring-rabbit + 2.4.2 + +``` + +For Gradle, you can do something resembling the following: + +``` +compile 'org.springframework.amqp:spring-rabbit:2.4.2' +``` + +##### Compatibility + +The minimum Spring Framework version dependency is 5.2.0. + +The minimum `amqp-client` Java client library version is 5.7.0. + +##### Very, Very Quick + +This section offers the fastest introduction. + +First, add the following `import` statements to make the examples later in this section work: + +``` +import org.springframework.amqp.core.AmqpAdmin; +import org.springframework.amqp.core.AmqpTemplate; +import org.springframework.amqp.core.Queue; +import org.springframework.amqp.rabbit.connection.CachingConnectionFactory; +import org.springframework.amqp.rabbit.connection.ConnectionFactory; +import org.springframework.amqp.rabbit.core.RabbitAdmin; +import org.springframework.amqp.rabbit.core.RabbitTemplate; +``` + +The following example uses plain, imperative Java to send and receive a message: + +``` +ConnectionFactory connectionFactory = new CachingConnectionFactory(); +AmqpAdmin admin = new RabbitAdmin(connectionFactory); +admin.declareQueue(new Queue("myqueue")); +AmqpTemplate template = new RabbitTemplate(connectionFactory); +template.convertAndSend("myqueue", "foo"); +String foo = (String) template.receiveAndConvert("myqueue"); +``` + +Note that there is also a `ConnectionFactory` in the native Java Rabbit client. +We use the Spring abstraction in the preceding code. +It caches channels (and optionally connections) for reuse. +We rely on the default exchange in the broker (since none is specified in the send), and the default binding of all queues to the default exchange by their name (thus, we can use the queue name as a routing key in the send). +Those behaviors are defined in the AMQP specification. 
+ +##### With XML Configuration + +The following example is the same as the preceding example but externalizes the resource configuration to XML: + +``` +ApplicationContext context = + new GenericXmlApplicationContext("classpath:/rabbit-context.xml"); +AmqpTemplate template = context.getBean(AmqpTemplate.class); +template.convertAndSend("myqueue", "foo"); +String foo = (String) template.receiveAndConvert("myqueue"); +``` + +``` + + + + + + + + + + + +``` + +By default, the `` declaration automatically looks for beans of type `Queue`, `Exchange`, and `Binding` and declares them to the broker on behalf of the user. +As a result, you need not use that bean explicitly in the simple Java driver. +There are plenty of options to configure the properties of the components in the XML schema. +You can use auto-complete features of your XML editor to explore them and look at their documentation. + +##### With Java Configuration + +The following example repeats the same example as the preceding example but with the external configuration defined in Java: + +``` +ApplicationContext context = + new AnnotationConfigApplicationContext(RabbitConfiguration.class); +AmqpTemplate template = context.getBean(AmqpTemplate.class); +template.convertAndSend("myqueue", "foo"); +String foo = (String) template.receiveAndConvert("myqueue"); + +........ 
+ +@Configuration +public class RabbitConfiguration { + + @Bean + public CachingConnectionFactory connectionFactory() { + return new CachingConnectionFactory("localhost"); + } + + @Bean + public RabbitAdmin amqpAdmin() { + return new RabbitAdmin(connectionFactory()); + } + + @Bean + public RabbitTemplate rabbitTemplate() { + return new RabbitTemplate(connectionFactory()); + } + + @Bean + public Queue myQueue() { + return new Queue("myqueue"); + } +} +``` + +##### With Spring Boot Auto Configuration and an Async POJO Listener + +Spring Boot automatically configures the infrastructure beans, as the following example shows: + +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + public ApplicationRunner runner(AmqpTemplate template) { + return args -> template.convertAndSend("myqueue", "foo"); + } + + @Bean + public Queue myQueue() { + return new Queue("myqueue"); + } + + @RabbitListener(queues = "myqueue") + public void listen(String in) { + System.out.println(in); + } + +} +``` + +## 4. Reference + +This part of the reference documentation details the various components that comprise Spring AMQP. +The [main chapter](#amqp) covers the core classes to develop an AMQP application. +This part also includes a chapter about the [sample applications](#sample-apps). + +### 4.1. Using Spring AMQP + +This chapter explores the interfaces and classes that are the essential components for developing applications with Spring AMQP. + +#### 4.1.1. AMQP Abstractions + +Spring AMQP consists of two modules (each represented by a JAR in the distribution): `spring-amqp` and `spring-rabbit`. +The 'spring-amqp' module contains the `org.springframework.amqp.core` package. +Within that package, you can find the classes that represent the core AMQP “model”. 
+Our intention is to provide generic abstractions that do not rely on any particular AMQP broker implementation or client library. +End user code can be more portable across vendor implementations as it can be developed against the abstraction layer only. +These abstractions are then implemented by broker-specific modules, such as 'spring-rabbit'. +There is currently only a RabbitMQ implementation. +However, the abstractions have been validated in .NET using Apache Qpid in addition to RabbitMQ. +Since AMQP operates at the protocol level, in principle, you can use the RabbitMQ client with any broker that supports the same protocol version, but we do not test any other brokers at present. + +This overview assumes that you are already familiar with the basics of the AMQP specification. +If not, have a look at the resources listed in [Other Resources](#resources) + +##### `Message` + +The 0-9-1 AMQP specification does not define a `Message` class or interface. +Instead, when performing an operation such as `basicPublish()`, the content is passed as a byte-array argument and additional properties are passed in as separate arguments. +Spring AMQP defines a `Message` class as part of a more general AMQP domain model representation. +The purpose of the `Message` class is to encapsulate the body and properties within a single instance so that the API can, in turn, be simpler. 
+The following example shows the `Message` class definition: + +``` +public class Message { + + private final MessageProperties messageProperties; + + private final byte[] body; + + public Message(byte[] body, MessageProperties messageProperties) { + this.body = body; + this.messageProperties = messageProperties; + } + + public byte[] getBody() { + return this.body; + } + + public MessageProperties getMessageProperties() { + return this.messageProperties; + } +} +``` + +The `MessageProperties` interface defines several common properties, such as 'messageId', 'timestamp', 'contentType', and several more. +You can also extend those properties with user-defined 'headers' by calling the `setHeader(String key, Object value)` method. + +| |Starting with versions `1.5.7`, `1.6.11`, `1.7.4`, and `2.0.0`, if a message body is a serialized `Serializable` java object, it is no longer deserialized (by default) when performing `toString()` operations (such as in log messages).
This is to prevent unsafe deserialization.
By default, only `java.util` and `java.lang` classes are deserialized.
To revert to the previous behavior, you can add allowable class/package patterns by invoking `Message.addAllowedListPatterns(…​)`.
A simple `*` wildcard is supported, for example `com.something.*`, `*.MyClass`.
Bodies that cannot be deserialized are represented by `byte[]` in log messages.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Exchange + +The `Exchange` interface represents an AMQP Exchange, which is what a Message Producer sends to. +Each Exchange within a virtual host of a broker has a unique name as well as a few other properties. +The following example shows the `Exchange` interface: + +``` +public interface Exchange { + + String getName(); + + String getExchangeType(); + + boolean isDurable(); + + boolean isAutoDelete(); + + Map getArguments(); + +} +``` + +As you can see, an `Exchange` also has a 'type' represented by constants defined in `ExchangeTypes`. +The basic types are: `direct`, `topic`, `fanout`, and `headers`. +In the core package, you can find implementations of the `Exchange` interface for each of those types. +The behavior varies across these `Exchange` types in terms of how they handle bindings to queues. +For example, a `Direct` exchange lets a queue be bound by a fixed routing key (often the queue’s name). +A `Topic` exchange supports bindings with routing patterns that may include the '\*' and '#' wildcards for 'exactly-one' and 'zero-or-more', respectively. +The `Fanout` exchange publishes to all queues that are bound to it without taking any routing key into consideration. 
+For much more information about these and the other Exchange types, see [Other Resources](#resources). + +| |The AMQP specification also requires that any broker provide a “default” direct exchange that has no name.
All queues that are declared are bound to that default `Exchange` with their names as routing keys.
You can learn more about the default Exchange’s usage within Spring AMQP in [`AmqpTemplate`](#amqp-template).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Queue + +The `Queue` class represents the component from which a message consumer receives messages. +Like the various `Exchange` classes, our implementation is intended to be an abstract representation of this core AMQP type. +The following listing shows the `Queue` class: + +``` +public class Queue { + + private final String name; + + private volatile boolean durable; + + private volatile boolean exclusive; + + private volatile boolean autoDelete; + + private volatile Map arguments; + + /** + * The queue is durable, non-exclusive and non auto-delete. + * + * @param name the name of the queue. + */ + public Queue(String name) { + this(name, true, false, false); + } + + // Getters and Setters omitted for brevity + +} +``` + +Notice that the constructor takes the queue name. +Depending on the implementation, the admin template may provide methods for generating a uniquely named queue. +Such queues can be useful as a “reply-to” address or in other **temporary** situations. +For that reason, the 'exclusive' and 'autoDelete' properties of an auto-generated queue would both be set to 'true'. 
+ +| |See the section on queues in [Configuring the Broker](#broker-configuration) for information about declaring queues by using namespace support, including queue arguments.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Binding + +Given that a producer sends to an exchange and a consumer receives from a queue, the bindings that connect queues to exchanges are critical for connecting those producers and consumers via messaging. +In Spring AMQP, we define a `Binding` class to represent those connections. +This section reviews the basic options for binding queues to exchanges. + +You can bind a queue to a `DirectExchange` with a fixed routing key, as the following example shows: + +``` +new Binding(someQueue, someDirectExchange, "foo.bar"); +``` + +You can bind a queue to a `TopicExchange` with a routing pattern, as the following example shows: + +``` +new Binding(someQueue, someTopicExchange, "foo.*"); +``` + +You can bind a queue to a `FanoutExchange` with no routing key, as the following example shows: + +``` +new Binding(someQueue, someFanoutExchange); +``` + +We also provide a `BindingBuilder` to facilitate a “fluent API” style, as the following example shows: + +``` +Binding b = BindingBuilder.bind(someQueue).to(someTopicExchange).with("foo.*"); +``` + +| |For clarity, the preceding example shows the `BindingBuilder` class, but this style works well when using a static import for the 'bind()' method.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------| + +By itself, an instance of the `Binding` class only holds the data about a connection. +In other words, it is not an “active” component. 
+However, as you will see later in [Configuring the Broker](#broker-configuration), the `AmqpAdmin` class can use `Binding` instances to actually trigger the binding actions on the broker. +Also, as you can see in that same section, you can define the `Binding` instances by using Spring’s `@Bean` annotations within `@Configuration` classes. +There is also a convenient base class that further simplifies that approach for generating AMQP-related bean definitions and recognizes the queues, exchanges, and bindings so that they are all declared on the AMQP broker upon application startup. + +The `AmqpTemplate` is also defined within the core package. +As one of the main components involved in actual AMQP messaging, it is discussed in detail in its own section (see [`AmqpTemplate`](#amqp-template)). + +#### 4.1.2. Connection and Resource Management + +Whereas the AMQP model we described in the previous section is generic and applicable to all implementations, when we get into the management of resources, the details are specific to the broker implementation. +Therefore, in this section, we focus on code that exists only within our “spring-rabbit” module since, at this point, RabbitMQ is the only supported implementation. + +The central component for managing a connection to the RabbitMQ broker is the `ConnectionFactory` interface. +The responsibility of a `ConnectionFactory` implementation is to provide an instance of `org.springframework.amqp.rabbit.connection.Connection`, which is a wrapper for `com.rabbitmq.client.Connection`. + +##### Choosing a Connection Factory + +There are three connection factories to chose from + +* `PooledChannelConnectionFactory` + +* `ThreadChannelConnectionFactory` + +* `CachingConnectionFactory` + +The first two were added in version 2.3. + +For most use cases, the `PooledChannelConnectionFactory` should be used. 
+The `ThreadChannelConnectionFactory` can be used if you want to ensure strict message ordering without the need to use [Scoped Operations](#scoped-operations). +The `CachingConnectionFactory` should be used if you want to use correlated publisher confirmations or if you wish to open multiple connections, via its `CacheMode`. + +Simple publisher confirmations are supported by all three factories. + +When configuring a `RabbitTemplate` to use a [separate connection](#separate-connection), you can now, starting with version 2.3.2, configure the publishing connection factory to be a different type. +By default, the publishing factory is the same type and any properties set on the main factory are also propagated to the publishing factory. + +###### `PooledChannelConnectionFactory` + +This factory manages a single connection and two pools of channels, based on the Apache Pool2. +One pool is for transactional channels, the other is for non-transactional channels. +The pools are `GenericObjectPool` s with default configuration; a callback is provided to configure the pools; refer to the Apache documentation for more information. + +The Apache `commons-pool2` jar must be on the class path to use this factory. + +``` +@Bean +PooledChannelConnectionFactory pcf() throws Exception { + ConnectionFactory rabbitConnectionFactory = new ConnectionFactory(); + rabbitConnectionFactory.setHost("localhost"); + PooledChannelConnectionFactory pcf = new PooledChannelConnectionFactory(rabbitConnectionFactory); + pcf.setPoolConfigurer((pool, tx) -> { + if (tx) { + // configure the transactional pool + } + else { + // configure the non-transactional pool + } + }); + return pcf; +} +``` + +###### `ThreadChannelConnectionFactory` + +This factory manages a single connection and two `ThreadLocal` s, one for transactional channels, the other for non-transactional channels. +This factory ensures that all operations on the same thread use the same channel (as long as it remains open). 
+This facilitates strict message ordering without the need for [Scoped Operations](#scoped-operations). +To avoid memory leaks, if your application uses many short-lived threads, you must call the factory’s `closeThreadChannel()` to release the channel resource. +Starting with version 2.3.7, a thread can transfer its channel(s) to another thread. +See [Strict Message Ordering in a Multi-Threaded Environment](#multi-strict) for more information. + +###### `CachingConnectionFactory` + +The third implementation provided is the `CachingConnectionFactory`, which, by default, establishes a single connection proxy that can be shared by the application. +Sharing of the connection is possible since the “unit of work” for messaging with AMQP is actually a “channel” (in some ways, this is similar to the relationship between a connection and a session in JMS). +The connection instance provides a `createChannel` method. +The `CachingConnectionFactory` implementation supports caching of those channels, and it maintains separate caches for channels based on whether they are transactional. +When creating an instance of `CachingConnectionFactory`, you can provide the 'hostname' through the constructor. +You should also provide the 'username' and 'password' properties. +To configure the size of the channel cache (the default is 25), you can call the`setChannelCacheSize()` method. + +Starting with version 1.3, you can configure the `CachingConnectionFactory` to cache connections as well as only channels. +In this case, each call to `createConnection()` creates a new connection (or retrieves an idle one from the cache). +Closing a connection returns it to the cache (if the cache size has not been reached). +Channels created on such connections are also cached. +The use of separate connections might be useful in some environments, such as consuming from an HA cluster, in +conjunction with a load balancer, to connect to different cluster members, and others. 
+To cache connections, set the `cacheMode` to `CacheMode.CONNECTION`. + +| |This does not limit the number of connections.
Rather, it specifies how many idle open connections are allowed.| +|---|-------------------------------------------------------------------------------------------------------------------| + +Starting with version 1.5.5, a new property called `connectionLimit` is provided. +When this property is set, it limits the total number of connections allowed. +When set, if the limit is reached, the `channelCheckoutTimeLimit` is used to wait for a connection to become idle. +If the time is exceeded, an `AmqpTimeoutException` is thrown. + +| |When the cache mode is `CONNECTION`, automatic declaration of queues and others
(See [Automatic Declaration of Exchanges, Queues, and Bindings](#automatic-declaration)) is NOT supported.

Also, at the time of this writing, the `amqp-client` library by default creates a fixed thread pool for each connection (default size: `Runtime.getRuntime().availableProcessors() * 2` threads).
When using a large number of connections, you should consider setting a custom `executor` on the `CachingConnectionFactory`.
Then, the same executor can be used by all connections and its threads can be shared.
The executor’s thread pool should be unbounded or set appropriately for the expected use (usually, at least one thread per connection).
If multiple channels are created on each connection, the pool size affects the concurrency, so a variable (or simple cached) thread pool executor would be most suitable.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +It is important to understand that the cache size is (by default) not a limit but is merely the number of channels that can be cached. +With a cache size of, say, 10, any number of channels can actually be in use. +If more than 10 channels are being used and they are all returned to the cache, 10 go in the cache. +The remainder are physically closed. + +Starting with version 1.6, the default channel cache size has been increased from 1 to 25. +In high volume, multi-threaded environments, a small cache means that channels are created and closed at a high rate. +Increasing the default cache size can avoid this overhead. +You should monitor the channels in use through the RabbitMQ Admin UI and consider increasing the cache size further if you +see many channels being created and closed. 
+The cache grows only on-demand (to suit the concurrency requirements of the application), so this change does not +impact existing low-volume applications. + +Starting with version 1.4.2, the `CachingConnectionFactory` has a property called `channelCheckoutTimeout`. +When this property is greater than zero, the `channelCacheSize` becomes a limit on the number of channels that can be created on a connection. +If the limit is reached, calling threads block until a channel is available or this timeout is reached, in which case a `AmqpTimeoutException` is thrown. + +| |Channels used within the framework (for example,`RabbitTemplate`) are reliably returned to the cache.
If you create channels outside of the framework (for example,
by accessing the connections directly and invoking `createChannel()`), you must return them (by closing) reliably, perhaps in a `finally` block, to avoid running out of channels.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to create a new `connection`: + +``` +CachingConnectionFactory connectionFactory = new CachingConnectionFactory("somehost"); +connectionFactory.setUsername("guest"); +connectionFactory.setPassword("guest"); + +Connection connection = connectionFactory.createConnection(); +``` + +When using XML, the configuration might look like the following example: + +``` + + + + + +``` + +| |There is also a `SingleConnectionFactory` implementation that is available only in the unit test code of the framework.
It is simpler than `CachingConnectionFactory`, since it does not cache channels, but it is not intended for practical usage outside of simple tests due to its lack of performance and resilience.
If you need to implement your own `ConnectionFactory` for some reason, the `AbstractConnectionFactory` base class may provide a nice starting point.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +A `ConnectionFactory` can be created quickly and conveniently by using the rabbit namespace, as follows: + +``` + +``` + +In most cases, this approach is preferable, since the framework can choose the best defaults for you. +The created instance is a `CachingConnectionFactory`. +Keep in mind that the default cache size for channels is 25. +If you want more channels to be cachedm, set a larger value by setting the 'channelCacheSize' property. +In XML it would look like as follows: + +``` + + + + + + +``` + +Also, with the namespace, you can add the 'channel-cache-size' attribute, as follows: + +``` + +``` + +The default cache mode is `CHANNEL`, but you can configure it to cache connections instead. +In the following example, we use `connection-cache-size`: + +``` + +``` + +You can provide host and port attributes by using the namespace, as follows: + +``` + +``` + +Alternatively, if running in a clustered environment, you can use the addresses attribute, as follows: + +``` + +``` + +See [Connecting to a Cluster](#cluster) for information about `address-shuffle-mode`. + +The following example with a custom thread factory that prefixes thread names with `rabbitmq-`: + +``` + + + + + +``` + +##### AddressResolver + +Starting with version 2.1.15, you can now use an `AddressResover` to resolve the connection address(es). 
+This will override any settings of the `addresses` and `host/port` properties. + +##### Naming Connections + +Starting with version 1.7, a `ConnectionNameStrategy` is provided for the injection into the `AbstractionConnectionFactory`. +The generated name is used for the application-specific identification of the target RabbitMQ connection. +The connection name is displayed in the management UI if the RabbitMQ server supports it. +This value does not have to be unique and cannot be used as a connection identifier — for example, in HTTP API requests. +This value is supposed to be human-readable and is a part of `ClientProperties` under the `connection_name` key. +You can use a simple Lambda, as follows: + +``` +connectionFactory.setConnectionNameStrategy(connectionFactory -> "MY_CONNECTION"); +``` + +The `ConnectionFactory` argument can be used to distinguish target connection names by some logic. +By default, the `beanName` of the `AbstractConnectionFactory`, a hex string representing the object, and an internal counter are used to generate the `connection_name`. +The `` namespace component is also supplied with the `connection-name-strategy` attribute. + +An implementation of `SimplePropertyValueConnectionNameStrategy` sets the connection name to an application property. +You can declare it as a `@Bean` and inject it into the connection factory, as the following example shows: + +``` +@Bean +public SimplePropertyValueConnectionNameStrategy cns() { + return new SimplePropertyValueConnectionNameStrategy("spring.application.name"); +} + +@Bean +public ConnectionFactory rabbitConnectionFactory(ConnectionNameStrategy cns) { + CachingConnectionFactory connectionFactory = new CachingConnectionFactory(); + ... + connectionFactory.setConnectionNameStrategy(cns); + return connectionFactory; +} +``` + +The property must exist in the application context’s `Environment`. 
+ +| |When using Spring Boot and its autoconfigured connection factory, you need only declare the `ConnectionNameStrategy` `@Bean`.
Boot auto-detects the bean and wires it into the factory.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Blocked Connections and Resource Constraints + +The connection might be blocked for interaction from the broker that corresponds to the [Memory Alarm](https://www.rabbitmq.com/memory.html). +Starting with version 2.0, the `org.springframework.amqp.rabbit.connection.Connection` can be supplied with `com.rabbitmq.client.BlockedListener` instances to be notified for connection blocked and unblocked events. +In addition, the `AbstractConnectionFactory` emits a `ConnectionBlockedEvent` and `ConnectionUnblockedEvent`, respectively, through its internal `BlockedListener` implementation. +These let you provide application logic to react appropriately to problems on the broker and (for example) take some corrective actions. + +| |When the application is configured with a single `CachingConnectionFactory`, as it is by default with Spring Boot auto-configuration, the application stops working when the connection is blocked by the Broker.
And when it is blocked by the Broker, any of its clients stop to work.
If we have producers and consumers in the same application, we may end up with a deadlock when producers are blocking the connection (because there are no resources on the Broker any more) and consumers cannot free them (because the connection is blocked).
To mitigate the problem, we suggest having one more separate `CachingConnectionFactory` instance with the same options — one for producers and one for consumers.
A separate `CachingConnectionFactory` is not possible for transactional producers that execute on a consumer thread, since they should reuse the `Channel` associated with the consumer transactions.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.0.2, the `RabbitTemplate` has a configuration option to automatically use a second connection factory, unless transactions are being used. +See [Using a Separate Connection](#separate-connection) for more information. +The `ConnectionNameStrategy` for the publisher connection is the same as the primary strategy with `.publisher` appended to the result of calling the method. + +Starting with version 1.7.7, an `AmqpResourceNotAvailableException` is provided, which is thrown when `SimpleConnection.createChannel()` cannot create a `Channel` (for example, because the `channelMax` limit is reached and there are no available channels in the cache). +You can use this exception in the `RetryPolicy` to recover the operation after some back-off. 
+ +##### Configuring the Underlying Client Connection Factory + +The `CachingConnectionFactory` uses an instance of the Rabbit client `ConnectionFactory`. +A number of configuration properties are passed through (`host, port, userName, password, requestedHeartBeat, and connectionTimeout` for example) when setting the equivalent property on the `CachingConnectionFactory`. +To set other properties (`clientProperties`, for example), you can define an instance of the Rabbit factory and provide a reference to it by using the appropriate constructor of the `CachingConnectionFactory`. +When using the namespace ([as described earlier](#connections)), you need to provide a reference to the configured factory in the `connection-factory` attribute. +For convenience, a factory bean is provided to assist in configuring the connection factory in a Spring application context, as discussed in [the next section](#rabbitconnectionfactorybean-configuring-ssl). + +``` + +``` + +| |The 4.0.x client enables automatic recovery by default.
While compatible with this feature, Spring AMQP has its own recovery mechanisms and the client recovery feature generally is not needed.
We recommend disabling `amqp-client` automatic recovery, to avoid getting `AutoRecoverConnectionNotCurrentlyOpenException` instances when the broker is available but the connection has not yet recovered.
You may notice this exception, for example, when a `RetryTemplate` is configured in a `RabbitTemplate`, even when failing over to another broker in a cluster.
Since the auto-recovering connection recovers on a timer, the connection may be recovered more quickly by using Spring AMQP’s recovery mechanisms.
Starting with version 1.7.1, Spring AMQP disables `amqp-client` automatic recovery unless you explicitly create your own RabbitMQ connection factory and provide it to the `CachingConnectionFactory`.
RabbitMQ `ConnectionFactory` instances created by the `RabbitConnectionFactoryBean` also have the option disabled by default.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `RabbitConnectionFactoryBean` and Configuring SSL + +Starting with version 1.4, a convenient `RabbitConnectionFactoryBean` is provided to enable convenient configuration of SSL properties on the underlying client connection factory by using dependency injection. +Other setters delegate to the underlying factory. +Previously, you had to configure the SSL options programmatically. +The following example shows how to configure a `RabbitConnectionFactoryBean`: + +``` + + + + + + +``` + +See the [RabbitMQ Documentation](https://www.rabbitmq.com/ssl.html) for information about configuring SSL. +Omit the `keyStore` and `trustStore` configuration to connect over SSL without certificate validation. +The next example shows how you can provide key and trust store configuration. 
+ +The `sslPropertiesLocation` property is a Spring `Resource` pointing to a properties file containing the following keys: + +``` +keyStore=file:/secret/keycert.p12 +trustStore=file:/secret/trustStore +keyStore.passPhrase=secret +trustStore.passPhrase=secret +``` + +The `keyStore` and `truststore` are Spring `Resources` pointing to the stores. +Typically this properties file is secured by the operating system with the application having read access. + +Starting with Spring AMQP version 1.5,you can set these properties directly on the factory bean. +If both discrete properties and `sslPropertiesLocation` is provided, properties in the latter override the +discrete values. + +| |Starting with version 2.0, the server certificate is validated by default because it is more secure.
If you wish to skip this validation for some reason, set the factory bean’s `skipServerCertificateValidation` property to `true`.
Starting with version 2.1, the `RabbitConnectionFactoryBean` now calls `enableHostnameVerification()` by default.
To revert to the previous behavior, set the `enableHostnameVerification` property to `false`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 2.2.5, the factory bean will always use TLS v1.2 by default; previously, it used v1.1 in some cases and v1.2 in others (depending on other properties).
If you need to use v1.1 for some reason, set the `sslAlgorithm` property: `setSslAlgorithm("TLSv1.1")`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Connecting to a Cluster + +To connect to a cluster, configure the `addresses` property on the `CachingConnectionFactory`: + +``` +@Bean +public CachingConnectionFactory ccf() { + CachingConnectionFactory ccf = new CachingConnectionFactory(); + ccf.setAddresses("host1:5672,host2:5672,host3:5672"); + return ccf; +} +``` + +The underlying connection factory will attempt to connect to each host, in order, whenever a new connection is established. +Starting with version 2.1.8, the connection order can be made random by setting the `addressShuffleMode` property to `RANDOM`; the shuffle will be applied before creating any new connection. +Starting with version 2.6, the `INORDER` shuffle mode was added, which means the first address is moved to the end after a connection is created. +You may wish to use this mode with the [RabbitMQ Sharding Plugin](https://github.com/rabbitmq/rabbitmq-sharding) with `CacheMode.CONNECTION` and suitable concurrency if you wish to consume from all shards on all nodes. + +``` +@Bean +public CachingConnectionFactory ccf() { + CachingConnectionFactory ccf = new CachingConnectionFactory(); + ccf.setAddresses("host1:5672,host2:5672,host3:5672"); + ccf.setAddressShuffleMode(AddressShuffleMode.RANDOM); + return ccf; +} +``` + +##### Routing Connection Factory + +Starting with version 1.3, the `AbstractRoutingConnectionFactory` has been introduced. +This factory provides a mechanism to configure mappings for several `ConnectionFactories` and determine a target `ConnectionFactory` by some `lookupKey` at runtime. 
+Typically, the implementation checks a thread-bound context. +For convenience, Spring AMQP provides the `SimpleRoutingConnectionFactory`, which gets the current thread-bound `lookupKey` from the `SimpleResourceHolder`. +The following examples shows how to configure a `SimpleRoutingConnectionFactory` in both XML and Java: + +``` + + + + + + + + + + +``` + +``` +public class MyService { + + @Autowired + private RabbitTemplate rabbitTemplate; + + public void service(String vHost, String payload) { + SimpleResourceHolder.bind(rabbitTemplate.getConnectionFactory(), vHost); + rabbitTemplate.convertAndSend(payload); + SimpleResourceHolder.unbind(rabbitTemplate.getConnectionFactory()); + } + +} +``` + +It is important to unbind the resource after use. +For more information, see the [JavaDoc](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/rabbit/connection/AbstractRoutingConnectionFactory.html) for `AbstractRoutingConnectionFactory`. + +Starting with version 1.4, `RabbitTemplate` supports the SpEL `sendConnectionFactorySelectorExpression` and `receiveConnectionFactorySelectorExpression` properties, which are evaluated on each AMQP protocol interaction operation (`send`, `sendAndReceive`, `receive`, or `receiveAndReply`), resolving to a `lookupKey` value for the provided `AbstractRoutingConnectionFactory`. +You can use bean references, such as `@vHostResolver.getVHost(#root)` in the expression. +For `send` operations, the message to be sent is the root evaluation object. +For `receive` operations, the `queueName` is the root evaluation object. + +The routing algorithm is as follows: If the selector expression is `null` or is evaluated to `null` or the provided `ConnectionFactory` is not an instance of `AbstractRoutingConnectionFactory`, everything works as before, relying on the provided `ConnectionFactory` implementation. 
+The same occurs if the evaluation result is not `null`, but there is no target `ConnectionFactory` for that `lookupKey` and the `AbstractRoutingConnectionFactory` is configured with `lenientFallback = true`.
+In the case of an `AbstractRoutingConnectionFactory`, it falls back to its `routing` implementation based on `determineCurrentLookupKey()`.
+However, if `lenientFallback = false`, an `IllegalStateException` is thrown.
+
+The namespace support also provides the `send-connection-factory-selector-expression` and `receive-connection-factory-selector-expression` attributes on the `` component.
+
+Also, starting with version 1.4, you can configure a routing connection factory in a listener container.
+In that case, the list of queue names is used as the lookup key.
+For example, if you configure the container with `setQueueNames("thing1", "thing2")`, the lookup key is `[thing1,thing2]` (note that there is no space in the key).
+
+Starting with version 1.6.9, you can add a qualifier to the lookup key by using `setLookupKeyQualifier` on the listener container.
+Doing so enables, for example, listening to queues with the same name but in a different virtual host (where you would have a connection factory for each).
+
+For example, with lookup key qualifier `thing1` and a container listening to queue `thing2`, the lookup key you could register the target connection factory with could be `thing1[thing2]`.
+
+| |The target (and default, if provided) connection factories must have the same settings for publisher confirms and returns.
See [Publisher Confirms and Returns](#cf-pub-conf-ret).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Queue Affinity and the `LocalizedQueueConnectionFactory` + +When using HA queues in a cluster, for the best performance, you may want to connect to the physical broker +where the lead queue resides. +The `CachingConnectionFactory` can be configured with multiple broker addresses. +This is to fail over and the client attempts to connect in order. +The `LocalizedQueueConnectionFactory` uses the REST API provided by the management plugin to determine which node is the lead for the queue. +It then creates (or retrieves from a cache) a `CachingConnectionFactory` that connects to just that node. +If the connection fails, the new lead node is determined and the consumer connects to it. +The `LocalizedQueueConnectionFactory` is configured with a default connection factory, in case the physical location of the queue cannot be determined, in which case it connects as normal to the cluster. + +The `LocalizedQueueConnectionFactory` is a `RoutingConnectionFactory` and the `SimpleMessageListenerContainer` uses the queue names as the lookup key as discussed in [Routing Connection Factory](#routing-connection-factory) above. 
+ +| |For this reason (the use of the queue name for the lookup), the `LocalizedQueueConnectionFactory` can only be used if the container is configured to listen to a single queue.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The RabbitMQ management plugin must be enabled on each node.| +|---|------------------------------------------------------------| + +| |This connection factory is intended for long-lived connections, such as those used by the `SimpleMessageListenerContainer`.
It is not intended for short connection use, such as with a `RabbitTemplate`, because of the overhead of invoking the REST API before making the connection.
Also, for publish operations, the queue is unknown, and the message is published to all cluster members anyway, so the logic of looking up the node has little value.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example configuration shows how to configure the factories: + +``` +@Autowired +private ConfigurationProperties props; + +@Bean +public CachingConnectionFactory defaultConnectionFactory() { + CachingConnectionFactory cf = new CachingConnectionFactory(); + cf.setAddresses(this.props.getAddresses()); + cf.setUsername(this.props.getUsername()); + cf.setPassword(this.props.getPassword()); + cf.setVirtualHost(this.props.getVirtualHost()); + return cf; +} + +@Bean +public LocalizedQueueConnectionFactory queueAffinityCF( + @Qualifier("defaultConnectionFactory") ConnectionFactory defaultCF) { + return new LocalizedQueueConnectionFactory(defaultCF, + StringUtils.commaDelimitedListToStringArray(this.props.getAddresses()), + StringUtils.commaDelimitedListToStringArray(this.props.getAdminUris()), + StringUtils.commaDelimitedListToStringArray(this.props.getNodes()), + this.props.getVirtualHost(), this.props.getUsername(), this.props.getPassword(), + false, null); +} +``` + +Notice that the first three parameters are arrays of `addresses`, `adminUris`, and `nodes`. +These are positional in that, when a container attempts to connect to a queue, it uses the admin API to determine which node is the lead for the queue and connects to the address in the same array position as that node. 
+ +##### Publisher Confirms and Returns + +Confirmed (with correlation) and returned messages are supported by setting the `CachingConnectionFactory` property `publisherConfirmType` to `ConfirmType.CORRELATED` and the `publisherReturns` property to 'true'. + +When these options are set, `Channel` instances created by the factory are wrapped in an `PublisherCallbackChannel`, which is used to facilitate the callbacks. +When such a channel is obtained, the client can register a `PublisherCallbackChannel.Listener` with the `Channel`. +The `PublisherCallbackChannel` implementation contains logic to route a confirm or return to the appropriate listener. +These features are explained further in the following sections. + +See also `simplePublisherConfirms` in [Scoped Operations](#scoped-operations). + +| |For some more background information, see the blog post by the RabbitMQ team titled [Introducing Publisher Confirms](https://www.rabbitmq.com/blog/2011/02/10/introducing-publisher-confirms/).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Connection and Channel Listeners + +The connection factory supports registering `ConnectionListener` and `ChannelListener` implementations. +This allows you to receive notifications for connection and channel related events. +(A `ConnectionListener` is used by the `RabbitAdmin` to perform declarations when the connection is established - see [Automatic Declaration of Exchanges, Queues, and Bindings](#automatic-declaration) for more information). 
+The following listing shows the `ConnectionListener` interface definition:
+
+```
+@FunctionalInterface
+public interface ConnectionListener {
+
+    void onCreate(Connection connection);
+
+    default void onClose(Connection connection) {
+    }
+
+    default void onShutDown(ShutdownSignalException signal) {
+    }
+
+}
+```
+
+Starting with version 2.0, the `org.springframework.amqp.rabbit.connection.Connection` object can be supplied with `com.rabbitmq.client.BlockedListener` instances to be notified for connection blocked and unblocked events.
+The following example shows the ChannelListener interface definition:
+
+```
+@FunctionalInterface
+public interface ChannelListener {
+
+    void onCreate(Channel channel, boolean transactional);
+
+    default void onShutDown(ShutdownSignalException signal) {
+    }
+
+}
+```
+
+See [Publishing is Asynchronous — How to Detect Successes and Failures](#publishing-is-async) for one scenario where you might want to register a `ChannelListener`.
+
+##### Logging Channel Close Events
+
+Version 1.5 introduced a mechanism to enable users to control logging levels.
+
+The `CachingConnectionFactory` uses a default strategy to log channel closures as follows:
+
+* Normal channel closes (200 OK) are not logged.
+
+* If a channel is closed due to a failed passive queue declaration, it is logged at debug level.
+
+* If a channel is closed because the `basic.consume` is refused due to an exclusive consumer condition, it is logged at
+  INFO level.
+
+* All others are logged at ERROR level.
+
+To modify this behavior, you can inject a custom `ConditionalExceptionLogger` into the `CachingConnectionFactory` in its `closeExceptionLogger` property.
+
+See also [Consumer Events](#consumer-events).
+
+##### Runtime Cache Properties
+
+Starting with version 1.6, the `CachingConnectionFactory` now provides cache statistics through the `getCacheProperties()` method.
+These statistics can be used to tune the cache to optimize it in production.
+For example, the high water marks can be used to determine whether the cache size should be increased. +If it equals the cache size, you might want to consider increasing further. +The following table describes the `CacheMode.CHANNEL` properties: + +| Property | Meaning | +|------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------| +| ```
connectionName
``` | The name of the connection generated by the `ConnectionNameStrategy`. | +| ```
channelCacheSize
``` | The currently configured maximum channels that are allowed to be idle. | +| ```
localPort
``` |The local port for the connection (if available).
This can be used to correlate with connections and channels on the RabbitMQ Admin UI.| +| ```
idleChannelsTx
``` | The number of transactional channels that are currently idle (cached). | +| ```
idleChannelsNotTx
``` | The number of non-transactional channels that are currently idle (cached). | +| ```
idleChannelsTxHighWater
``` | The maximum number of transactional channels that have been concurrently idle (cached). | +|```
idleChannelsNotTxHighWater
```| The maximum number of non-transactional channels have been concurrently idle (cached). | + +The following table describes the `CacheMode.CONNECTION` properties: + +| Property | Meaning | +|------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ```
connectionName:
``` | The name of the connection generated by the `ConnectionNameStrategy`. | +| ```
openConnections
``` | The number of connection objects representing connections to brokers. | +| ```
channelCacheSize
``` | The currently configured maximum channels that are allowed to be idle. | +| ```
connectionCacheSize
``` | The currently configured maximum connections that are allowed to be idle. | +| ```
idleConnections
``` | The number of connections that are currently idle. | +| ```
idleConnectionsHighWater
``` | The maximum number of connections that have been concurrently idle. | +| ```
idleChannelsTx:
``` | The number of transactional channels that are currently idle (cached) for this connection.
You can use the `localPort` part of the property name to correlate with connections and channels on the RabbitMQ Admin UI. | +| ```
idleChannelsNotTx:
``` |The number of non-transactional channels that are currently idle (cached) for this connection.
The `localPort` part of the property name can be used to correlate with connections and channels on the RabbitMQ Admin UI.| +| ```
idleChannelsTxHighWater:
``` | The maximum number of transactional channels that have been concurrently idle (cached).
The `localPort` part of the property name can be used to correlate with connections and channels on the RabbitMQ Admin UI. |
|```
idleChannelsNotTxHighWater:
```| The maximum number of non-transactional channels that have been concurrently idle (cached).
You can use the `localPort` part of the property name to correlate with connections and channels on the RabbitMQ Admin UI. | + +The `cacheMode` property (`CHANNEL` or `CONNECTION`) is also included. + +![cacheStats](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/cacheStats.png) + +Figure 1. JVisualVM Example + +##### RabbitMQ Automatic Connection/Topology recovery + +Since the first version of Spring AMQP, the framework has provided its own connection and channel recovery in the event of a broker failure. +Also, as discussed in [Configuring the Broker](#broker-configuration), the `RabbitAdmin` re-declares any infrastructure beans (queues and others) when the connection is re-established. +It therefore does not rely on the [auto-recovery](https://www.rabbitmq.com/api-guide.html#recovery) that is now provided by the `amqp-client` library. +Spring AMQP now uses the `4.0.x` version of `amqp-client`, which has auto recovery enabled by default. +Spring AMQP can still use its own recovery mechanisms if you wish, disabling it in the client, (by setting the `automaticRecoveryEnabled` property on the underlying `RabbitMQ connectionFactory` to `false`). +However, the framework is completely compatible with auto-recovery being enabled. +This means any consumers you create within your code (perhaps via `RabbitTemplate.execute()`) can be recovered automatically. + +| |Only elements (queues, exchanges, bindings) that are defined as beans will be re-declared after a connection failure.
Elements declared by invoking `RabbitAdmin.declare*()` methods directly from user code are unknown to the framework and therefore cannot be recovered.
If you have a need for a variable number of declarations, consider defining a bean, or beans, of type `Declarables`, as discussed in [Declaring Collections of Exchanges, Queues, and Bindings](#collection-declaration).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.3. Adding Custom Client Connection Properties + +The `CachingConnectionFactory` now lets you access the underlying connection factory to allow, for example, +setting custom client properties. +The following example shows how to do so: + +``` +connectionFactory.getRabbitConnectionFactory().getClientProperties().put("thing1", "thing2"); +``` + +These properties appear in the RabbitMQ Admin UI when viewing the connection. + +#### 4.1.4. `AmqpTemplate` + +As with many other high-level abstractions provided by the Spring Framework and related projects, Spring AMQP provides a “template” that plays a central role. +The interface that defines the main operations is called `AmqpTemplate`. +Those operations cover the general behavior for sending and receiving messages. +In other words, they are not unique to any implementation — hence the “AMQP” in the name. +On the other hand, there are implementations of that interface that are tied to implementations of the AMQP protocol. +Unlike JMS, which is an interface-level API itself, AMQP is a wire-level protocol. +The implementations of that protocol provide their own client libraries, so each implementation of the template interface depends on a particular client library. 
+Currently, there is only a single implementation: `RabbitTemplate`. +In the examples that follow, we often use an `AmqpTemplate`. +However, when you look at the configuration examples or any code excerpts where the template is instantiated or setters are invoked, you can see the implementation type (for example, `RabbitTemplate`). + +As mentioned earlier, the `AmqpTemplate` interface defines all of the basic operations for sending and receiving messages. +We will explore message sending and reception, respectively, in [Sending Messages](#sending-messages) and [Receiving Messages](#receiving-messages). + +See also [Async Rabbit Template](#async-template). + +##### Adding Retry Capabilities + +Starting with version 1.3, you can now configure the `RabbitTemplate` to use a `RetryTemplate` to help with handling problems with broker connectivity. +See the [spring-retry](https://github.com/spring-projects/spring-retry) project for complete information. +The following is only one example that uses an exponential back off policy and the default `SimpleRetryPolicy`, which makes three tries before throwing the exception to the caller. + +The following example uses the XML namespace: + +``` + + + + + + + + + + + +``` + +The following example uses the `@Configuration` annotation in Java: + +``` +@Bean +public RabbitTemplate rabbitTemplate() { + RabbitTemplate template = new RabbitTemplate(connectionFactory()); + RetryTemplate retryTemplate = new RetryTemplate(); + ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy(); + backOffPolicy.setInitialInterval(500); + backOffPolicy.setMultiplier(10.0); + backOffPolicy.setMaxInterval(10000); + retryTemplate.setBackOffPolicy(backOffPolicy); + template.setRetryTemplate(retryTemplate); + return template; +} +``` + +Starting with version 1.4, in addition to the `retryTemplate` property, the `recoveryCallback` option is supported on the `RabbitTemplate`. 
+It is used as a second argument for the `RetryTemplate.execute(RetryCallback retryCallback, RecoveryCallback recoveryCallback)`. + +| |The `RecoveryCallback` is somewhat limited, in that the retry context contains only the `lastThrowable` field.
For more sophisticated use cases, you should use an external `RetryTemplate` so that you can convey additional information to the `RecoveryCallback` through the context’s attributes.
The following example shows how to do so:| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +retryTemplate.execute( + new RetryCallback() { + + @Override + public Object doWithRetry(RetryContext context) throws Exception { + context.setAttribute("message", message); + return rabbitTemplate.convertAndSend(exchange, routingKey, message); + } + + }, new RecoveryCallback() { + + @Override + public Object recover(RetryContext context) throws Exception { + Object message = context.getAttribute("message"); + Throwable t = context.getLastThrowable(); + // Do something with message + return null; + } + }); +} +``` + +In this case, you would **not** inject a `RetryTemplate` into the `RabbitTemplate`. + +##### Publishing is Asynchronous — How to Detect Successes and Failures + +Publishing messages is an asynchronous mechanism and, by default, messages that cannot be routed are dropped by RabbitMQ. +For successful publishing, you can receive an asynchronous confirm, as described in [Correlated Publisher Confirms and Returns](#template-confirms). +Consider two failure scenarios: + +* Publish to an exchange but there is no matching destination queue. + +* Publish to a non-existent exchange. + +The first case is covered by publisher returns, as described in [Correlated Publisher Confirms and Returns](#template-confirms). + +For the second case, the message is dropped and no return is generated. +The underlying channel is closed with an exception. +By default, this exception is logged, but you can register a `ChannelListener` with the `CachingConnectionFactory` to obtain notifications of such events. 
+The following example shows how to add a `ConnectionListener`: + +``` +this.connectionFactory.addConnectionListener(new ConnectionListener() { + + @Override + public void onCreate(Connection connection) { + } + + @Override + public void onShutDown(ShutdownSignalException signal) { + ... + } + +}); +``` + +You can examine the signal’s `reason` property to determine the problem that occurred. + +To detect the exception on the sending thread, you can `setChannelTransacted(true)` on the `RabbitTemplate` and the exception is detected on the `txCommit()`. +However, **transactions significantly impede performance**, so consider this carefully before enabling transactions for just this one use case. + +##### Correlated Publisher Confirms and Returns + +The `RabbitTemplate` implementation of `AmqpTemplate` supports publisher confirms and returns. + +For returned messages, the template’s `mandatory` property must be set to `true` or the `mandatory-expression`must evaluate to `true` for a particular message. +This feature requires a `CachingConnectionFactory` that has its `publisherReturns` property set to `true` (see [Publisher Confirms and Returns](#cf-pub-conf-ret)). +Returns are sent to the client by it registering a `RabbitTemplate.ReturnsCallback` by calling `setReturnsCallback(ReturnsCallback callback)`. +The callback must implement the following method: + +``` +void returnedMessage(ReturnedMessage returned); +``` + +The `ReturnedMessage` has the following properties: + +* `message` - the returned message itself + +* `replyCode` - a code indicating the reason for the return + +* `replyText` - a textual reason for the return - e.g. `NO_ROUTE` + +* `exchange` - the exchange to which the message was sent + +* `routingKey` - the routing key that was used + +Only one `ReturnsCallback` is supported by each `RabbitTemplate`. +See also [Reply Timeout](#reply-timeout). 
+ +For publisher confirms (also known as publisher acknowledgements), the template requires a `CachingConnectionFactory` that has its `publisherConfirm` property set to `ConfirmType.CORRELATED`. +Confirms are sent to the client by it registering a `RabbitTemplate.ConfirmCallback` by calling `setConfirmCallback(ConfirmCallback callback)`. +The callback must implement this method: + +``` +void confirm(CorrelationData correlationData, boolean ack, String cause); +``` + +The `CorrelationData` is an object supplied by the client when sending the original message. +The `ack` is true for an `ack` and false for a `nack`. +For `nack` instances, the cause may contain a reason for the `nack`, if it is available when the `nack` is generated. +An example is when sending a message to a non-existent exchange. +In that case, the broker closes the channel. +The reason for the closure is included in the `cause`. +The `cause` was added in version 1.4. + +Only one `ConfirmCallback` is supported by a `RabbitTemplate`. + +| |When a rabbit template send operation completes, the channel is closed.
This precludes the reception of confirms or returns when the connection factory cache is full (when there is space in the cache, the channel is not physically closed and the returns and confirms proceed normally).
When the cache is full, the framework defers the close for up to five seconds, in order to allow time for the confirms and returns to be received.
When using confirms, the channel is closed when the last confirm is received.
When using only returns, the channel remains open for the full five seconds.
We generally recommend setting the connection factory’s `channelCacheSize` to a large enough value so that the channel on which a message is published is returned to the cache instead of being closed.
You can monitor channel usage by using the RabbitMQ management plugin.
If you see channels being opened and closed rapidly, you should consider increasing the cache size to reduce overhead on the server.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Before version 2.1, channels enabled for publisher confirms were returned to the cache before the confirms were received.
Some other process could check out the channel and perform some operation that causes the channel to close — such as publishing a message to a non-existent exchange.
This could cause the confirm to be lost.
Version 2.1 and later no longer return the channel to the cache while confirms are outstanding.
The `RabbitTemplate` performs a logical `close()` on the channel after each operation.
In general, this means that only one confirm is outstanding on a channel at a time.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 2.2, the callbacks are invoked on one of the connection factory’s `executor` threads.
This is to avoid a potential deadlock if you perform Rabbit operations from within the callback.
With previous versions, the callbacks were invoked directly on the `amqp-client` connection I/O thread; this would deadlock if you perform some RPC operation (such as opening a new channel) since the I/O thread blocks waiting for the result, but the result needs to be processed by the I/O thread itself.
With those versions, it was necessary to hand off work (such as sending a message) to another thread within the callback.
This is no longer necessary since the framework now hands off the callback invocation to the executor.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The guarantee of receiving a returned message before the ack is still maintained as long as the return callback executes in 60 seconds or less.
The confirm is scheduled to be delivered after the return callback exits or after 60 seconds, whichever comes first.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.1, the `CorrelationData` object has a `ListenableFuture` that you can use to get the result, instead of using a `ConfirmCallback` on the template. +The following example shows how to configure a `CorrelationData` instance: + +``` +CorrelationData cd1 = new CorrelationData(); +this.templateWithConfirmsEnabled.convertAndSend("exchange", queue.getName(), "foo", cd1); +assertTrue(cd1.getFuture().get(10, TimeUnit.SECONDS).isAck()); +``` + +Since it is a `ListenableFuture`, you can either `get()` the result when ready or add listeners for an asynchronous callback. +The `Confirm` object is a simple bean with 2 properties: `ack` and `reason` (for `nack` instances). +The reason is not populated for broker-generated `nack` instances. +It is populated for `nack` instances generated by the framework (for example, closing the connection while `ack` instances are outstanding). + +In addition, when both confirms and returns are enabled, the `CorrelationData` is populated with the returned message, as long as the `CorrelationData` has a unique `id`; this is always the case, by default, starting with version 2.3. +It is guaranteed that the returned message is set before the future is set with the `ack`. + +See also [Scoped Operations](#scoped-operations) for a simpler mechanism for waiting for publisher confirms. + +##### Scoped Operations + +Normally, when using the template, a `Channel` is checked out of the cache (or created), used for the operation, and returned to the cache for reuse. 
+In a multi-threaded environment, there is no guarantee that the next operation uses the same channel. +There may be times, however, where you want to have more control over the use of a channel and ensure that a number of operations are all performed on the same channel. + +Starting with version 2.0, a new method called `invoke` is provided, with an `OperationsCallback`. +Any operations performed within the scope of the callback and on the provided `RabbitOperations` argument use the same dedicated `Channel`, which will be closed at the end (not returned to a cache). +If the channel is a `PublisherCallbackChannel`, it is returned to the cache after all confirms have been received (see [Correlated Publisher Confirms and Returns](#template-confirms)). + +``` +@FunctionalInterface +public interface OperationsCallback { + + T doInRabbit(RabbitOperations operations); + +} +``` + +One example of why you might need this is if you wish to use the `waitForConfirms()` method on the underlying `Channel`. +This method was not previously exposed by the Spring API because the channel is, generally, cached and shared, as discussed earlier. +The `RabbitTemplate` now provides `waitForConfirms(long timeout)` and `waitForConfirmsOrDie(long timeout)`, which delegate to the dedicated channel used within the scope of the `OperationsCallback`. +The methods cannot be used outside of that scope, for obvious reasons. + +Note that a higher-level abstraction that lets you correlate confirms to requests is provided elsewhere (see [Correlated Publisher Confirms and Returns](#template-confirms)). 
+If you want only to wait until the broker has confirmed delivery, you can use the technique shown in the following example: + +``` +Collection messages = getMessagesToSend(); +Boolean result = this.template.invoke(t -> { + messages.forEach(m -> t.convertAndSend(ROUTE, m)); + t.waitForConfirmsOrDie(10_000); + return true; +}); +``` + +If you wish `RabbitAdmin` operations to be invoked on the same channel within the scope of the `OperationsCallback`, the admin must have been constructed by using the same `RabbitTemplate` that was used for the `invoke` operation. + +| |The preceding discussion is moot if the template operations are already performed within the scope of an existing transaction — for example, when running on a transacted listener container thread and performing operations on a transacted template.
In that case, the operations are performed on that channel and committed when the thread returns to the container.
It is not necessary to use `invoke` in that scenario.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When using confirms in this way, much of the infrastructure set up for correlating confirms to requests is not really needed (unless returns are also enabled). +Starting with version 2.2, the connection factory supports a new property called `publisherConfirmType`. +When this is set to `ConfirmType.SIMPLE`, the infrastructure is avoided and the confirm processing can be more efficient. + +Furthermore, the `RabbitTemplate` sets the `publisherSequenceNumber` property in the sent message `MessageProperties`. +If you wish to check (or log or otherwise use) specific confirms, you can do so with an overloaded `invoke` method, as the following example shows: + +``` +public T invoke(OperationsCallback action, com.rabbitmq.client.ConfirmCallback acks, + com.rabbitmq.client.ConfirmCallback nacks); +``` + +| |These `ConfirmCallback` objects (for `ack` and `nack` instances) are the Rabbit client callbacks, not the template callback.| +|---|----------------------------------------------------------------------------------------------------------------------------| + +The following example logs `ack` and `nack` instances: + +``` +Collection messages = getMessagesToSend(); +Boolean result = this.template.invoke(t -> { + messages.forEach(m -> t.convertAndSend(ROUTE, m)); + t.waitForConfirmsOrDie(10_000); + return true; +}, (tag, multiple) -> { + log.info("Ack: " + tag + ":" + multiple); +}, (tag, multiple) -> { + log.info("Nack: " + tag + ":" + multiple); +})); +``` + +| |Scoped operations are bound 
to a thread.
See [Strict Message Ordering in a Multi-Threaded Environment](#multi-strict) for a discussion about strict ordering in a multi-threaded environment.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Strict Message Ordering in a Multi-Threaded Environment + +The discussion in [Scoped Operations](#scoped-operations) applies only when the operations are performed on the same thread. + +Consider the following situation: + +* `thread-1` sends a message to a queue and hands off work to `thread-2` + +* `thread-2` sends a message to the same queue + +Because of the async nature of RabbitMQ and the use of cached channels; it is not certain that the same channel will be used and therefore the order in which the messages arrive in the queue is not guaranteed. +(In most cases they will arrive in order, but the probability of out-of-order delivery is not zero). +To solve this use case, you can use a bounded channel cache with size `1` (together with a `channelCheckoutTimeout`) to ensure the messages are always published on the same channel, and order will be guaranteed. +To do this, if you have other uses for the connection factory, such as consumers, you should either use a dedicated connection factory for the template, or configure the template to use the publisher connection factory embedded in the main connection factory (see [Using a Separate Connection](#separate-connection)). 
+ +This is best illustrated with a simple Spring Boot Application: + +``` +@SpringBootApplication +public class Application { + + private static final Logger log = LoggerFactory.getLogger(Application.class); + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + TaskExecutor exec() { + ThreadPoolTaskExecutor exec = new ThreadPoolTaskExecutor(); + exec.setCorePoolSize(10); + return exec; + } + + @Bean + CachingConnectionFactory ccf() { + CachingConnectionFactory ccf = new CachingConnectionFactory("localhost"); + CachingConnectionFactory publisherCF = (CachingConnectionFactory) ccf.getPublisherConnectionFactory(); + publisherCF.setChannelCacheSize(1); + publisherCF.setChannelCheckoutTimeout(1000L); + return ccf; + } + + @RabbitListener(queues = "queue") + void listen(String in) { + log.info(in); + } + + @Bean + Queue queue() { + return new Queue("queue"); + } + + @Bean + public ApplicationRunner runner(Service service, TaskExecutor exec) { + return args -> { + exec.execute(() -> service.mainService("test")); + }; + } + +} + +@Component +class Service { + + private static final Logger LOG = LoggerFactory.getLogger(Service.class); + + private final RabbitTemplate template; + + private final TaskExecutor exec; + + Service(RabbitTemplate template, TaskExecutor exec) { + template.setUsePublisherConnection(true); + this.template = template; + this.exec = exec; + } + + void mainService(String toSend) { + LOG.info("Publishing from main service"); + this.template.convertAndSend("queue", toSend); + this.exec.execute(() -> secondaryService(toSend.toUpperCase())); + } + + void secondaryService(String toSend) { + LOG.info("Publishing from secondary service"); + this.template.convertAndSend("queue", toSend); + } + +} +``` + +Even though the publishing is performed on two different threads, they will both use the same channel because the cache is capped at a single channel. 
+ +Starting with version 2.3.7, the `ThreadChannelConnectionFactory` supports transferring a thread’s channel(s) to another thread, using the `prepareContextSwitch` and `switchContext` methods. +The first method returns a context which is passed to the second thread which calls the second method. +A thread can have either a non-transactional channel or a transactional channel (or one of each) bound to it; you cannot transfer them individually, unless you use two connection factories. +An example follows: + +``` +@SpringBootApplication +public class Application { + + private static final Logger log = LoggerFactory.getLogger(Application.class); + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + TaskExecutor exec() { + ThreadPoolTaskExecutor exec = new ThreadPoolTaskExecutor(); + exec.setCorePoolSize(10); + return exec; + } + + @Bean + ThreadChannelConnectionFactory tccf() { + ConnectionFactory rabbitConnectionFactory = new ConnectionFactory(); + rabbitConnectionFactory.setHost("localhost"); + return new ThreadChannelConnectionFactory(rabbitConnectionFactory); + } + + @RabbitListener(queues = "queue") + void listen(String in) { + log.info(in); + } + + @Bean + Queue queue() { + return new Queue("queue"); + } + + @Bean + public ApplicationRunner runner(Service service, TaskExecutor exec) { + return args -> { + exec.execute(() -> service.mainService("test")); + }; + } + +} + +@Component +class Service { + + private static final Logger LOG = LoggerFactory.getLogger(Service.class); + + private final RabbitTemplate template; + + private final TaskExecutor exec; + + private final ThreadChannelConnectionFactory connFactory; + + Service(RabbitTemplate template, TaskExecutor exec, + ThreadChannelConnectionFactory tccf) { + + this.template = template; + this.exec = exec; + this.connFactory = tccf; + } + + void mainService(String toSend) { + LOG.info("Publishing from main service"); + 
this.template.convertAndSend("queue", toSend); + Object context = this.connFactory.prepareSwitchContext(); + this.exec.execute(() -> secondaryService(toSend.toUpperCase(), context)); + } + + void secondaryService(String toSend, Object threadContext) { + LOG.info("Publishing from secondary service"); + this.connFactory.switchContext(threadContext); + this.template.convertAndSend("queue", toSend); + this.connFactory.closeThreadChannel(); + } + +} +``` + +| |Once the `prepareSwitchContext` is called, if the current thread performs any more operations, they will be performed on a new channel.
It is important to close the thread-bound channel when it is no longer needed.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Messaging Integration + +Starting with version 1.4, `RabbitMessagingTemplate` (built on top of `RabbitTemplate`) provides an integration with the Spring Framework messaging abstraction — that is,`org.springframework.messaging.Message`. +This lets you send and receive messages by using the `spring-messaging` `Message` abstraction. +This abstraction is used by other Spring projects, such as Spring Integration and Spring’s STOMP support. +There are two message converters involved: one to convert between a spring-messaging `Message` and Spring AMQP’s `Message` abstraction and one to convert between Spring AMQP’s `Message` abstraction and the format required by the underlying RabbitMQ client library. +By default, the message payload is converted by the provided `RabbitTemplate` instance’s message converter. +Alternatively, you can inject a custom `MessagingMessageConverter` with some other payload converter, as the following example shows: + +``` +MessagingMessageConverter amqpMessageConverter = new MessagingMessageConverter(); +amqpMessageConverter.setPayloadConverter(myPayloadConverter); +rabbitMessagingTemplate.setAmqpMessageConverter(amqpMessageConverter); +``` + +##### Validated User Id + +Starting with version 1.6, the template now supports a `user-id-expression` (`userIdExpression` when using Java configuration). +If a message is sent, the user id property is set (if not already set) after evaluating this expression. +The root object for the evaluation is the message to be sent. + +The following examples show how to use the `user-id-expression` attribute: + +``` + + + +``` + +The first example is a literal expression. 
+The second obtains the `username` property from a connection factory bean in the application context.
+
+##### Using a Separate Connection
+
+Starting with version 2.0.2, you can set the `usePublisherConnection` property to `true` to use a different connection to that used by listener containers, when possible.
+This is to avoid consumers being blocked when a producer is blocked for any reason.
+The connection factories maintain a second internal connection factory for this purpose; by default it is the same type as the main factory, but can be set explicitly if you wish to use a different factory type for publishing.
+If the rabbit template is running in a transaction started by the listener container, the container’s channel is used, regardless of this setting.
+
+| |In general, you should not use a `RabbitAdmin` with a template that has this set to `true`.
Use the `RabbitAdmin` constructor that takes a connection factory.
If you use the other constructor that takes a template, ensure the template’s property is `false`.
This is because, often, an admin is used to declare queues for listener containers.
Using a template that has the property set to `true` would mean that exclusive queues (such as `AnonymousQueue`) would be declared on a different connection to that used by listener containers.
In that case, the queues cannot be used by the containers.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.5. Sending Messages + +When sending a message, you can use any of the following methods: + +``` +void send(Message message) throws AmqpException; + +void send(String routingKey, Message message) throws AmqpException; + +void send(String exchange, String routingKey, Message message) throws AmqpException; +``` + +We can begin our discussion with the last method in the preceding listing, since it is actually the most explicit. +It lets an AMQP exchange name (along with a routing key)be provided at runtime. +The last parameter is the callback that is responsible for actual creating the message instance. +An example of using this method to send a message might look like this: +The following example shows how to use the `send` method to send a message: + +``` +amqpTemplate.send("marketData.topic", "quotes.nasdaq.THING1", + new Message("12.34".getBytes(), someProperties)); +``` + +You can set the `exchange` property on the template itself if you plan to use that template instance to send to the same exchange most or all of the time. +In such cases, you can use the second method in the preceding listing. 
+The following example is functionally equivalent to the previous example: + +``` +amqpTemplate.setExchange("marketData.topic"); +amqpTemplate.send("quotes.nasdaq.FOO", new Message("12.34".getBytes(), someProperties)); +``` + +If both the `exchange` and `routingKey` properties are set on the template, you can use the method that accepts only the `Message`. +The following example shows how to do so: + +``` +amqpTemplate.setExchange("marketData.topic"); +amqpTemplate.setRoutingKey("quotes.nasdaq.FOO"); +amqpTemplate.send(new Message("12.34".getBytes(), someProperties)); +``` + +A better way of thinking about the exchange and routing key properties is that the explicit method parameters always override the template’s default values. +In fact, even if you do not explicitly set those properties on the template, there are always default values in place. +In both cases, the default is an empty `String`, but that is actually a sensible default. +As far as the routing key is concerned, it is not always necessary in the first place (for example, for +a `Fanout` exchange). +Furthermore, a queue may be bound to an exchange with an empty `String`. +Those are both legitimate scenarios for reliance on the default empty `String` value for the routing key property of the template. +As far as the exchange name is concerned, the empty `String` is commonly used because the AMQP specification defines the “default exchange” as having no name. +Since all queues are automatically bound to that default exchange (which is a direct exchange), using their name as the binding value, the second method in the preceding listing can be used for simple point-to-point messaging to any queue through the default exchange. +You can provide the queue name as the `routingKey`, either by providing the method parameter at runtime. 
+The following example shows how to do so: + +``` +RabbitTemplate template = new RabbitTemplate(); // using default no-name Exchange +template.send("queue.helloWorld", new Message("Hello World".getBytes(), someProperties)); +``` + +Alternately, you can create a template that can be used for publishing primarily or exclusively to a single Queue. +The following example shows how to do so: + +``` +RabbitTemplate template = new RabbitTemplate(); // using default no-name Exchange +template.setRoutingKey("queue.helloWorld"); // but we'll always send to this Queue +template.send(new Message("Hello World".getBytes(), someProperties)); +``` + +##### Message Builder API + +Starting with version 1.3, a message builder API is provided by the `MessageBuilder` and `MessagePropertiesBuilder`. +These methods provide a convenient “fluent” means of creating a message or message properties. +The following examples show the fluent API in action: + +``` +Message message = MessageBuilder.withBody("foo".getBytes()) + .setContentType(MessageProperties.CONTENT_TYPE_TEXT_PLAIN) + .setMessageId("123") + .setHeader("bar", "baz") + .build(); +``` + +``` +MessageProperties props = MessagePropertiesBuilder.newInstance() + .setContentType(MessageProperties.CONTENT_TYPE_TEXT_PLAIN) + .setMessageId("123") + .setHeader("bar", "baz") + .build(); +Message message = MessageBuilder.withBody("foo".getBytes()) + .andProperties(props) + .build(); +``` + +Each of the properties defined on the [`MessageProperties`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/core/MessageProperties.html) can be set. +Other methods include `setHeader(String key, String value)`, `removeHeader(String key)`, `removeHeaders()`, and `copyProperties(MessageProperties properties)`. +Each property setting method has a `set*IfAbsent()` variant. +In the cases where a default initial value exists, the method is named `set*IfAbsentOrDefault()`. 
+ +Five static methods are provided to create an initial message builder: + +``` +public static MessageBuilder withBody(byte[] body) (1) + +public static MessageBuilder withClonedBody(byte[] body) (2) + +public static MessageBuilder withBody(byte[] body, int from, int to) (3) + +public static MessageBuilder fromMessage(Message message) (4) + +public static MessageBuilder fromClonedMessage(Message message) (5) +``` + +|**1**| The message created by the builder has a body that is a direct reference to the argument. | +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| The message created by the builder has a body that is a new array containing a copy of bytes in the argument. | +|**3**|The message created by the builder has a body that is a new array containing the range of bytes from the argument.
See [`Arrays.copyOfRange()`](https://docs.oracle.com/javase/7/docs/api/java/util/Arrays.html) for more details.| +|**4**| The message created by the builder has a body that is a direct reference to the body of the argument.
The argument’s properties are copied to a new `MessageProperties` object. | +|**5**| The message created by the builder has a body that is a new array containing a copy of the argument’s body.
The argument’s properties are copied to a new `MessageProperties` object. | + +Three static methods are provided to create a `MessagePropertiesBuilder` instance: + +``` +public static MessagePropertiesBuilder newInstance() (1) + +public static MessagePropertiesBuilder fromProperties(MessageProperties properties) (2) + +public static MessagePropertiesBuilder fromClonedProperties(MessageProperties properties) (3) +``` + +|**1**| A new message properties object is initialized with default values. | +|-----|--------------------------------------------------------------------------------------------| +|**2**|The builder is initialized with, and `build()` will return, the provided properties object.,| +|**3**| The argument’s properties are copied to a new `MessageProperties` object. | + +With the `RabbitTemplate` implementation of `AmqpTemplate`, each of the `send()` methods has an overloaded version that takes an additional `CorrelationData` object. +When publisher confirms are enabled, this object is returned in the callback described in [`AmqpTemplate`](#amqp-template). +This lets the sender correlate a confirm (`ack` or `nack`) with the sent message. + +Starting with version 1.6.7, the `CorrelationAwareMessagePostProcessor` interface was introduced, allowing the correlation data to be modified after the message has been converted. +The following example shows how to use it: + +``` +Message postProcessMessage(Message message, Correlation correlation); +``` + +In version 2.0, this interface is deprecated. +The method has been moved to `MessagePostProcessor` with a default implementation that delegates to `postProcessMessage(Message message)`. + +Also starting with version 1.6.7, a new callback interface called `CorrelationDataPostProcessor` is provided. +This is invoked after all `MessagePostProcessor` instances (provided in the `send()` method as well as those provided in `setBeforePublishPostProcessors()`). 
+Implementations can update or replace the correlation data supplied in the `send()` method (if any). +The `Message` and original `CorrelationData` (if any) are provided as arguments. +The following example shows how to use the `postProcess` method: + +``` +CorrelationData postProcess(Message message, CorrelationData correlationData); +``` + +##### Publisher Returns + +When the template’s `mandatory` property is `true`, returned messages are provided by the callback described in [`AmqpTemplate`](#amqp-template). + +Starting with version 1.4, the `RabbitTemplate` supports the SpEL `mandatoryExpression` property, which is evaluated against each request message as the root evaluation object, resolving to a `boolean` value. +Bean references, such as `@myBean.isMandatory(#root)`, can be used in the expression. + +Publisher returns can also be used internally by the `RabbitTemplate` in send and receive operations. +See [Reply Timeout](#reply-timeout) for more information. + +##### Batching + +Version 1.4.2 introduced the `BatchingRabbitTemplate`. +This is a subclass of `RabbitTemplate` with an overridden `send` method that batches messages according to the `BatchingStrategy`. +Only when a batch is complete is the message sent to RabbitMQ. +The following listing shows the `BatchingStrategy` interface definition: + +``` +public interface BatchingStrategy { + + MessageBatch addToBatch(String exchange, String routingKey, Message message); + + Date nextRelease(); + + Collection releaseBatches(); + +} +``` + +| |Batched data is held in memory.
Unsent messages can be lost in the event of a system failure.| +|---|-------------------------------------------------------------------------------------------------| + +A `SimpleBatchingStrategy` is provided. +It supports sending messages to a single exchange or routing key. +It has the following properties: + +* `batchSize`: The number of messages in a batch before it is sent. + +* `bufferLimit`: The maximum size of the batched message. + This preempts the `batchSize`, if exceeded, and causes a partial batch to be sent. + +* `timeout`: A time after which a partial batch is sent when there is no new activity adding messages to the batch. + +The `SimpleBatchingStrategy` formats the batch by preceding each embedded message with a four-byte binary length. +This is communicated to the receiving system by setting the `springBatchFormat` message property to `lengthHeader4`. + +| |Batched messages are automatically de-batched by listener containers by default (by using the `springBatchFormat` message header).
Rejecting any message from a batch causes the entire batch to be rejected.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +However, see [@RabbitListener with Batching](#receiving-batch) for more information. + +#### 4.1.6. Receiving Messages + +Message reception is always a little more complicated than sending. +There are two ways to receive a `Message`. +The simpler option is to poll for one `Message` at a time with a polling method call. +The more complicated yet more common approach is to register a listener that receives `Messages` on-demand, asynchronously. +We consider an example of each approach in the next two sub-sections. + +##### Polling Consumer + +The `AmqpTemplate` itself can be used for polled `Message` reception. +By default, if no message is available, `null` is returned immediately. +There is no blocking. +Starting with version 1.5, you can set a `receiveTimeout`, in milliseconds, and the receive methods block for up to that long, waiting for a message. +A value less than zero means block indefinitely (or at least until the connection to the broker is lost). +Version 1.6 introduced variants of the `receive` methods that let the timeout be passed in on each call. + +| |Since the receive operation creates a new `QueueingConsumer` for each message, this technique is not really appropriate for high-volume environments.
Consider using an asynchronous consumer or a `receiveTimeout` of zero for those use cases.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +There are four simple `receive` methods available. +As with the `Exchange` on the sending side, there is a method that requires that a default queue property has been set +directly on the template itself, and there is a method that accepts a queue parameter at runtime. +Version 1.6 introduced variants to accept `timeoutMillis` to override `receiveTimeout` on a per-request basis. +The following listing shows the definitions of the four methods: + +``` +Message receive() throws AmqpException; + +Message receive(String queueName) throws AmqpException; + +Message receive(long timeoutMillis) throws AmqpException; + +Message receive(String queueName, long timeoutMillis) throws AmqpException; +``` + +As in the case of sending messages, the `AmqpTemplate` has some convenience methods for receiving POJOs instead of `Message` instances, and implementations provide a way to customize the `MessageConverter` used to create the `Object` returned: +The following listing shows those methods: + +``` +Object receiveAndConvert() throws AmqpException; + +Object receiveAndConvert(String queueName) throws AmqpException; + +Object receiveAndConvert(long timeoutMillis) throws AmqpException; + +Object receiveAndConvert(String queueName, long timeoutMillis) throws AmqpException; +``` + +Starting with version 2.0, there are variants of these methods that take an additional `ParameterizedTypeReference` argument to convert complex types. +The template must be configured with a `SmartMessageConverter`. +See [Converting From a `Message` With `RabbitTemplate`](#json-complex) for more information. 
+ +Similar to `sendAndReceive` methods, beginning with version 1.3, the `AmqpTemplate` has several convenience `receiveAndReply` methods for synchronously receiving, processing and replying to messages. +The following listing shows those method definitions: + +``` + boolean receiveAndReply(ReceiveAndReplyCallback callback) + throws AmqpException; + + boolean receiveAndReply(String queueName, ReceiveAndReplyCallback callback) + throws AmqpException; + + boolean receiveAndReply(ReceiveAndReplyCallback callback, + String replyExchange, String replyRoutingKey) throws AmqpException; + + boolean receiveAndReply(String queueName, ReceiveAndReplyCallback callback, + String replyExchange, String replyRoutingKey) throws AmqpException; + + boolean receiveAndReply(ReceiveAndReplyCallback callback, + ReplyToAddressCallback replyToAddressCallback) throws AmqpException; + + boolean receiveAndReply(String queueName, ReceiveAndReplyCallback callback, + ReplyToAddressCallback replyToAddressCallback) throws AmqpException; +``` + +The `AmqpTemplate` implementation takes care of the `receive` and `reply` phases. +In most cases, you should provide only an implementation of `ReceiveAndReplyCallback` to perform some business logic for the received message and build a reply object or message, if needed. +Note, a `ReceiveAndReplyCallback` may return `null`. +In this case, no reply is sent and `receiveAndReply` works like the `receive` method. +This lets the same queue be used for a mixture of messages, some of which may not need a reply. + +Automatic message (request and reply) conversion is applied only if the provided callback is not an instance of `ReceiveAndReplyMessageCallback`, which provides a raw message exchange contract. + +The `ReplyToAddressCallback` is useful for cases requiring custom logic to determine the `replyTo` address at runtime against the received message and reply from the `ReceiveAndReplyCallback`. 
+By default, `replyTo` information in the request message is used to route the reply. + +The following listing shows an example of POJO-based receive and reply: + +``` +boolean received = + this.template.receiveAndReply(ROUTE, new ReceiveAndReplyCallback() { + + public Invoice handle(Order order) { + return processOrder(order); + } + }); +if (received) { + log.info("We received an order!"); +} +``` + +##### Asynchronous Consumer + +| |Spring AMQP also supports annotated listener endpoints through the use of the `@RabbitListener` annotation and provides an open infrastructure to register endpoints programmatically.
This is by far the most convenient way to set up an asynchronous consumer.
See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more details.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The prefetch default value used to be 1, which could lead to under-utilization of efficient consumers.
Starting with version 2.0, the default prefetch value is now 250, which should keep consumers busy in most common scenarios and
thus improve throughput.

There are, nevertheless, scenarios where the prefetch value should be low:

* For large messages, especially if the processing is slow (messages could add up to a large amount of memory in the client process)

* When strict message ordering is necessary (the prefetch value should be set back to 1 in this case)

* Other special cases

Also, with low-volume messaging and multiple consumers (including concurrency within a single listener container instance), you may wish to reduce the prefetch to get a more even distribution of messages across consumers.

See [Message Listener Container Configuration](#containerAttributes).

For more background about prefetch, see this post about [consumer utilization in RabbitMQ](https://www.rabbitmq.com/blog/2014/04/14/finding-bottlenecks-with-rabbitmq-3-3/)and this post about [queuing theory](https://www.rabbitmq.com/blog/2012/05/11/some-queuing-theory-throughput-latency-and-bandwidth/).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Message Listener + +For asynchronous `Message` reception, a dedicated component (not the `AmqpTemplate`) is involved. +That component is a container for a `Message`-consuming callback. +We consider the container and its properties later in this section. +First, though, we should look at the callback, since that is where your application code is integrated with the messaging system. 
+There are a few options for the callback, starting with an implementation of the `MessageListener` interface, which the following listing shows: + +``` +public interface MessageListener { + void onMessage(Message message); +} +``` + +If your callback logic depends on the AMQP Channel instance for any reason, you may instead use the `ChannelAwareMessageListener`. +It looks similar but has an extra parameter. +The following listing shows the `ChannelAwareMessageListener` interface definition: + +``` +public interface ChannelAwareMessageListener { + void onMessage(Message message, Channel channel) throws Exception; +} +``` + +| |In version 2.1, this interface moved from package `o.s.amqp.rabbit.core` to `o.s.amqp.rabbit.listener.api`.| +|---|-----------------------------------------------------------------------------------------------------------| + +###### `MessageListenerAdapter` + +If you prefer to maintain a stricter separation between your application logic and the messaging API, you can rely upon an adapter implementation that is provided by the framework. +This is often referred to as “Message-driven POJO” support. + +| |Version 1.5 introduced a more flexible mechanism for POJO messaging, the `@RabbitListener` annotation.
See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When using the adapter, you need to provide only a reference to the instance that the adapter itself should invoke. +The following example shows how to do so: + +``` +MessageListenerAdapter listener = new MessageListenerAdapter(somePojo); +listener.setDefaultListenerMethod("myMethod"); +``` + +You can subclass the adapter and provide an implementation of `getListenerMethodName()` to dynamically select different methods based on the message. +This method has two parameters, `originalMessage` and `extractedMessage`, the latter being the result of any conversion. +By default, a `SimpleMessageConverter` is configured. +See [`SimpleMessageConverter`](#simple-message-converter) for more information and information about other converters available. + +Starting with version 1.4.2, the original message has `consumerQueue` and `consumerTag` properties, which can be used to determine the queue from which a message was received. + +Starting with version 1.5, you can configure a map of consumer queue or tag to method name, to dynamically select the method to call. +If no entry is in the map, we fall back to the default listener method. +The default listener method (if not set) is `handleMessage`. + +Starting with version 2.0, a convenient `FunctionalInterface` has been provided. +The following listing shows the definition of `FunctionalInterface`: + +``` +@FunctionalInterface +public interface ReplyingMessageListener { + + R handleMessage(T t); + +} +``` + +This interface facilitates convenient configuration of the adapter by using Java 8 lambdas, as the following example shows: + +``` +new MessageListenerAdapter((ReplyingMessageListener) data -> { + ... 
+ return result; +})); +``` + +Starting with version 2.2, the `buildListenerArguments(Object)` has been deprecated and new `buildListenerArguments(Object, Channel, Message)` one has been introduced instead. +The new method helps listener to get `Channel` and `Message` arguments to do more, such as calling `channel.basicReject(long, boolean)` in manual acknowledge mode. +The following listing shows the most basic example: + +``` +public class ExtendedListenerAdapter extends MessageListenerAdapter { + + @Override + protected Object[] buildListenerArguments(Object extractedMessage, Channel channel, Message message) { + return new Object[]{extractedMessage, channel, message}; + } + +} +``` + +Now you could configure `ExtendedListenerAdapter` as same as `MessageListenerAdapter` if you need to receive “channel” and “message”. +Parameters of listener should be set as `buildListenerArguments(Object, Channel, Message)` returned, as the following example of listener shows: + +``` +public void handleMessage(Object object, Channel channel, Message message) throws IOException { + ... +} +``` + +###### Container + +Now that you have seen the various options for the `Message`-listening callback, we can turn our attention to the container. +Basically, the container handles the “active” responsibilities so that the listener callback can remain passive. +The container is an example of a “lifecycle” component. +It provides methods for starting and stopping. +When configuring the container, you essentially bridge the gap between an AMQP Queue and the `MessageListener` instance. +You must provide a reference to the `ConnectionFactory` and the queue names or Queue instances from which that listener should consume messages. + +Prior to version 2.0, there was one listener container, the `SimpleMessageListenerContainer`. +There is now a second container, the `DirectMessageListenerContainer`. 
+The differences between the containers and criteria you might apply when choosing which to use are described in [Choosing a Container](#choose-container). + +The following listing shows the most basic example, which works by using the, `SimpleMessageListenerContainer`: + +``` +SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(); +container.setConnectionFactory(rabbitConnectionFactory); +container.setQueueNames("some.queue"); +container.setMessageListener(new MessageListenerAdapter(somePojo)); +``` + +As an “active” component, it is most common to create the listener container with a bean definition so that it can run in the background. +The following example shows one way to do so with XML: + +``` + + + +``` + +The following listing shows another way to do so with XML: + +``` + + + +``` + +Both of the preceding examples create a `DirectMessageListenerContainer` (notice the `type` attribute — it defaults to `simple`). + +Alternately, you may prefer to use Java configuration, which looks similar to the preceding code snippet: + +``` +@Configuration +public class ExampleAmqpConfiguration { + + @Bean + public SimpleMessageListenerContainer messageListenerContainer() { + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(); + container.setConnectionFactory(rabbitConnectionFactory()); + container.setQueueName("some.queue"); + container.setMessageListener(exampleListener()); + return container; + } + + @Bean + public CachingConnectionFactory rabbitConnectionFactory() { + CachingConnectionFactory connectionFactory = + new CachingConnectionFactory("localhost"); + connectionFactory.setUsername("guest"); + connectionFactory.setPassword("guest"); + return connectionFactory; + } + + @Bean + public MessageListener exampleListener() { + return new MessageListener() { + public void onMessage(Message message) { + System.out.println("received: " + message); + } + }; + } +} +``` + +###### Consumer Priority + +Starting with 
RabbitMQ Version 3.2, the broker now supports consumer priority (see [Using Consumer Priorities with RabbitMQ](https://www.rabbitmq.com/blog/2013/12/16/using-consumer-priorities-with-rabbitmq/)). +This is enabled by setting the `x-priority` argument on the consumer. +The `SimpleMessageListenerContainer` now supports setting consumer arguments, as the following example shows: + +``` +container.setConsumerArguments(Collections. + singletonMap("x-priority", Integer.valueOf(10))); +``` + +For convenience, the namespace provides the `priority` attribute on the `listener` element, as the following example shows: + +``` + + + +``` + +Starting with version 1.3, you can modify the queues on which the container listens at runtime. +See [Listener Container Queues](#listener-queues). + +###### `auto-delete` Queues + +When a container is configured to listen to `auto-delete` queues, the queue has an `x-expires` option, or the [Time-To-Live](https://www.rabbitmq.com/ttl.html) policy is configured on the Broker, the queue is removed by the broker when the container is stopped (that is, when the last consumer is cancelled). +Before version 1.3, the container could not be restarted because the queue was missing. +The `RabbitAdmin` only automatically redeclares queues and so on when the connection is closed or when it opens, which does not happen when the container is stopped and started. + +Starting with version 1.3, the container uses a `RabbitAdmin` to redeclare any missing queues during startup. + +You can also use conditional declaration (see [Conditional Declaration](#conditional-declaration)) together with an `auto-startup="false"` admin to defer queue declaration until the container is started. +The following example shows how to do so: + +``` + + + + + + + + + + + + + +``` + +In this case, the queue and exchange are declared by `containerAdmin`, which has `auto-startup="false"` so that the elements are not declared during context initialization. 
+Also, the container is not started for the same reason.
+When the container is later started, it uses its reference to `containerAdmin` to declare the elements.
+
+##### Batched Messages
+
+Batched messages (created by a producer) are automatically de-batched by listener containers (using the `springBatchFormat` message header).
+Rejecting any message from a batch causes the entire batch to be rejected.
+See [Batching](#template-batching) for more information about batching.
+
+Starting with version 2.2, the `SimpleMessageListenerContainer` can be used to create batches on the consumer side (where the producer sent discrete messages).
+
+Set the container property `consumerBatchEnabled` to enable this feature. `deBatchingEnabled` must also be true so that the container is responsible for processing batches of both types.
+Implement `BatchMessageListener` or `ChannelAwareBatchMessageListener` when `consumerBatchEnabled` is true.
+Starting with version 2.2.7, both the `SimpleMessageListenerContainer` and `DirectMessageListenerContainer` can debatch [producer created batches](#template-batching) as `List`.
+See [@RabbitListener with Batching](#receiving-batch) for information about using this feature with `@RabbitListener`.
+
+##### Consumer Events
+
+The containers publish application events whenever a listener
+(consumer) experiences a failure of some kind.
+The event `ListenerContainerConsumerFailedEvent` has the following properties:
+
+* `container`: The listener container where the consumer experienced the problem.
+
+* `reason`: A textual reason for the failure.
+
+* `fatal`: A boolean indicating whether the failure was fatal.
+  With non-fatal exceptions, the container tries to restart the consumer, according to the `recoveryInterval` or `recoveryBackoff` (for the `SimpleMessageListenerContainer`) or the `monitorInterval` (for the `DirectMessageListenerContainer`).
+
+* `throwable`: The `Throwable` that was caught. 
+
+These events can be consumed by implementing `ApplicationListener`.
+
+| |System-wide events (such as connection failures) are published by all consumers when `concurrentConsumers` is greater than 1.|
+|---|-----------------------------------------------------------------------------------------------------------------------------|
+
+If a consumer fails because one of its queues is being used exclusively, by default, as well as publishing the event, a `WARN` log is issued.
+To change this logging behavior, provide a custom `ConditionalExceptionLogger` in the `SimpleMessageListenerContainer` instance’s `exclusiveConsumerExceptionLogger` property.
+See also [Logging Channel Close Events](#channel-close-logging).
+
+Fatal errors are always logged at the `ERROR` level.
+This is not modifiable.
+
+Several other events are published at various stages of the container lifecycle:
+
+* `AsyncConsumerStartedEvent`: When the consumer is started.
+
+* `AsyncConsumerRestartedEvent`: When the consumer is restarted after a failure - `SimpleMessageListenerContainer` only.
+
+* `AsyncConsumerTerminatedEvent`: When a consumer is stopped normally.
+
+* `AsyncConsumerStoppedEvent`: When the consumer is stopped - `SimpleMessageListenerContainer` only.
+
+* `ConsumeOkEvent`: When a `consumeOk` is received from the broker, contains the queue name and `consumerTag`.
+
+* `ListenerContainerIdleEvent`: See [Detecting Idle Asynchronous Consumers](#idle-containers).
+
+* `MissingQueueEvent`: When a missing queue is detected.
+
+##### Consumer Tags
+
+You can provide a strategy to generate consumer tags.
+By default, the consumer tag is generated by the broker.
+The following listing shows the `ConsumerTagStrategy` interface definition:
+
+```
+public interface ConsumerTagStrategy {
+
+    String createConsumerTag(String queue);
+
+}
+```
+
+The queue is made available so that it can (optionally) be used in the tag.
+
+See [Message Listener Container Configuration](#containerAttributes). 
+ +##### Annotation-driven Listener Endpoints + +The easiest way to receive a message asynchronously is to use the annotated listener endpoint infrastructure. +In a nutshell, it lets you expose a method of a managed bean as a Rabbit listener endpoint. +The following example shows how to use the `@RabbitListener` annotation: + +``` +@Component +public class MyService { + + @RabbitListener(queues = "myQueue") + public void processOrder(String data) { + ... + } + +} +``` + +The idea of the preceding example is that, whenever a message is available on the queue named `myQueue`, the `processOrder` method is invoked accordingly (in this case, with the payload of the message). + +The annotated endpoint infrastructure creates a message listener container behind the scenes for each annotated method, by using a `RabbitListenerContainerFactory`. + +In the preceding example, `myQueue` must already exist and be bound to some exchange. +The queue can be declared and bound automatically, as long as a `RabbitAdmin` exists in the application context. + +| |Property placeholders (`${some.property}`) or SpEL expressions (`#{someExpression}`) can be specified for the annotation properties (`queues` etc).
See [Listening to Multiple Queues](#annotation-multiple-queues) for an example of why you might use SpEL instead of a property placeholder.
The following listing shows three examples of how to declare a Rabbit listener:| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +@Component +public class MyService { + + @RabbitListener(bindings = @QueueBinding( + value = @Queue(value = "myQueue", durable = "true"), + exchange = @Exchange(value = "auto.exch", ignoreDeclarationExceptions = "true"), + key = "orderRoutingKey") + ) + public void processOrder(Order order) { + ... + } + + @RabbitListener(bindings = @QueueBinding( + value = @Queue, + exchange = @Exchange(value = "auto.exch"), + key = "invoiceRoutingKey") + ) + public void processInvoice(Invoice invoice) { + ... + } + + @RabbitListener(queuesToDeclare = @Queue(name = "${my.queue}", durable = "true")) + public String handleWithSimpleDeclare(String data) { + ... + } + +} +``` + +In the first example, a queue `myQueue` is declared automatically (durable) together with the exchange, if needed, +and bound to the exchange with the routing key. +In the second example, an anonymous (exclusive, auto-delete) queue is declared and bound. +Multiple `QueueBinding` entries can be provided, letting the listener listen to multiple queues. +In the third example, a queue with the name retrieved from property `my.queue` is declared, if necessary, with the default binding to the default exchange using the queue name as the routing key. + +Since version 2.0, the `@Exchange` annotation supports any exchange types, including custom. +For more information, see [AMQP Concepts](https://www.rabbitmq.com/tutorials/amqp-concepts.html). + +You can use normal `@Bean` definitions when you need more advanced configuration. 
+ +Notice `ignoreDeclarationExceptions` on the exchange in the first example. +This allows, for example, binding to an existing exchange that might have different settings (such as `internal`). +By default, the properties of an existing exchange must match. + +Starting with version 2.0, you can now bind a queue to an exchange with multiple routing keys, as the following example shows: + +``` +... + key = { "red", "yellow" } +... +``` + +You can also specify arguments within `@QueueBinding` annotations for queues, exchanges, +and bindings, as the following example shows: + +``` +@RabbitListener(bindings = @QueueBinding( + value = @Queue(value = "auto.headers", autoDelete = "true", + arguments = @Argument(name = "x-message-ttl", value = "10000", + type = "java.lang.Integer")), + exchange = @Exchange(value = "auto.headers", type = ExchangeTypes.HEADERS, autoDelete = "true"), + arguments = { + @Argument(name = "x-match", value = "all"), + @Argument(name = "thing1", value = "somevalue"), + @Argument(name = "thing2") + }) +) +public String handleWithHeadersExchange(String foo) { + ... +} +``` + +Notice that the `x-message-ttl` argument is set to 10 seconds for the queue. +Since the argument type is not `String`, we have to specify its type — in this case, `Integer`. +As with all such declarations, if the queue already exists, the arguments must match those on the queue. +For the header exchange, we set the binding arguments to match messages that have the `thing1` header set to `somevalue`, and +the `thing2` header must be present with any value. +The `x-match` argument means both conditions must be satisfied. + +The argument name, value, and type can be property placeholders (`${…​}`) or SpEL expressions (`#{…​}`). +The `name` must resolve to a `String`. +The expression for `type` must resolve to a `Class` or the fully-qualified name of a class. 
+The `value` must resolve to something that can be converted by the `DefaultConversionService` to the type (such as the `x-message-ttl` in the preceding example). + +If a name resolves to `null` or an empty `String`, that `@Argument` is ignored. + +###### Meta-annotations + +Sometimes you may want to use the same configuration for multiple listeners. +To reduce the boilerplate configuration, you can use meta-annotations to create your own listener annotation. +The following example shows how to do so: + +``` +@Target({ElementType.TYPE, ElementType.METHOD, ElementType.ANNOTATION_TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@RabbitListener(bindings = @QueueBinding( + value = @Queue, + exchange = @Exchange(value = "metaFanout", type = ExchangeTypes.FANOUT))) +public @interface MyAnonFanoutListener { +} + +public class MetaListener { + + @MyAnonFanoutListener + public void handle1(String foo) { + ... + } + + @MyAnonFanoutListener + public void handle2(String foo) { + ... + } + +} +``` + +In the preceding example, each listener created by the `@MyAnonFanoutListener` annotation binds an anonymous, auto-delete +queue to the fanout exchange, `metaFanout`. +Starting with version 2.2.3, `@AliasFor` is supported to allow overriding properties on the meta-annotated annotation. +Also, user annotations can now be `@Repeatable`, allowing multiple containers to be created for a method. 
+ +``` +@Component +static class MetaAnnotationTestBean { + + @MyListener("queue1") + @MyListener("queue2") + public void handleIt(String body) { + } + +} + +@RabbitListener +@Target(ElementType.METHOD) +@Retention(RetentionPolicy.RUNTIME) +@Repeatable(MyListeners.class) +static @interface MyListener { + + @AliasFor(annotation = RabbitListener.class, attribute = "queues") + String[] value() default {}; + +} + +@Target(ElementType.METHOD) +@Retention(RetentionPolicy.RUNTIME) +static @interface MyListeners { + + MyListener[] value(); + +} +``` + +###### Enable Listener Endpoint Annotations + +To enable support for `@RabbitListener` annotations, you can add `@EnableRabbit` to one of your `@Configuration` classes. +The following example shows how to do so: + +``` +@Configuration +@EnableRabbit +public class AppConfig { + + @Bean + public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(connectionFactory()); + factory.setConcurrentConsumers(3); + factory.setMaxConcurrentConsumers(10); + factory.setContainerCustomizer(container -> /* customize the container */); + return factory; + } +} +``` + +Since version 2.0, a `DirectMessageListenerContainerFactory` is also available. +It creates `DirectMessageListenerContainer` instances. + +| |For information to help you choose between `SimpleRabbitListenerContainerFactory` and `DirectRabbitListenerContainerFactory`, see [Choosing a Container](#choose-container).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting wih version 2.2.2, you can provide a `ContainerCustomizer` implementation (as shown above). 
+This can be used to further configure the container after it has been created and configured; you can use this, for example, to set properties that are not exposed by the container factory. + +By default, the infrastructure looks for a bean named `rabbitListenerContainerFactory` as the source for the factory to use to create message listener containers. +In this case, and ignoring the RabbitMQ infrastructure setup, the `processOrder` method can be invoked with a core poll size of three threads and a maximum pool size of ten threads. + +You can customize the listener container factory to use for each annotation, or you can configure an explicit default by implementing the `RabbitListenerConfigurer` interface. +The default is required only if at least one endpoint is registered without a specific container factory. +See the [Javadoc](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/rabbit/annotation/RabbitListenerConfigurer.html) for full details and examples. + +The container factories provide methods for adding `MessagePostProcessor` instances that are applied after receiving messages (before invoking the listener) and before sending replies. + +See [Reply Management](#async-annotation-driven-reply) for information about replies. + +Starting with version 2.0.6, you can add a `RetryTemplate` and `RecoveryCallback` to the listener container factory. +It is used when sending replies. +The `RecoveryCallback` is invoked when retries are exhausted. +You can use a `SendRetryContextAccessor` to get information from the context. +The following example shows how to do so: + +``` +factory.setRetryTemplate(retryTemplate); +factory.setReplyRecoveryCallback(ctx -> { + Message failed = SendRetryContextAccessor.getMessage(ctx); + Address replyTo = SendRetryContextAccessor.getAddress(ctx); + Throwable t = ctx.getLastThrowable(); + ... + return null; +}); +``` + +If you prefer XML configuration, you can use the `` element. 
+Any beans annotated with `@RabbitListener` are detected. + +For `SimpleRabbitListenerContainer` instances, you can use XML similar to the following: + +``` + + + + + + + +``` + +For `DirectMessageListenerContainer` instances, you can use XML similar to the following: + +``` + + + + + + +``` + +Starting with version 2.0, the `@RabbitListener` annotation has a `concurrency` property. +It supports SpEL expressions (`#{…​}`) and property placeholders (`${…​}`). +Its meaning and allowed values depend on the container type, as follows: + +* For the `DirectMessageListenerContainer`, the value must be a single integer value, which sets the `consumersPerQueue` property on the container. + +* For the `SimpleRabbitListenerContainer`, the value can be a single integer value, which sets the `concurrentConsumers` property on the container, or it can have the form, `m-n`, where `m` is the `concurrentConsumers` property and `n` is the `maxConcurrentConsumers` property. + +In either case, this setting overrides the settings on the factory. +Previously you had to define different container factories if you had listeners that required different concurrency. + +The annotation also allows overriding the factory `autoStartup` and `taskExecutor` properties via the `autoStartup` and `executor` (since 2.2) annotation properties. +Using a different executor for each might help with identifying threads associated with each listener in logs and thread dumps. + +Version 2.2 also added the `ackMode` property, which allows you to override the container factory’s `acknowledgeMode` property. + +``` +@RabbitListener(id = "manual.acks.1", queues = "manual.acks.1", ackMode = "MANUAL") +public void manual1(String in, Channel channel, + @Header(AmqpHeaders.DELIVERY_TAG) long tag) throws IOException { + + ... + channel.basicAck(tag, false); +} +``` + +###### Message Conversion for Annotated Methods + +There are two conversion steps in the pipeline before invoking the listener. 
+The first step uses a `MessageConverter` to convert the incoming Spring AMQP `Message` to a Spring-messaging `Message`.
+When the target method is invoked, the message payload is converted, if necessary, to the method parameter type.
+
+The default `MessageConverter` for the first step is a Spring AMQP `SimpleMessageConverter` that handles conversion to `String` and `java.io.Serializable` objects.
+All others remain as a `byte[]`.
+In the following discussion, we call this the “message converter”.
+
+The default converter for the second step is a `GenericMessageConverter`, which delegates to a conversion service
+(an instance of `DefaultFormattingConversionService`).
+In the following discussion, we call this the “method argument converter”.
+
+To change the message converter, you can add it as a property to the container factory bean.
+The following example shows how to do so:
+
+```
+@Bean
+public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() {
+    SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
+    ...
+    factory.setMessageConverter(new Jackson2JsonMessageConverter());
+    ...
+    return factory;
+}
+```
+
+This configures a Jackson2 converter that expects header information to be present to guide the conversion.
+
+You can also use a `ContentTypeDelegatingMessageConverter`, which can handle conversion of different content types.
+
+Starting with version 2.3, you can override the factory converter by specifying a bean name in the `messageConverter` property.
+
+```
+@Bean
+public Jackson2JsonMessageConverter jsonConverter() {
+    return new Jackson2JsonMessageConverter();
+}
+
+@RabbitListener(..., messageConverter = "jsonConverter")
+public void listen(String in) {
+    ...
+}
+```
+
+This avoids having to declare a different container factory just to change the converter. 
+ +In most cases, it is not necessary to customize the method argument converter unless, for example, you want to use +a custom `ConversionService`. + +In versions prior to 1.6, the type information to convert the JSON had to be provided in message headers, or a +custom `ClassMapper` was required. +Starting with version 1.6, if there are no type information headers, the type can be inferred from the target +method arguments. + +| |This type inference works only for `@RabbitListener` at the method level.| +|---|-------------------------------------------------------------------------| + +See [Jackson2JsonMessageConverter](#json-message-converter) for more information. + +If you wish to customize the method argument converter, you can do so as follows: + +``` +@Configuration +@EnableRabbit +public class AppConfig implements RabbitListenerConfigurer { + + ... + + @Bean + public DefaultMessageHandlerMethodFactory myHandlerMethodFactory() { + DefaultMessageHandlerMethodFactory factory = new DefaultMessageHandlerMethodFactory(); + factory.setMessageConverter(new GenericMessageConverter(myConversionService())); + return factory; + } + + @Bean + public DefaultConversionService myConversionService() { + DefaultConversionService conv = new DefaultConversionService(); + conv.addConverter(mySpecialConverter()); + return conv; + } + + @Override + public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) { + registrar.setMessageHandlerMethodFactory(myHandlerMethodFactory()); + } + + ... + +} +``` + +| |For multi-method listeners (see [Multi-method Listeners](#annotation-method-selection)), the method selection is based on the payload of the message **after the message conversion**.
The method argument converter is called only after the method has been selected.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Adding a Custom `HandlerMethodArgumentResolver` to @RabbitListener + +Starting with version 2.3.7 you are able to add your own `HandlerMethodArgumentResolver` and resolve custom method parameters. +All you need is to implement `RabbitListenerConfigurer` and use method `setCustomMethodArgumentResolvers()` from class `RabbitListenerEndpointRegistrar`. + +``` +@Configuration +class CustomRabbitConfig implements RabbitListenerConfigurer { + + @Override + public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) { + registrar.setCustomMethodArgumentResolvers( + new HandlerMethodArgumentResolver() { + + @Override + public boolean supportsParameter(MethodParameter parameter) { + return CustomMethodArgument.class.isAssignableFrom(parameter.getParameterType()); + } + + @Override + public Object resolveArgument(MethodParameter parameter, org.springframework.messaging.Message message) { + return new CustomMethodArgument( + (String) message.getPayload(), + message.getHeaders().get("customHeader", String.class) + ); + } + + } + ); + } + +} +``` + +###### Programmatic Endpoint Registration + +`RabbitListenerEndpoint` provides a model of a Rabbit endpoint and is responsible for configuring the container for that model. +The infrastructure lets you configure endpoints programmatically in addition to the ones that are detected by the `RabbitListener` annotation. 
+The following example shows how to do so: + +``` +@Configuration +@EnableRabbit +public class AppConfig implements RabbitListenerConfigurer { + + @Override + public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) { + SimpleRabbitListenerEndpoint endpoint = new SimpleRabbitListenerEndpoint(); + endpoint.setQueueNames("anotherQueue"); + endpoint.setMessageListener(message -> { + // processing + }); + registrar.registerEndpoint(endpoint); + } +} +``` + +In the preceding example, we used `SimpleRabbitListenerEndpoint`, which provides the actual `MessageListener` to invoke, but you could just as well build your own endpoint variant to describe a custom invocation mechanism. + +It should be noted that you could just as well skip the use of `@RabbitListener` altogether and register your endpoints programmatically through `RabbitListenerConfigurer`. + +###### Annotated Endpoint Method Signature + +So far, we have been injecting a simple `String` in our endpoint, but it can actually have a very flexible method signature. +The following example rewrites it to inject the `Order` with a custom header: + +``` +@Component +public class MyService { + + @RabbitListener(queues = "myQueue") + public void processOrder(Order order, @Header("order_type") String orderType) { + ... + } +} +``` + +The following list shows the arguments that are available to be matched with parameters in listener endpoints: + +* The raw `org.springframework.amqp.core.Message`. + +* The `MessageProperties` from the raw `Message`. + +* The `com.rabbitmq.client.Channel` on which the message was received. + +* The `org.springframework.messaging.Message` converted from the incoming AMQP message. + +* `@Header`-annotated method arguments to extract a specific header value, including standard AMQP headers. + +* `@Headers`-annotated argument that must also be assignable to `java.util.Map` for getting access to all headers. 
+ +* The converted payload + +A non-annotated element that is not one of the supported types (that is,`Message`, `MessageProperties`, `Message` and `Channel`) is matched with the payload. +You can make that explicit by annotating the parameter with `@Payload`. +You can also turn on validation by adding an extra `@Valid`. + +The ability to inject Spring’s message abstraction is particularly useful to benefit from all the information stored in the transport-specific message without relying on the transport-specific API. +The following example shows how to do so: + +``` +@RabbitListener(queues = "myQueue") +public void processOrder(Message order) { ... +} +``` + +Handling of method arguments is provided by `DefaultMessageHandlerMethodFactory`, which you can further customize to support additional method arguments. +The conversion and validation support can be customized there as well. + +For instance, if we want to make sure our `Order` is valid before processing it, we can annotate the payload with `@Valid` and configure the necessary validator, as follows: + +``` +@Configuration +@EnableRabbit +public class AppConfig implements RabbitListenerConfigurer { + + @Override + public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) { + registrar.setMessageHandlerMethodFactory(myHandlerMethodFactory()); + } + + @Bean + public DefaultMessageHandlerMethodFactory myHandlerMethodFactory() { + DefaultMessageHandlerMethodFactory factory = new DefaultMessageHandlerMethodFactory(); + factory.setValidator(myValidator()); + return factory; + } +} +``` + +###### @RabbitListener @Payload Validation + +Starting with version 2.3.7, it is now easier to add a `Validator` to validate `@RabbitListener` and `@RabbitHandler` `@Payload` arguments. +Now, you can simply add the validator to the registrar itself. + +``` +@Configuration +@EnableRabbit +public class Config implements RabbitListenerConfigurer { + ... 
+ @Override + public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) { + registrar.setValidator(new MyValidator()); + } +} +``` + +| |When using Spring Boot with the validation starter, a `LocalValidatorFactoryBean` is auto-configured:| +|---|-----------------------------------------------------------------------------------------------------| + +``` +@Configuration +@EnableRabbit +public class Config implements RabbitListenerConfigurer { + @Autowired + private LocalValidatorFactoryBean validator; + ... + @Override + public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) { + registrar.setValidator(this.validator); + } +} +``` + +To validate: + +``` +public static class ValidatedClass { + @Max(10) + private int bar; + public int getBar() { + return this.bar; + } + public void setBar(int bar) { + this.bar = bar; + } +} +``` + +and + +``` +@RabbitListener(id="validated", queues = "queue1", errorHandler = "validationErrorHandler", + containerFactory = "jsonListenerContainerFactory") +public void validatedListener(@Payload @Valid ValidatedClass val) { + ... +} +@Bean +public RabbitListenerErrorHandler validationErrorHandler() { + return (m, e) -> { + ... + }; +} +``` + +###### Listening to Multiple Queues + +When you use the `queues` attribute, you can specify that the associated container can listen to multiple queues. +You can use a `@Header` annotation to make the queue name from which a message was received available to the POJO +method. +The following example shows how to do so: + +``` +@Component +public class MyService { + + @RabbitListener(queues = { "queue1", "queue2" } ) + public void processOrder(String data, @Header(AmqpHeaders.CONSUMER_QUEUE) String queue) { + ... + } + +} +``` + +Starting with version 1.5, you can externalize the queue names by using property placeholders and SpEL. 
+The following example shows how to do so: + +``` +@Component +public class MyService { + + @RabbitListener(queues = "#{'${property.with.comma.delimited.queue.names}'.split(',')}" ) + public void processOrder(String data, @Header(AmqpHeaders.CONSUMER_QUEUE) String queue) { + ... + } + +} +``` + +Prior to version 1.5, only a single queue could be specified this way. +Each queue needed a separate property. + +###### Reply Management + +The existing support in `MessageListenerAdapter` already lets your method have a non-void return type. +When that is the case, the result of the invocation is encapsulated in a message sent to the the address specified in the `ReplyToAddress` header of the original message, or to the default address configured on the listener. +You can set that default address by using the `@SendTo` annotation of the messaging abstraction. + +Assuming our `processOrder` method should now return an `OrderStatus`, we can write it as follows to automatically send a reply: + +``` +@RabbitListener(destination = "myQueue") +@SendTo("status") +public OrderStatus processOrder(Order order) { + // order processing + return status; +} +``` + +If you need to set additional headers in a transport-independent manner, you could return a `Message` instead, something like the following: + +``` +@RabbitListener(destination = "myQueue") +@SendTo("status") +public Message processOrder(Order order) { + // order processing + return MessageBuilder + .withPayload(status) + .setHeader("code", 1234) + .build(); +} +``` + +Alternatively, you can use a `MessagePostProcessor` in the `beforeSendReplyMessagePostProcessors` container factory property to add more headers. 
Starting with version 2.2.3, the called bean/method is made available in the reply message, which can be used in a message post processor to communicate the information back to the caller:

```
factory.setBeforeSendReplyPostProcessors(msg -> {
    msg.getMessageProperties().setHeader("calledBean",
            msg.getMessageProperties().getTargetBean().getClass().getSimpleName());
    msg.getMessageProperties().setHeader("calledMethod",
            msg.getMessageProperties().getTargetMethod().getName());
    return msg;
});
```
+ +Starting with version 1.5, the `@SendTo` value can be a bean initialization SpEL Expression, as shown in the following example: + +``` +@RabbitListener(queues = "test.sendTo.spel") +@SendTo("#{spelReplyTo}") +public String capitalizeWithSendToSpel(String foo) { + return foo.toUpperCase(); +} +... +@Bean +public String spelReplyTo() { + return "test.sendTo.reply.spel"; +} +``` + +The expression must evaluate to a `String`, which can be a simple queue name (sent to the default exchange) or with +the form `exchange/routingKey` as discussed prior to the preceding example. + +| |The `#{…​}` expression is evaluated once, during initialization.| +|---|----------------------------------------------------------------| + +For dynamic reply routing, the message sender should include a `reply_to` message property or use the alternate +runtime SpEL expression (described after the next example). + +Starting with version 1.6, the `@SendTo` can be a SpEL expression that is evaluated at runtime against the request +and reply, as the following example shows: + +``` +@RabbitListener(queues = "test.sendTo.spel") +@SendTo("!{'some.reply.queue.with.' + result.queueName}") +public Bar capitalizeWithSendToSpel(Foo foo) { + return processTheFooAndReturnABar(foo); +} +``` + +The runtime nature of the SpEL expression is indicated with `!{…​}` delimiters. +The evaluation context `#root` object for the expression has three properties: + +* `request`: The `o.s.amqp.core.Message` request object. + +* `source`: The `o.s.messaging.Message` after conversion. + +* `result`: The method result. + +The context has a map property accessor, a standard type converter, and a bean resolver, which lets other beans in the +context be referenced (for example, `@someBeanName.determineReplyQ(request, result)`). + +In summary, `#{…​}` is evaluated once during initialization, with the `#root` object being the application context. 
+Beans are referenced by their names.`!{…​}` is evaluated at runtime for each message, with the root object having the properties listed earlier. +Beans are referenced with their names, prefixed by `@`. + +Starting with version 2.1, simple property placeholders are also supported (for example, `${some.reply.to}`). +With earlier versions, the following can be used as a work around, as the following example shows: + +``` +@RabbitListener(queues = "foo") +@SendTo("#{environment['my.send.to']}") +public String listen(Message in) { + ... + return ... +} +``` + +###### Reply ContentType + +If you are using a sophisticated message converter, such as the `ContentTypeDelegatingMessageConverter`, you can control the content type of the reply by setting the `replyContentType` property on the listener. +This allows the converter to select the appropriate delegate converter for the reply. + +``` +@RabbitListener(queues = "q1", messageConverter = "delegating", + replyContentType = "application/json") +public Thing2 listen(Thing1 in) { + ... +} +``` + +By default, for backwards compatibility, any content type property set by the converter will be overwritten by this value after conversion. +Converters such as the `SimpleMessageConverter` use the reply type rather than the content type to determine the conversion needed and sets the content type in the reply message appropriately. +This may not be the desired action and can be overridden by setting the `converterWinsContentType` property to `false`. +For example, if you return a `String` containing JSON, the `SimpleMessageConverter` will set the content type in the reply to `text/plain`. +The following configuration will ensure the content type is set properly, even if the `SimpleMessageConverter` is used. + +``` +@RabbitListener(queues = "q1", replyContentType = "application/json", + converterWinsContentType = "false") +public String listen(Thing in) { + ... 
+ return someJsonString; +} +``` + +These properties (`replyContentType` and `converterWinsContentType`) do not apply when the return type is a Spring AMQP `Message` or a Spring Messaging `Message`. +In the first case, there is no conversion involved; simply set the `contentType` message property. +In the second case, the behavior is controlled using message headers: + +``` +@RabbitListener(queues = "q1", messageConverter = "delegating") +@SendTo("q2") +public Message listen(String in) { + ... + return MessageBuilder.withPayload(in.toUpperCase()) + .setHeader(MessageHeaders.CONTENT_TYPE, "application/xml") + .build(); +} +``` + +This content type will be passed in the `MessageProperties` to the converter. +By default, for backwards compatibility, any content type property set by the converter will be overwritten by this value after conversion. +If you wish to override that behavior, also set the `AmqpHeaders.CONTENT_TYPE_CONVERTER_WINS` to `true` and any value set by the converter will be retained. + +###### Multi-method Listeners + +Starting with version 1.5.0, you can specify the `@RabbitListener` annotation at the class level. +Together with the new `@RabbitHandler` annotation, this lets a single listener invoke different methods, based on +the payload type of the incoming message. +This is best described using an example: + +``` +@RabbitListener(id="multi", queues = "someQueue") +@SendTo("my.reply.queue") +public class MultiListenerBean { + + @RabbitHandler + public String thing2(Thing2 thing2) { + ... + } + + @RabbitHandler + public String cat(Cat cat) { + ... + } + + @RabbitHandler + public String hat(@Header("amqp_receivedRoutingKey") String rk, @Payload Hat hat) { + ... + } + + @RabbitHandler(isDefault = true) + public String defaultMethod(Object object) { + ... + } + +} +``` + +In this case, the individual `@RabbitHandler` methods are invoked if the converted payload is a `Thing2`, a `Cat`, or a `Hat`. 
+You should understand that the system must be able to identify a unique method based on the payload type. +The type is checked for assignability to a single parameter that has no annotations or that is annotated with the `@Payload` annotation. +Notice that the same method signatures apply, as discussed in the method-level `@RabbitListener` ([described earlier](#message-listener-adapter)). + +Starting with version 2.0.3, a `@RabbitHandler` method can be designated as the default method, which is invoked if there is no match on other methods. +At most, one method can be so designated. + +| |`@RabbitHandler` is intended only for processing message payloads after conversion, if you wish to receive the unconverted raw `Message` object, you must use `@RabbitListener` on the method, not the class.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### `@Repeatable` `@RabbitListener` + +Starting with version 1.6, the `@RabbitListener` annotation is marked with `@Repeatable`. +This means that the annotation can appear on the same annotated element (method or class) multiple times. +In this case, a separate listener container is created for each annotation, each of which invokes the same listener`@Bean`. +Repeatable annotations can be used with Java 8 or above. + +###### Proxy `@RabbitListener` and Generics + +If your service is intended to be proxied (for example, in the case of `@Transactional`), you should keep in mind some considerations when +the interface has generic parameters. +Consider the following example: + +``` +interface TxService

{ + + String handle(P payload, String header); + +} + +static class TxServiceImpl implements TxService { + + @Override + @RabbitListener(...) + public String handle(Thing thing, String rk) { + ... + } + +} +``` + +With a generic interface and a particular implementation, you are forced to switch to the CGLIB target class proxy because the actual implementation of the interface`handle` method is a bridge method. +In the case of transaction management, the use of CGLIB is configured by using +an annotation option: `@EnableTransactionManagement(proxyTargetClass = true)`. +And in this case, all annotations have to be declared on the target method in the implementation, as the following example shows: + +``` +static class TxServiceImpl implements TxService { + + @Override + @Transactional + @RabbitListener(...) + public String handle(@Payload Foo foo, @Header("amqp_receivedRoutingKey") String rk) { + ... + } + +} +``` + +###### Handling Exceptions + +By default, if an annotated listener method throws an exception, it is thrown to the container and the message are requeued and redelivered, discarded, or routed to a dead letter exchange, depending on the container and broker configuration. +Nothing is returned to the sender. + +Starting with version 2.0, the `@RabbitListener` annotation has two new attributes: `errorHandler` and `returnExceptions`. + +These are not configured by default. + +You can use the `errorHandler` to provide the bean name of a `RabbitListenerErrorHandler` implementation. 
+This functional interface has one method, as follows: + +``` +@FunctionalInterface +public interface RabbitListenerErrorHandler { + + Object handleError(Message amqpMessage, org.springframework.messaging.Message message, + ListenerExecutionFailedException exception) throws Exception; + +} +``` + +As you can see, you have access to the raw message received from the container, the spring-messaging `Message` object produced by the message converter, and the exception that was thrown by the listener (wrapped in a `ListenerExecutionFailedException`). +The error handler can either return some result (which is sent as the reply) or throw the original or a new exception (which is thrown to the container or returned to the sender, depending on the `returnExceptions` setting). + +The `returnExceptions` attribute, when `true`, causes exceptions to be returned to the sender. +The exception is wrapped in a `RemoteInvocationResult` object. +On the sender side, there is an available `RemoteInvocationAwareMessageConverterAdapter`, which, if configured into the `RabbitTemplate`, re-throws the server-side exception, wrapped in an `AmqpRemoteException`. +The stack trace of the server exception is synthesized by merging the server and client stack traces. + +| |This mechanism generally works only with the default `SimpleMessageConverter`, which uses Java serialization.
Exceptions are generally not “Jackson-friendly” and cannot be serialized to JSON.
If you use JSON, consider using an `errorHandler` to return some other Jackson-friendly `Error` object when an exception is thrown.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |In version 2.1, this interface moved from package `o.s.amqp.rabbit.listener` to `o.s.amqp.rabbit.listener.api`.| +|---|---------------------------------------------------------------------------------------------------------------| + +Starting with version 2.1.7, the `Channel` is available in a messaging message header; this allows you to ack or nack the failed messasge when using `AcknowledgeMode.MANUAL`: + +``` +public Object handleError(Message amqpMessage, org.springframework.messaging.Message message, + ListenerExecutionFailedException exception) { + ... + message.getHeaders().get(AmqpHeaders.CHANNEL, Channel.class) + .basicReject(message.getHeaders().get(AmqpHeaders.DELIVERY_TAG, Long.class), + true); + } +``` + +Starting with version 2.2.18, if a message conversion exception is thrown, the error handler will be called, with `null` in the `message` argument. +This allows the application to send some result to the caller, indicating that a badly-formed message was received. +Previously, such errors were thrown and handled by the container. + +###### Container Management + +Containers created for annotations are not registered with the application context. +You can obtain a collection of all containers by invoking `getListenerContainers()` on the`RabbitListenerEndpointRegistry` bean. +You can then iterate over this collection, for example, to stop or start all containers or invoke the `Lifecycle` methods +on the registry itself, which will invoke the operations on each container. 
+ +You can also get a reference to an individual container by using its `id`, using `getListenerContainer(String id)` — for +example, `registry.getListenerContainer("multi")` for the container created by the snippet above. + +Starting with version 1.5.2, you can obtain the `id` values of the registered containers with `getListenerContainerIds()`. + +Starting with version 1.5, you can now assign a `group` to the container on the `RabbitListener` endpoint. +This provides a mechanism to get a reference to a subset of containers. +Adding a `group` attribute causes a bean of type `Collection` to be registered with the context with the group name. + +##### @RabbitListener with Batching + +When receiving a [a batch](#template-batching) of messages, the de-batching is normally performed by the container and the listener is invoked with one message at at time. +Starting with version 2.2, you can configure the listener container factory and listener to receive the entire batch in one call, simply set the factory’s `batchListener` property, and make the method payload parameter a `List`: + +``` +@Bean +public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(connectionFactory()); + factory.setBatchListener(true); + return factory; +} + +@RabbitListener(queues = "batch.1") +public void listen1(List in) { + ... +} + +// or + +@RabbitListener(queues = "batch.2") +public void listen2(List> in) { + ... +} +``` + +Setting the `batchListener` property to true automatically turns off the `deBatchingEnabled` container property in containers that the factory creates (unless `consumerBatchEnabled` is `true` - see below). Effectively, the debatching is moved from the container to the listener adapter and the adapter creates the list that is passed to the listener. 
+ +A batch-enabled factory cannot be used with a [multi-method listener](#annotation-method-selection). + +Also starting with version 2.2. when receiving batched messages one-at-a-time, the last message contains a boolean header set to `true`. +This header can be obtained by adding the `@Header(AmqpHeaders.LAST_IN_BATCH)` boolean last` parameter to your listener method. +The header is mapped from `MessageProperties.isLastInBatch()`. +In addition, `AmqpHeaders.BATCH_SIZE` is populated with the size of the batch in every message fragment. + +In addition, a new property `consumerBatchEnabled` has been added to the `SimpleMessageListenerContainer`. +When this is true, the container will create a batch of messages, up to `batchSize`; a partial batch is delivered if `receiveTimeout` elapses with no new messages arriving. +If a producer-created batch is received, it is debatched and added to the consumer-side batch; therefore the actual number of messages delivered may exceed `batchSize`, which represents the number of messages received from the broker.`deBatchingEnabled` must be true when `consumerBatchEnabled` is true; the container factory will enforce this requirement. 
+ +``` +@Bean +public SimpleRabbitListenerContainerFactory consumerBatchContainerFactory() { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(rabbitConnectionFactory()); + factory.setConsumerTagStrategy(consumerTagStrategy()); + factory.setBatchListener(true); // configures a BatchMessageListenerAdapter + factory.setBatchSize(2); + factory.setConsumerBatchEnabled(true); + return factory; +} +``` + +When using `consumerBatchEnabled` with `@RabbitListener`: + +``` +@RabbitListener(queues = "batch.1", containerFactory = "consumerBatchContainerFactory") +public void consumerBatch1(List amqpMessages) { + this.amqpMessagesReceived = amqpMessages; + this.batch1Latch.countDown(); +} + +@RabbitListener(queues = "batch.2", containerFactory = "consumerBatchContainerFactory") +public void consumerBatch2(List> messages) { + this.messagingMessagesReceived = messages; + this.batch2Latch.countDown(); +} + +@RabbitListener(queues = "batch.3", containerFactory = "consumerBatchContainerFactory") +public void consumerBatch3(List strings) { + this.batch3Strings = strings; + this.batch3Latch.countDown(); +} +``` + +* the first is called with the raw, unconverted `org.springframework.amqp.core.Message` s received. + +* the second is called with the `org.springframework.messaging.Message` s with converted payloads and mapped headers/properties. + +* the third is called with the converted payloads, with no access to headers/properteis. + +You can also add a `Channel` parameter, often used when using `MANUAL` ack mode. +This is not very useful with the third example because you don’t have access to the `delivery_tag` property. + +##### Using Container Factories + +Listener container factories were introduced to support the `@RabbitListener` and registering containers with the `RabbitListenerEndpointRegistry`, as discussed in [Programmatic Endpoint Registration](#async-annotation-driven-registration). 
+ +Starting with version 2.1, they can be used to create any listener container — even a container without a listener (such as for use in Spring Integration). +Of course, a listener must be added before the container is started. + +There are two ways to create such containers: + +* Use a SimpleRabbitListenerEndpoint + +* Add the listener after creation + +The following example shows how to use a `SimpleRabbitListenerEndpoint` to create a listener container: + +``` +@Bean +public SimpleMessageListenerContainer factoryCreatedContainerSimpleListener( + SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory) { + SimpleRabbitListenerEndpoint endpoint = new SimpleRabbitListenerEndpoint(); + endpoint.setQueueNames("queue.1"); + endpoint.setMessageListener(message -> { + ... + }); + return rabbitListenerContainerFactory.createListenerContainer(endpoint); +} +``` + +The following example shows how to add the listener after creation: + +``` +@Bean +public SimpleMessageListenerContainer factoryCreatedContainerNoListener( + SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory) { + SimpleMessageListenerContainer container = rabbitListenerContainerFactory.createListenerContainer(); + container.setMessageListener(message -> { + ... + }); + container.setQueueNames("test.no.listener.yet"); + return container; +} +``` + +In either case, the listener can also be a `ChannelAwareMessageListener`, since it is now a sub-interface of `MessageListener`. + +These techniques are useful if you wish to create several containers with similar properties or use a pre-configured container factory such as the one provided by Spring Boot auto configuration or both. 
+ +| |Containers created this way are normal `@Bean` instances and are not registered in the `RabbitListenerEndpointRegistry`.| +|---|------------------------------------------------------------------------------------------------------------------------| + +##### Asynchronous `@RabbitListener` Return Types + +Starting with version 2.1, `@RabbitListener` (and `@RabbitHandler`) methods can be specified with asynchronous return types `ListenableFuture` and `Mono`, letting the reply be sent asynchronously. + +| |The listener container factory must be configured with `AcknowledgeMode.MANUAL` so that the consumer thread will not ack the message; instead, the asynchronous completion will ack or nack the message when the async operation completes.
When the async result is completed with an error, whether the message is requeued or not depends on the exception type thrown, the container configuration, and the container error handler.
By default, the message will be requeued, unless the container’s `defaultRequeueRejected` property is set to `false` (it is `true` by default).
If the async result is completed with an `AmqpRejectAndDontRequeueException`, the message will not be requeued.
If the container’s `defaultRequeueRejected` property is `false`, you can override that by setting the future’s exception to a `ImmediateRequeueException` and the message will be requeued.
If some exception occurs within the listener method that prevents creation of the async result object, you MUST catch that exception and return an appropriate return object that will cause the message to be acknowledged or requeued.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with versions 2.2.21, 2.3.13, 2.4.1, the `AcknowledgeMode` will be automatically set the `MANUAL` when async return types are detected. +In addition, incoming messages with fatal exceptions will be negatively acknowledged individually, previously any prior unacknowledged message were also negatively acknowledged. + +##### Threading and Asynchronous Consumers + +A number of different threads are involved with asynchronous consumers. + +Threads from the `TaskExecutor` configured in the `SimpleMessageListenerContainer` are used to invoke the `MessageListener` when a new message is delivered by `RabbitMQ Client`. 
+If not configured, a `SimpleAsyncTaskExecutor` is used. +If you use a pooled executor, you need to ensure the pool size is sufficient to handle the configured concurrency. +With the `DirectMessageListenerContainer`, the `MessageListener` is invoked directly on a `RabbitMQ Client` thread. +In this case, the `taskExecutor` is used for the task that monitors the consumers. + +| |When using the default `SimpleAsyncTaskExecutor`, for the threads the listener is invoked on, the listener container `beanName` is used in the `threadNamePrefix`.
This is useful for log analysis.
We generally recommend always including the thread name in the logging appender configuration.
When a `TaskExecutor` is specifically provided through the `taskExecutor` property on the container, it is used as is, without modification.
It is recommended that you use a similar technique to name the threads created by a custom `TaskExecutor` bean definition, to aid with thread identification in log messages.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `Executor` configured in the `CachingConnectionFactory` is passed into the `RabbitMQ Client` when creating the connection, and its threads are used to deliver new messages to the listener container. +If this is not configured, the client uses an internal thread pool executor with (at the time of writing) a pool size of `Runtime.getRuntime().availableProcessors() * 2` for each connection. + +If you have a large number of factories or are using `CacheMode.CONNECTION`, you may wish to consider using a shared `ThreadPoolTaskExecutor` with enough threads to satisfy your workload. + +| |With the `DirectMessageListenerContainer`, you need to ensure that the connection factory is configured with a task executor that has sufficient threads to support your desired concurrency across all listener containers that use that factory.
The default pool size (at the time of writing) is `Runtime.getRuntime().availableProcessors() * 2`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `RabbitMQ client` uses a `ThreadFactory` to create threads for low-level I/O (socket) operations. +To modify this factory, you need to configure the underlying RabbitMQ `ConnectionFactory`, as discussed in [Configuring the Underlying Client Connection Factory](#connection-factory). + +##### Choosing a Container + +Version 2.0 introduced the `DirectMessageListenerContainer` (DMLC). +Previously, only the `SimpleMessageListenerContainer` (SMLC) was available. +The SMLC uses an internal queue and a dedicated thread for each consumer. +If a container is configured to listen to multiple queues, the same consumer thread is used to process all the queues. +Concurrency is controlled by `concurrentConsumers` and other properties. +As messages arrive from the RabbitMQ client, the client thread hands them off to the consumer thread through the queue. +This architecture was required because, in early versions of the RabbitMQ client, multiple concurrent deliveries were not possible. +Newer versions of the client have a revised threading model and can now support concurrency. +This has allowed the introduction of the DMLC where the listener is now invoked directly on the RabbitMQ Client thread. +Its architecture is, therefore, actually “simpler” than the SMLC. +However, there are some limitations with this approach, and certain features of the SMLC are not available with the DMLC. +Also, concurrency is controlled by `consumersPerQueue` (and the client library’s thread pool). 
+The `concurrentConsumers` and associated properties are not available with this container. + +The following features are available with the SMLC but not the DMLC: + +* `batchSize`: With the SMLC, you can set this to control how many messages are delivered in a transaction or to reduce the number of acks, but it may cause the number of duplicate deliveries to increase after a failure. + (The DMLC does have `messagesPerAck`, which you can use to reduce the acks, the same as with `batchSize` and the SMLC, but it cannot be used with transactions — each message is delivered and ack’d in a separate transaction). + +* `consumerBatchEnabled`: enables batching of discrete messages in the consumer; see [Message Listener Container Configuration](#containerAttributes) for more information. + +* `maxConcurrentConsumers` and consumer scaling intervals or triggers — there is no auto-scaling in the DMLC. + It does, however, let you programmatically change the `consumersPerQueue` property and the consumers are adjusted accordingly. + +However, the DMLC has the following benefits over the SMLC: + +* Adding and removing queues at runtime is more efficient. + With the SMLC, the entire consumer thread is restarted (all consumers canceled and re-created). + With the DMLC, unaffected consumers are not canceled. + +* The context switch between the RabbitMQ Client thread and the consumer thread is avoided. + +* Threads are shared across consumers rather than having a dedicated thread for each consumer in the SMLC. + However, see the IMPORTANT note about the connection factory configuration in [Threading and Asynchronous Consumers](#threading). + +See [Message Listener Container Configuration](#containerAttributes) for information about which configuration properties apply to each container. 
+ +##### Detecting Idle Asynchronous Consumers + +While efficient, one problem with asynchronous consumers is detecting when they are idle — users might want to take +some action if no messages arrive for some period of time. + +Starting with version 1.6, it is now possible to configure the listener container to publish a`ListenerContainerIdleEvent` when some time passes with no message delivery. +While the container is idle, an event is published every `idleEventInterval` milliseconds. + +To configure this feature, set `idleEventInterval` on the container. +The following example shows how to do so in XML and in Java (for both a `SimpleMessageListenerContainer` and a `SimpleRabbitListenerContainerFactory`): + +``` + + + +``` + +``` +@Bean +public SimpleMessageListenerContainer(ConnectionFactory connectionFactory) { + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(connectionFactory); + ... + container.setIdleEventInterval(60000L); + ... + return container; +} +``` + +``` +@Bean +public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(rabbitConnectionFactory()); + factory.setIdleEventInterval(60000L); + ... + return factory; +} +``` + +In each of these cases, an event is published once per minute while the container is idle. + +###### Event Consumption + +You can capture idle events by implementing `ApplicationListener` — either a general listener, or one narrowed to only +receive this specific event. +You can also use `@EventListener`, introduced in Spring Framework 4.2. + +The following example combines the `@RabbitListener` and `@EventListener` into a single class. +You need to understand that the application listener gets events for all containers, so you may need to +check the listener ID if you want to take specific action based on which container is idle. 
+You can also use the `@EventListener` `condition` for this purpose. + +The events have four properties: + +* `source`: The listener container instance + +* `id`: The listener ID (or container bean name) + +* `idleTime`: The time the container had been idle when the event was published + +* `queueNames`: The names of the queue(s) that the container listens to + +The following example shows how to create listeners by using both the `@RabbitListener` and the `@EventListener` annotations: + +``` +public class Listener { + + @RabbitListener(id="someId", queues="#{queue.name}") + public String listen(String foo) { + return foo.toUpperCase(); + } + + @EventListener(condition = "event.listenerId == 'someId'") + public void onApplicationEvent(ListenerContainerIdleEvent event) { + ... + } + +} +``` + +| |Event listeners see events for all containers.
Consequently, in the preceding example, we narrow the events received based on the listener ID.|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------|
+
+| |If you wish to use the idle event to stop the listener container, you should not call `container.stop()` on the thread that calls the listener.
Doing so always causes delays and unnecessary log messages.
Instead, you should hand off the event to a different thread that can then stop the container.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Monitoring Listener Performance + +Starting with version 2.2, the listener containers will automatically create and update Micrometer `Timer` s for the listener, if `Micrometer` is detected on the class path, and a `MeterRegistry` is present in the application context. +The timers can be disabled by setting the container property `micrometerEnabled` to `false`. + +Two timers are maintained - one for successful calls to the listener and one for failures. +With a simple `MessageListener`, there is a pair of timers for each configured queue. + +The timers are named `spring.rabbitmq.listener` and have the following tags: + +* `listenerId` : (listener id or container bean name) + +* `queue` : (the queue name for a simple listener or list of configured queue names when `consumerBatchEnabled` is `true` - because a batch may contain messages from multiple queues) + +* `result` : `success` or `failure` + +* `exception` : `none` or `ListenerExecutionFailedException` + +You can add additional tags using the `micrometerTags` container property. + +#### 4.1.7. Containers and Broker-Named queues + +While it is preferable to use `AnonymousQueue` instances as auto-delete queues, starting with version 2.1, you can use broker named queues with listener containers. 
+The following example shows how to do so: + +``` +@Bean +public Queue queue() { + return new Queue("", false, true, true); +} + +@Bean +public SimpleMessageListenerContainer container() { + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(cf()); + container.setQueues(queue()); + container.setMessageListener(m -> { + ... + }); + container.setMissingQueuesFatal(false); + return container; +} +``` + +Notice the empty `String` for the name. +When the `RabbitAdmin` declares queues, it updates the `Queue.actualName` property with the name returned by the broker. +You must use `setQueues()` when you configure the container for this to work, so that the container can access the declared name at runtime. +Just setting the names is insufficient. + +| |You cannot add broker-named queues to the containers while they are running.| +|---|----------------------------------------------------------------------------| + +| |When a connection is reset and a new one is established, the new queue gets a new name.
Since there is a race condition between the container restarting and the queue being re-declared, it is important to set the container’s `missingQueuesFatal` property to `false`, since the container is likely to initially try to reconnect to the old queue.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.8. Message Converters + +The `AmqpTemplate` also defines several methods for sending and receiving messages that delegate to a `MessageConverter`. +The `MessageConverter` provides a single method for each direction: one for converting **to** a `Message` and another for converting **from** a `Message`. +Notice that, when converting to a `Message`, you can also provide properties in addition to the object. +The `object` parameter typically corresponds to the Message body. +The following listing shows the `MessageConverter` interface definition: + +``` +public interface MessageConverter { + + Message toMessage(Object object, MessageProperties messageProperties) + throws MessageConversionException; + + Object fromMessage(Message message) throws MessageConversionException; + +} +``` + +The relevant `Message`-sending methods on the `AmqpTemplate` are simpler than the methods we discussed previously, because they do not require the `Message` instance. +Instead, the `MessageConverter` is responsible for “creating” each `Message` by converting the provided object to the byte array for the `Message` body and then adding any provided `MessageProperties`. 
+The following listing shows the definitions of the various methods: + +``` +void convertAndSend(Object message) throws AmqpException; + +void convertAndSend(String routingKey, Object message) throws AmqpException; + +void convertAndSend(String exchange, String routingKey, Object message) + throws AmqpException; + +void convertAndSend(Object message, MessagePostProcessor messagePostProcessor) + throws AmqpException; + +void convertAndSend(String routingKey, Object message, + MessagePostProcessor messagePostProcessor) throws AmqpException; + +void convertAndSend(String exchange, String routingKey, Object message, + MessagePostProcessor messagePostProcessor) throws AmqpException; +``` + +On the receiving side, there are only two methods: one that accepts the queue name and one that relies on the template’s “queue” property having been set. +The following listing shows the definitions of the two methods: + +``` +Object receiveAndConvert() throws AmqpException; + +Object receiveAndConvert(String queueName) throws AmqpException; +``` + +| |The `MessageListenerAdapter` mentioned in [Asynchronous Consumer](#async-consumer) also uses a `MessageConverter`.| +|---|------------------------------------------------------------------------------------------------------------------| + +##### `SimpleMessageConverter` + +The default implementation of the `MessageConverter` strategy is called `SimpleMessageConverter`. +This is the converter that is used by an instance of `RabbitTemplate` if you do not explicitly configure an alternative. +It handles text-based content, serialized Java objects, and byte arrays. + +###### Converting From a `Message` + +If the content type of the input `Message` begins with "text" (for example, +"text/plain"), it also checks for the content-encoding property to determine the charset to be used when converting the `Message` body byte array to a Java `String`. 
+If no content-encoding property had been set on the input `Message`, it uses the UTF-8 charset by default.
+If you need to override that default setting, you can configure an instance of `SimpleMessageConverter`, set its `defaultCharset` property, and inject that into a `RabbitTemplate` instance.
+
+If the content-type property value of the input `Message` is set to "application/x-java-serialized-object", the `SimpleMessageConverter` tries to deserialize (rehydrate) the byte array into a Java object.
+While that might be useful for simple prototyping, we do not recommend relying on Java serialization, since it leads to tight coupling between the producer and the consumer.
+Of course, it also rules out usage of non-Java systems on either side.
+With AMQP being a wire-level protocol, it would be unfortunate to lose much of that advantage with such restrictions.
+In the next two sections, we explore some alternatives for passing rich domain object content without relying on Java serialization.
+
+For all other content-types, the `SimpleMessageConverter` returns the `Message` body content directly as a byte array.
+
+See [Java Deserialization](#java-deserialization) for important information.
+
+###### Converting To a `Message`
+
+When converting to a `Message` from an arbitrary Java Object, the `SimpleMessageConverter` likewise deals with byte arrays, strings, and serializable instances.
+It converts each of these to bytes (in the case of byte arrays, there is nothing to convert), and it sets the content-type property accordingly.
+If the `Object` to be converted does not match one of those types, the `Message` body is null.
+
+##### `SerializerMessageConverter`
+
+This converter is similar to the `SimpleMessageConverter` except that it can be configured with other Spring Framework `Serializer` and `Deserializer` implementations for `application/x-java-serialized-object` conversions.
+
+See [Java Deserialization](#java-deserialization) for important information.
+ +##### Jackson2JsonMessageConverter + +This section covers using the `Jackson2JsonMessageConverter` to convert to and from a `Message`. +It has the following sections: + +* [Converting to a `Message`](#Jackson2JsonMessageConverter-to-message) + +* [Converting from a `Message`](#Jackson2JsonMessageConverter-from-message) + +###### Converting to a `Message` + +As mentioned in the previous section, relying on Java serialization is generally not recommended. +One rather common alternative that is more flexible and portable across different languages and platforms is JSON +(JavaScript Object Notation). +The converter can be configured on any `RabbitTemplate` instance to override its usage of the `SimpleMessageConverter`default. +The `Jackson2JsonMessageConverter` uses the `com.fasterxml.jackson` 2.x library. +The following example configures a `Jackson2JsonMessageConverter`: + +``` + + + + + + + + + +``` + +As shown above, `Jackson2JsonMessageConverter` uses a `DefaultClassMapper` by default. +Type information is added to (and retrieved from) `MessageProperties`. +If an inbound message does not contain type information in `MessageProperties`, but you know the expected type, you +can configure a static type by using the `defaultType` property, as the following example shows: + +``` + + + + + + + +``` + +In addition, you can provide custom mappings from the value in the `*TypeId*` header. 
+The following example shows how to do so: + +``` +@Bean +public Jackson2JsonMessageConverter jsonMessageConverter() { + Jackson2JsonMessageConverter jsonConverter = new Jackson2JsonMessageConverter(); + jsonConverter.setClassMapper(classMapper()); + return jsonConverter; +} + +@Bean +public DefaultClassMapper classMapper() { + DefaultClassMapper classMapper = new DefaultClassMapper(); + Map> idClassMapping = new HashMap<>(); + idClassMapping.put("thing1", Thing1.class); + idClassMapping.put("thing2", Thing2.class); + classMapper.setIdClassMapping(idClassMapping); + return classMapper; +} +``` + +Now, if the sending system sets the header to `thing1`, the converter creates a `Thing1` object, and so on. +See the [Receiving JSON from Non-Spring Applications](#spring-rabbit-json) sample application for a complete discussion about converting messages from non-Spring applications. + +###### Converting from a `Message` + +Inbound messages are converted to objects according to the type information added to headers by the sending system. + +In versions prior to 1.6, if type information is not present, conversion would fail. +Starting with version 1.6, if type information is missing, the converter converts the JSON by using Jackson defaults (usually a map). + +Also, starting with version 1.6, when you use `@RabbitListener` annotations (on methods), the inferred type information is added to the `MessageProperties`. +This lets the converter convert to the argument type of the target method. +This only applies if there is one parameter with no annotations or a single parameter with the `@Payload` annotation. +Parameters of type `Message` are ignored during the analysis. + +| |By default, the inferred type information will override the inbound `*TypeId*` and related headers created
by the sending system.
This lets the receiving system automatically convert to a different domain object.
This applies only if the parameter type is concrete (not abstract or an interface) or it is from the `java.util` package.
In all other cases, the `*TypeId*` and related headers are used.
There are cases where you might wish to override the default behavior and always use the `*TypeId*` information.
For example, suppose you have a `@RabbitListener` that takes a `Thing1` argument but the message contains a `Thing2` that
is a subclass of `Thing1` (which is concrete).
The inferred type would be incorrect.
To handle this situation, set the `TypePrecedence` property on the `Jackson2JsonMessageConverter` to `TYPE_ID` instead
of the default `INFERRED`.
(The property is actually on the converter’s `DefaultJackson2JavaTypeMapper`, but a setter is provided on the converter
for convenience.)
If you inject a custom type mapper, you should set the property on the mapper instead.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When converting from the `Message`, an incoming `MessageProperties.getContentType()` must be JSON-compliant (`contentType.contains("json")` is used to check).
Starting with version 2.2, `application/json` is assumed if there is no `contentType` property, or it has the default value `application/octet-stream`.
To revert to the previous behavior (return an unconverted `byte[]`), set the converter’s `assumeSupportedContentType` property to `false`.
If the content type is not supported, a `WARN` log message `Could not convert incoming message with content-type […]` is emitted and `message.getBody()` is returned as is — as a `byte[]`.
So, to meet the `Jackson2JsonMessageConverter` requirements on the consumer side, the producer must add the `contentType` message property — for example, as `application/json` or `text/x-json` or by using the `Jackson2JsonMessageConverter`, which sets the header automatically.
The following listing shows a number of converter calls:| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +@RabbitListener +public void thing1(Thing1 thing1) {...} + +@RabbitListener +public void thing1(@Payload Thing1 thing1, @Header("amqp_consumerQueue") String queue) {...} + +@RabbitListener +public void thing1(Thing1 thing1, o.s.amqp.core.Message message) {...} + +@RabbitListener +public void thing1(Thing1 thing1, o.s.messaging.Message message) {...} + +@RabbitListener +public void thing1(Thing1 thing1, String bar) {...} + +@RabbitListener +public void thing1(Thing1 thing1, o.s.messaging.Message message) {...} +``` + +In the first four cases in the preceding listing, the converter tries to convert to the `Thing1` type. +The fifth example is invalid because we cannot determine which argument should receive the message payload. +With the sixth example, the Jackson defaults apply due to the generic type being a `WildcardType`. 
+ +You can, however, create a custom converter and use the `targetMethod` message property to decide which type to convert +the JSON to. + +| |This type inference can only be achieved when the `@RabbitListener` annotation is declared at the method level.
With class-level `@RabbitListener`, the converted type is used to select which `@RabbitHandler` method to invoke.
For this reason, the infrastructure provides the `targetObject` message property, which you can use in a custom
converter to determine the type.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 1.6.11, `Jackson2JsonMessageConverter` and, therefore, `DefaultJackson2JavaTypeMapper` (`DefaultClassMapper`) provide the `trustedPackages` option to overcome [Serialization Gadgets](https://pivotal.io/security/cve-2017-4995) vulnerability.
By default and for backward compatibility, the `Jackson2JsonMessageConverter` trusts all packages — that is, it uses `*` for the option.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Deserializing Abstract Classes + +Prior to version 2.2.8, if the inferred type of a `@RabbitListener` was an abstract class (including interfaces), the converter would fall back to looking for type information in the headers and, if present, used that information; if that was not present, it would try to create the abstract class. +This caused a problem when a custom `ObjectMapper` that is configured with a custom deserializer to handle the abstract class is used, but the incoming message has invalid type headers. + +Starting with version 2.2.8, the previous behavior is retained by default. If you have such a custom `ObjectMapper` and you want to ignore type headers, and always use the inferred type for conversion, set the `alwaysConvertToInferredType` to `true`. +This is needed for backwards compatibility and to avoid the overhead of an attempted conversion when it would fail (with a standard `ObjectMapper`). + +###### Using Spring Data Projection Interfaces + +Starting with version 2.2, you can convert JSON to a Spring Data Projection interface instead of a concrete type. +This allows very selective, and low-coupled bindings to data, including the lookup of values from multiple places inside the JSON document. 
+For example the following interface can be defined as message payload type: + +``` +interface SomeSample { + + @JsonPath({ "$.username", "$.user.name" }) + String getUsername(); + +} +``` + +``` +@RabbitListener(queues = "projection") +public void projection(SomeSample in) { + String username = in.getUsername(); + ... +} +``` + +Accessor methods will be used to lookup the property name as field in the received JSON document by default. +The `@JsonPath` expression allows customization of the value lookup, and even to define multiple JSON path expressions, to lookup values from multiple places until an expression returns an actual value. + +To enable this feature, set the `useProjectionForInterfaces` to `true` on the message converter. +You must also add `spring-data:spring-data-commons` and `com.jayway.jsonpath:json-path` to the class path. + +When used as the parameter to a `@RabbitListener` method, the interface type is automatically passed to the converter as normal. + +###### Converting From a `Message` With `RabbitTemplate` + +As mentioned earlier, type information is conveyed in message headers to assist the converter when converting from a message. +This works fine in most cases. +However, when using generic types, it can only convert simple objects and known “container” objects (lists, arrays, and maps). +Starting with version 2.0, the `Jackson2JsonMessageConverter` implements `SmartMessageConverter`, which lets it be used with the new `RabbitTemplate` methods that take a `ParameterizedTypeReference` argument. +This allows conversion of complex generic types, as shown in the following example: + +``` +Thing1> thing1 = + rabbitTemplate.receiveAndConvert(new ParameterizedTypeReference>>() { }); +``` + +| |Starting with version 2.1, the `AbstractJsonMessageConverter` class has been removed.
It is no longer the base class for `Jackson2JsonMessageConverter`.
It has been replaced by `AbstractJackson2MessageConverter`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `MarshallingMessageConverter` + +Yet another option is the `MarshallingMessageConverter`. +It delegates to the Spring OXM library’s implementations of the `Marshaller` and `Unmarshaller` strategy interfaces. +You can read more about that library [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/oxm.html). +In terms of configuration, it is most common to provide only the constructor argument, since most implementations of `Marshaller` also implement `Unmarshaller`. +The following example shows how to configure a `MarshallingMessageConverter`: + +``` + + + + + + + + +``` + +##### `Jackson2XmlMessageConverter` + +This class was introduced in version 2.1 and can be used to convert messages from and to XML. + +Both `Jackson2XmlMessageConverter` and `Jackson2JsonMessageConverter` have the same base class: `AbstractJackson2MessageConverter`. + +| |The `AbstractJackson2MessageConverter` class is introduced to replace a removed class: `AbstractJsonMessageConverter`.| +|---|----------------------------------------------------------------------------------------------------------------------| + +The `Jackson2XmlMessageConverter` uses the `com.fasterxml.jackson` 2.x library. + +You can use it the same way as `Jackson2JsonMessageConverter`, except it supports XML instead of JSON. +The following example configures a `Jackson2JsonMessageConverter`: + +``` + + + + + + + +``` + +See [Jackson2JsonMessageConverter](#json-message-converter) for more information. + +| |Starting with version 2.2, `application/xml` is assumed if there is no `contentType` property, or it has the default value `application/octet-stream`.
To revert to the previous behavior (return an unconverted `byte[]`), set the converter’s `assumeSupportedContentType` property to `false`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `ContentTypeDelegatingMessageConverter` + +This class was introduced in version 1.4.2 and allows delegation to a specific `MessageConverter` based on the content type property in the `MessageProperties`. +By default, it delegates to a `SimpleMessageConverter` if there is no `contentType` property or there is a value that matches none of the configured converters. +The following example configures a `ContentTypeDelegatingMessageConverter`: + +``` + + + + + + + + +``` + +##### Java Deserialization + +This section covers how to deserialize Java objects. + +| |There is a possible vulnerability when deserializing java objects from untrusted sources.

If you accept messages from untrusted sources with a `content-type` of `application/x-java-serialized-object`, you should
consider configuring which packages and classes are allowed to be deserialized.
This applies to both the `SimpleMessageConverter` and `SerializerMessageConverter` when it is configured to use a `DefaultDeserializer` either implicitly or via configuration.

By default, the allowed list is empty, meaning all classes are deserialized.

You can set a list of patterns, such as `thing1.*`, `thing1.thing2.Cat` or `*.MySafeClass`.

The patterns are checked in order until a match is found.
If there is no match, a `SecurityException` is thrown.

You can set the patterns using the `allowedListPatterns` property on these converters.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Message Properties Converters + +The `MessagePropertiesConverter` strategy interface is used to convert between the Rabbit Client `BasicProperties` and Spring AMQP `MessageProperties`. +The default implementation (`DefaultMessagePropertiesConverter`) is usually sufficient for most purposes, but you can implement your own if needed. +The default properties converter converts `BasicProperties` elements of type `LongString` to `String` instances when the size is not greater than `1024` bytes. +Larger `LongString` instances are not converted (see the next paragraph). +This limit can be overridden with a constructor argument. + +Starting with version 1.6, headers longer than the long string limit (default: 1024) are now left as`LongString` instances by default by the `DefaultMessagePropertiesConverter`. +You can access the contents through the `getBytes[]`, `toString()`, or `getStream()` methods. 
+ +Previously, the `DefaultMessagePropertiesConverter` “converted” such headers to a `DataInputStream` (actually it just referenced the `LongString` instance’s `DataInputStream`). +On output, this header was not converted (except to a String — for example, `[[email protected]](/cdn-cgi/l/email-protection)` by calling `toString()` on the stream). + +Large incoming `LongString` headers are now correctly “converted” on output, too (by default). + +A new constructor is provided to let you configure the converter to work as before. +The following listing shows the Javadoc comment and declaration of the method: + +``` +/** + * Construct an instance where LongStrings will be returned + * unconverted or as a java.io.DataInputStream when longer than this limit. + * Use this constructor with 'true' to restore pre-1.6 behavior. + * @param longStringLimit the limit. + * @param convertLongLongStrings LongString when false, + * DataInputStream when true. + * @since 1.6 + */ +public DefaultMessagePropertiesConverter(int longStringLimit, boolean convertLongLongStrings) { ... } +``` + +Also starting with version 1.6, a new property called `correlationIdString` has been added to `MessageProperties`. +Previously, when converting to and from `BasicProperties` used by the RabbitMQ client, an unnecessary `byte[] <→ String` conversion was performed because `MessageProperties.correlationId` is a `byte[]`, but `BasicProperties` uses a `String`. +(Ultimately, the RabbitMQ client uses UTF-8 to convert the `String` to bytes to put in the protocol message). + +To provide maximum backwards compatibility, a new property called `correlationIdPolicy` has been added to the`DefaultMessagePropertiesConverter`. +This takes a `DefaultMessagePropertiesConverter.CorrelationIdPolicy` enum argument. +By default it is set to `BYTES`, which replicates the previous behavior. 
+ +For inbound messages: + +* `STRING`: Only the `correlationIdString` property is mapped + +* `BYTES`: Only the `correlationId` property is mapped + +* `BOTH`: Both properties are mapped + +For outbound messages: + +* `STRING`: Only the `correlationIdString` property is mapped + +* `BYTES`: Only the `correlationId` property is mapped + +* `BOTH`: Both properties are considered, with the `String` property taking precedence + +Also starting with version 1.6, the inbound `deliveryMode` property is no longer mapped to `MessageProperties.deliveryMode`. +It is mapped to `MessageProperties.receivedDeliveryMode` instead. +Also, the inbound `userId` property is no longer mapped to `MessageProperties.userId`. +It is mapped to `MessageProperties.receivedUserId` instead. +These changes are to avoid unexpected propagation of these properties if the same `MessageProperties` object is used for an outbound message. + +Starting with version 2.2, the `DefaultMessagePropertiesConverter` converts any custom headers with values of type `Class` using `getName()` instead of `toString()`; this avoids consuming application having to parse the class name out of the `toString()` representation. +For rolling upgrades, you may need to change your consumers to understand both formats until all producers are upgraded. + +#### 4.1.9. Modifying Messages - Compression and More + +A number of extension points exist. +They let you perform some processing on a message, either before it is sent to RabbitMQ or immediately after it is received. + +As can be seen in [Message Converters](#message-converters), one such extension point is in the `AmqpTemplate` `convertAndReceive` operations, where you can provide a `MessagePostProcessor`. +For example, after your POJO has been converted, the `MessagePostProcessor` lets you set custom headers or properties on the `Message`. 
+ +Starting with version 1.4.2, additional extension points have been added to the `RabbitTemplate` - `setBeforePublishPostProcessors()` and `setAfterReceivePostProcessors()`. +The first enables a post processor to run immediately before sending to RabbitMQ. +When using batching (see [Batching](#template-batching)), this is invoked after the batch is assembled and before the batch is sent. +The second is invoked immediately after a message is received. + +These extension points are used for such features as compression and, for this purpose, several `MessagePostProcessor` implementations are provided.`GZipPostProcessor`, `ZipPostProcessor` and `DeflaterPostProcessor` compress messages before sending, and `GUnzipPostProcessor`, `UnzipPostProcessor` and `InflaterPostProcessor` decompress received messages. + +| |Starting with version 2.1.5, the `GZipPostProcessor` can be configured with the `copyProperties = true` option to make a copy of the original message properties.
By default, these properties are reused for performance reasons, and modified with compression content encoding and the optional `MessageProperties.SPRING_AUTO_DECOMPRESS` header.
If you retain a reference to the original outbound message, its properties will change as well.
So, if your application retains a copy of an outbound message with these message post processors, consider turning the `copyProperties` option on.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 2.2.12, you can configure the delimiter that the compressing post processors use between content encoding elements.
With versions 2.2.11 and before, this was hard-coded as `:`; it is now set to `, ` by default.
The decompressors will work with both delimiters.
However, if you publish messages with 2.3 or later and consume with 2.2.11 or earlier, you MUST set the `encodingDelimiter` property on the compressor(s) to `:`.
When your consumers are upgraded to 2.2.12 or later, you can revert to the default of `, `.| +|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+ +Similar request-reply methods are also available where the `MessageConverter` is applied to both the request and reply. +Those methods are named `convertSendAndReceive`. +See the [Javadoc of `AmqpTemplate`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/core/AmqpTemplate.html) for more detail. + +Starting with version 1.5.0, each of the `sendAndReceive` method variants has an overloaded version that takes `CorrelationData`. +Together with a properly configured connection factory, this enables the receipt of publisher confirms for the send side of the operation. +See [Correlated Publisher Confirms and Returns](#template-confirms) and the [Javadoc for `RabbitOperations`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/rabbit/core/RabbitOperations.html) for more information. + +Starting with version 2.0, there are variants of these methods (`convertSendAndReceiveAsType`) that take an additional `ParameterizedTypeReference` argument to convert complex returned types. +The template must be configured with a `SmartMessageConverter`. +See [Converting From a `Message` With `RabbitTemplate`](#json-complex) for more information. + +Starting with version 2.1, you can configure the `RabbitTemplate` with the `noLocalReplyConsumer` option to control a `noLocal` flag for reply consumers. +This is `false` by default. + +##### Reply Timeout + +By default, the send and receive methods timeout after five seconds and return null. +You can modify this behavior by setting the `replyTimeout` property. +Starting with version 1.5, if you set the `mandatory` property to `true` (or the `mandatory-expression` evaluates to `true` for a particular message), if the message cannot be delivered to a queue, an `AmqpMessageReturnedException` is thrown. +This exception has `returnedMessage`, `replyCode`, and `replyText` properties, as well as the `exchange` and `routingKey` used for the send. + +| |This feature uses publisher returns.
You can enable it by setting `publisherReturns` to `true` on the `CachingConnectionFactory` (see [Publisher Confirms and Returns](#cf-pub-conf-ret)).
Also, you must not have registered your own `ReturnCallback` with the `RabbitTemplate`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.1.2, a `replyTimedOut` method has been added, letting subclasses be informed of the timeout so that they can clean up any retained state. + +Starting with versions 2.0.11 and 2.1.3, when you use the default `DirectReplyToMessageListenerContainer`, you can add an error handler by setting the template’s `replyErrorHandler` property. +This error handler is invoked for any failed deliveries, such as late replies and messages received without a correlation header. +The exception passed in is a `ListenerExecutionFailedException`, which has a `failedMessage` property. + +##### RabbitMQ Direct reply-to + +| |Starting with version 3.4.0, the RabbitMQ server supports [direct reply-to](https://www.rabbitmq.com/direct-reply-to.html).
This eliminates the main reason for a fixed reply queue (to avoid the need to create a temporary queue for each request).
Starting with Spring AMQP version 1.4.1 direct reply-to is used by default (if supported by the server) instead of creating temporary reply queues.
When no `replyQueue` is provided (or it is set with a name of `amq.rabbitmq.reply-to`), the `RabbitTemplate` automatically detects whether direct reply-to is supported and either uses it or falls back to using a temporary reply queue.
When using direct reply-to, a `reply-listener` is not required and should not be configured.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Reply listeners are still supported with named queues (other than `amq.rabbitmq.reply-to`), allowing control of reply concurrency and so on. + +Starting with version 1.6, if you wish to use a temporary, exclusive, auto-delete queue for each +reply, set the `useTemporaryReplyQueues` property to `true`. +This property is ignored if you set a `replyAddress`. + +You can change the criteria that dictate whether to use direct reply-to by subclassing `RabbitTemplate` and overriding `useDirectReplyTo()` to check different criteria. +The method is called once only, when the first request is sent. + +Prior to version 2.0, the `RabbitTemplate` created a new consumer for each request and canceled the consumer when the reply was received (or timed out). +Now the template uses a `DirectReplyToMessageListenerContainer` instead, letting the consumers be reused. +The template still takes care of correlating the replies, so there is no danger of a late reply going to a different sender. +If you want to revert to the previous behavior, set the `useDirectReplyToContainer` (`direct-reply-to-container` when using XML configuration) property to false. 
+ +The `AsyncRabbitTemplate` has no such option. +It always used a `DirectReplyToContainer` for replies when direct reply-to is used. + +Starting with version 2.3.7, the template has a new property `useChannelForCorrelation`. +When this is `true`, the server does not have to copy the correlation id from the request message headers to the reply message. +Instead, the channel used to send the request is used to correlate the reply to the request. + +##### Message Correlation With A Reply Queue + +When using a fixed reply queue (other than `amq.rabbitmq.reply-to`), you must provide correlation data so that replies can be correlated to requests. +See [RabbitMQ Remote Procedure Call (RPC)](https://www.rabbitmq.com/tutorials/tutorial-six-java.html). +By default, the standard `correlationId` property is used to hold the correlation data. +However, if you wish to use a custom property to hold correlation data, you can set the `correlation-key` attribute on the \. +Explicitly setting the attribute to `correlationId` is the same as omitting the attribute. +The client and server must use the same header for correlation data. + +| |Spring AMQP version 1.1 used a custom property called `spring_reply_correlation` for this data.
If you wish to revert to this behavior with the current version (perhaps to maintain compatibility with another application using 1.1), you must set the attribute to `spring_reply_correlation`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, the template generates its own correlation ID (ignoring any user-supplied value). +If you wish to use your own correlation ID, set the `RabbitTemplate` instance’s `userCorrelationId` property to `true`. + +| |The correlation ID must be unique to avoid the possibility of a wrong reply being returned for a request.| +|---|---------------------------------------------------------------------------------------------------------| + +##### Reply Listener Container + +When using RabbitMQ versions prior to 3.4.0, a new temporary queue is used for each reply. +However, a single reply queue can be configured on the template, which can be more efficient and also lets you set arguments on that queue. +In this case, however, you must also provide a \ sub element. +This element provides a listener container for the reply queue, with the template being the listener. +All of the [Message Listener Container Configuration](#containerAttributes) attributes allowed on a \ are allowed on the element, except for `connection-factory` and `message-converter`, which are inherited from the template’s configuration. + +| |If you run multiple instances of your application or use multiple `RabbitTemplate` instances, you **MUST** use a unique reply queue for each.
RabbitMQ has no ability to select messages from a queue, so, if they all use the same queue, each instance would compete for replies and not necessarily receive their own.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example defines a rabbit template with a connection factory: + +``` + + + +``` + +While the container and template share a connection factory, they do not share a channel. +Therefore, requests and replies are not performed within the same transaction (if transactional). + +| |Prior to version 1.5.0, the `reply-address` attribute was not available.
Replies were always routed by using the default exchange and the `reply-queue` name as the routing key.
This is still the default, but you can now specify the new `reply-address` attribute.
The `reply-address` can contain an address with the form `<exchange>/<routing-key>` and the reply is routed to the specified exchange and routed to a queue bound with the routing key.
The `reply-address` has precedence over `reply-queue`.
When only `reply-address` is in use, the `<reply-listener/>` must be configured as a separate `<listener-container/>` component.
The `reply-address` and `reply-queue` (or `queues` attribute on the ``) must refer to the same queue logically.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +With this configuration, a `SimpleListenerContainer` is used to receive the replies, with the `RabbitTemplate` being the `MessageListener`. +When defining a template with the `` namespace element, as shown in the preceding example, the parser defines the container and wires in the template as the listener. + +| |When the template does not use a fixed `replyQueue` (or is using direct reply-to — see [RabbitMQ Direct reply-to](#direct-reply-to)), a listener container is not needed.
Direct `reply-to` is the preferred mechanism when using RabbitMQ 3.4.0 or later.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you define your `RabbitTemplate` as a `` or use an `@Configuration` class to define it as an `@Bean` or when you create the template programmatically, you need to define and wire up the reply listener container yourself. +If you fail to do this, the template never receives the replies and eventually times out and returns null as the reply to a call to a `sendAndReceive` method. + +Starting with version 1.5, the `RabbitTemplate` detects if it has been +configured as a `MessageListener` to receive replies. +If not, attempts to send and receive messages with a reply address +fail with an `IllegalStateException` (because the replies are never received). + +Further, if a simple `replyAddress` (queue name) is used, the reply listener container verifies that it is listening +to a queue with the same name. +This check cannot be performed if the reply address is an exchange and routing key and a debug log message is written. + +| |When wiring the reply listener and template yourself, it is important to ensure that the template’s `replyAddress` and the container’s `queues` (or `queueNames`) properties refer to the same queue.
The template inserts the reply address into the outbound message `replyTo` property.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following listing shows examples of how to manually wire up the beans: + +``` + + + + + + + + + + + + + + + + +``` + +``` + @Bean + public RabbitTemplate amqpTemplate() { + RabbitTemplate rabbitTemplate = new RabbitTemplate(connectionFactory()); + rabbitTemplate.setMessageConverter(msgConv()); + rabbitTemplate.setReplyAddress(replyQueue().getName()); + rabbitTemplate.setReplyTimeout(60000); + rabbitTemplate.setUseDirectReplyToContainer(false); + return rabbitTemplate; + } + + @Bean + public SimpleMessageListenerContainer replyListenerContainer() { + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(); + container.setConnectionFactory(connectionFactory()); + container.setQueues(replyQueue()); + container.setMessageListener(amqpTemplate()); + return container; + } + + @Bean + public Queue replyQueue() { + return new Queue("my.reply.queue"); + } +``` + +A complete example of a `RabbitTemplate` wired with a fixed reply queue, together with a “remote” listener container that handles the request and returns the reply is shown in [this test case](https://github.com/spring-projects/spring-amqp/tree/main/spring-rabbit/src/test/java/org/springframework/amqp/rabbit/listener/JavaConfigFixedReplyQueueTests.java). + +| |When the reply times out (`replyTimeout`), the `sendAndReceive()` methods return null.| +|---|--------------------------------------------------------------------------------------| + +Prior to version 1.3.6, late replies for timed out messages were only logged. 
+Now, if a late reply is received, it is rejected (the template throws an `AmqpRejectAndDontRequeueException`). +If the reply queue is configured to send rejected messages to a dead letter exchange, the reply can be retrieved for later analysis. +To do so, bind a queue to the configured dead letter exchange with a routing key equal to the reply queue’s name. + +See the [RabbitMQ Dead Letter Documentation](https://www.rabbitmq.com/dlx.html) for more information about configuring dead lettering. +You can also take a look at the `FixedReplyQueueDeadLetterTests` test case for an example. + +##### Async Rabbit Template + +Version 1.6 introduced the `AsyncRabbitTemplate`. +This has similar `sendAndReceive` (and `convertSendAndReceive`) methods to those on the [`AmqpTemplate`](#amqp-template). +However, instead of blocking, they return a `ListenableFuture`. + +The `sendAndReceive` methods return a `RabbitMessageFuture`. +The `convertSendAndReceive` methods return a `RabbitConverterFuture`. + +You can either synchronously retrieve the result later, by invoking `get()` on the future, or you can register a callback that is called asynchronously with the result. +The following listing shows both approaches: + +``` +@Autowired +private AsyncRabbitTemplate template; + +... + +public void doSomeWorkAndGetResultLater() { + + ... + + ListenableFuture future = this.template.convertSendAndReceive("foo"); + + // do some more work + + String reply = null; + try { + reply = future.get(); + } + catch (ExecutionException e) { + ... + } + + ... + +} + +public void doSomeWorkAndGetResultAsync() { + + ... + + RabbitConverterFuture future = this.template.convertSendAndReceive("foo"); + future.addCallback(new ListenableFutureCallback() { + + @Override + public void onSuccess(String result) { + ... + } + + @Override + public void onFailure(Throwable ex) { + ... + } + + }); + + ... 
+ +} +``` + +If `mandatory` is set and the message cannot be delivered, the future throws an `ExecutionException` with a cause of `AmqpMessageReturnedException`, which encapsulates the returned message and information about the return. + +If `enableConfirms` is set, the future has a property called `confirm`, which is itself a `ListenableFuture` with `true` indicating a successful publish. +If the confirm future is `false`, the `RabbitFuture` has a further property called `nackCause`, which contains the reason for the failure, if available. + +| |The publisher confirm is discarded if it is received after the reply, since the reply implies a successful publish.| +|---|-------------------------------------------------------------------------------------------------------------------| + +You can set the `receiveTimeout` property on the template to time out replies (it defaults to `30000` - 30 seconds). +If a timeout occurs, the future is completed with an `AmqpReplyTimeoutException`. + +The template implements `SmartLifecycle`. +Stopping the template while there are pending replies causes the pending `Future` instances to be canceled. + +Starting with version 2.0, the asynchronous template now supports [direct reply-to](https://www.rabbitmq.com/direct-reply-to.html) instead of a configured reply queue. +To enable this feature, use one of the following constructors: + +``` +public AsyncRabbitTemplate(ConnectionFactory connectionFactory, String exchange, String routingKey) + +public AsyncRabbitTemplate(RabbitTemplate template) +``` + +See [RabbitMQ Direct reply-to](#direct-reply-to) to use direct reply-to with the synchronous `RabbitTemplate`. + +Version 2.0 introduced variants of these methods (`convertSendAndReceiveAsType`) that take an additional `ParameterizedTypeReference` argument to convert complex returned types. +You must configure the underlying `RabbitTemplate` with a `SmartMessageConverter`. 
+See [Converting From a `Message` With `RabbitTemplate`](#json-complex) for more information. + +##### Spring Remoting with AMQP + +| |This feature is deprecated and will be removed in 3.0.
It has been superseded for a long time by [Handling Exceptions](#annotation-error-handling) with the `returnExceptions` being set to true, and configuring a `RemoteInvocationAwareMessageConverterAdapter` on the sending side.
See [Handling Exceptions](#annotation-error-handling) for more information.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The Spring Framework has a general remoting capability, allowing [Remote Procedure Calls (RPC)](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/remoting.html) that use various transports. +Spring-AMQP supports a similar mechanism with a `AmqpProxyFactoryBean` on the client and a `AmqpInvokerServiceExporter` on the server. +This provides RPC over AMQP. +On the client side, a `RabbitTemplate` is used as described [earlier](#reply-listener). +On the server side, the invoker (configured as a `MessageListener`) receives the message, invokes the configured service, and returns the reply by using the inbound message’s `replyTo` information. + +You can inject the client factory bean into any bean (by using its `serviceInterface`). +The client can then invoke methods on the proxy, resulting in remote execution over AMQP. + +| |With the default `MessageConverter` instances, the method parameters and returned value must be instances of `Serializable`.| +|---|----------------------------------------------------------------------------------------------------------------------------| + +On the server side, the `AmqpInvokerServiceExporter` has both `AmqpTemplate` and `MessageConverter` properties. +Currently, the template’s `MessageConverter` is not used. +If you need to supply a custom message converter, you should provide it by setting the `messageConverter` property. 
+On the client side, you can add a custom message converter to the `AmqpTemplate`, which is provided to the `AmqpProxyFactoryBean` by using its `amqpTemplate` property. + +The following listing shows sample client and server configurations: + +``` + + + + + + + + + + + + + + + + + + +``` + +``` + + + + + + + + + + + + + + + + + +``` + +| |The `AmqpInvokerServiceExporter` can process only properly formed messages, such as those sent from the `AmqpProxyFactoryBean`.
If it receives a message that it cannot interpret, a serialized `RuntimeException` is sent as a reply.
If the message has no `replyToAddress` property, the message is rejected and permanently lost if no dead letter exchange has been configured.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |By default, if the request message cannot be delivered, the calling thread eventually times out and a `RemoteProxyFailureException` is thrown.
The default timeout is five seconds.
You can modify that duration by setting the `replyTimeout` property on the `RabbitTemplate`.
Starting with version 1.5, by setting the `mandatory` property to `true` and enabling returns on the connection factory (see [Publisher Confirms and Returns](#cf-pub-conf-ret)), the calling thread throws an `AmqpMessageReturnedException`.
See [Reply Timeout](#reply-timeout) for more information.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.11. Configuring the Broker + +The AMQP specification describes how the protocol can be used to configure queues, exchanges, and bindings on the broker. +These operations (which are portable from the 0.8 specification and higher) are present in the `AmqpAdmin` interface in the `org.springframework.amqp.core` package. +The RabbitMQ implementation of that class is `RabbitAdmin` located in the `org.springframework.amqp.rabbit.core` package. + +The `AmqpAdmin` interface is based on using the Spring AMQP domain abstractions and is shown in the following listing: + +``` +public interface AmqpAdmin { + + // Exchange Operations + + void declareExchange(Exchange exchange); + + void deleteExchange(String exchangeName); + + // Queue Operations + + Queue declareQueue(); + + String declareQueue(Queue queue); + + void deleteQueue(String queueName); + + void deleteQueue(String queueName, boolean unused, boolean empty); + + void purgeQueue(String queueName, boolean noWait); + + // Binding Operations + + void declareBinding(Binding binding); + + void removeBinding(Binding binding); + + Properties getQueueProperties(String queueName); + +} +``` + +See also [Scoped Operations](#scoped-operations). + +The `getQueueProperties()` method returns some limited information about the queue (message count and consumer count). 
+The keys for the properties returned are available as constants in the `RabbitTemplate` (`QUEUE_NAME`,`QUEUE_MESSAGE_COUNT`, and `QUEUE_CONSUMER_COUNT`). +The [RabbitMQ REST API](#management-rest-api) provides much more information in the `QueueInfo` object. + +The no-arg `declareQueue()` method defines a queue on the broker with a name that is automatically generated. +The additional properties of this auto-generated queue are `exclusive=true`, `autoDelete=true`, and `durable=false`. + +The `declareQueue(Queue queue)` method takes a `Queue` object and returns the name of the declared queue. +If the `name` property of the provided `Queue` is an empty `String`, the broker declares the queue with a generated name. +That name is returned to the caller. +That name is also added to the `actualName` property of the `Queue`. +You can use this functionality programmatically only by invoking the `RabbitAdmin` directly. +When using auto-declaration by the admin when defining a queue declaratively in the application context, you can set the name property to `""` (the empty string). +The broker then creates the name. +Starting with version 2.1, listener containers can use queues of this type. +See [Containers and Broker-Named queues](#containers-and-broker-named-queues) for more information. + +This is in contrast to an `AnonymousQueue` where the framework generates a unique (`UUID`) name and sets `durable` to`false` and `exclusive`, `autoDelete` to `true`. +A `` with an empty (or missing) `name` attribute always creates an `AnonymousQueue`. + +See [`AnonymousQueue`](#anonymous-queue) to understand why `AnonymousQueue` is preferred over broker-generated queue names as well as +how to control the format of the name. +Starting with version 2.1, anonymous queues are declared with argument `Queue.X_QUEUE_LEADER_LOCATOR` set to `client-local` by default. +This ensures that the queue is declared on the node to which the application is connected. 
+Declarative queues must have fixed names because they might be referenced elsewhere in the context — such as in the +listener shown in the following example: + +``` + + + +``` + +See [Automatic Declaration of Exchanges, Queues, and Bindings](#automatic-declaration). + +The RabbitMQ implementation of this interface is `RabbitAdmin`, which, when configured by using Spring XML, resembles the following example: + +``` + + + +``` + +When the `CachingConnectionFactory` cache mode is `CHANNEL` (the default), the `RabbitAdmin` implementation does automatic lazy declaration of queues, exchanges, and bindings declared in the same `ApplicationContext`. +These components are declared as soon as a `Connection` is opened to the broker. +There are some namespace features that make this very convenient — for example, +in the Stocks sample application, we have the following: + +``` + + + + + + + + + + + + + + + +``` + +In the preceding example, we use anonymous queues (actually, internally, just queues with names generated by the framework, not by the broker) and refer to them by ID. +We can also declare queues with explicit names, which also serve as identifiers for their bean definitions in the context. +The following example configures a queue with an explicit name: + +``` + +``` + +| |You can provide both `id` and `name` attributes.
This lets you refer to the queue (for example, in a binding) by an ID that is independent of the queue name.
It also allows the use of standard Spring features (such as property placeholders and SpEL expressions) for the queue name.
These features are not available when you use the name as the bean identifier.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Queues can be configured with additional arguments — for example, `x-message-ttl`. +When you use the namespace support, they are provided in the form of a `Map` of argument-name/argument-value pairs, which are defined by using the `` element. +The following example shows how to do so: + +``` + + + + + + +``` + +By default, the arguments are assumed to be strings. +For arguments of other types, you must provide the type. +The following example shows how to specify the type: + +``` + + + + + +``` + +When providing arguments of mixed types, you must provide the type for each entry element. +The following example shows how to do so: + +``` + + + + 100 + + + + + +``` + +With Spring Framework 3.2 and later, this can be declared a little more succinctly, as follows: + +``` + + + + + + +``` + +When you use Java configuration, the `Queue.X_QUEUE_LEADER_LOCATOR` argument is supported as a first class property through the `setLeaderLocator()` method on the `Queue` class. +Starting with version 2.1, anonymous queues are declared with this property set to `client-local` by default. +This ensures that the queue is declared on the node the application is connected to. + +| |The RabbitMQ broker does not allow declaration of a queue with mismatched arguments.
For example, if a `queue` already exists with no `time to live` argument, and you attempt to declare it with (for example) `key="x-message-ttl" value="100"`, an exception is thrown.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, the `RabbitAdmin` immediately stops processing all declarations when any exception occurs. +This could cause downstream issues, such as a listener container failing to initialize because another queue (defined after the one in error) is not declared. + +This behavior can be modified by setting the `ignore-declaration-exceptions` attribute to `true` on the `RabbitAdmin` instance. +This option instructs the `RabbitAdmin` to log the exception and continue declaring other elements. +When configuring the `RabbitAdmin` using Java, this property is called `ignoreDeclarationExceptions`. +This is a global setting that applies to all elements. +Queues, exchanges, and bindings have a similar property that applies to just those elements. + +Prior to version 1.6, this property took effect only if an `IOException` occurred on the channel, such as when there is a mismatch between current and desired properties. +Now, this property takes effect on any exception, including `TimeoutException` and others. + +In addition, any declaration exceptions result in the publishing of a `DeclarationExceptionEvent`, which is an `ApplicationEvent` that can be consumed by any `ApplicationListener` in the context. +The event contains a reference to the admin, the element that was being declared, and the `Throwable`. + +##### Headers Exchange + +Starting with version 1.3, you can configure the `HeadersExchange` to match on multiple headers. +You can also specify whether any or all headers must match. 
+The following example shows how to do so: + +``` + + + + + + + + + + + +``` + +Starting with version 1.6, you can configure `Exchanges` with an `internal` flag (defaults to `false`) and such an`Exchange` is properly configured on the Broker through a `RabbitAdmin` (if one is present in the application context). +If the `internal` flag is `true` for an exchange, RabbitMQ does not let clients use the exchange. +This is useful for a dead letter exchange or exchange-to-exchange binding, where you do not wish the exchange to be used +directly by publishers. + +To see how to use Java to configure the AMQP infrastructure, look at the Stock sample application, +where there is the `@Configuration` class `AbstractStockRabbitConfiguration`, which ,in turn has`RabbitClientConfiguration` and `RabbitServerConfiguration` subclasses. +The following listing shows the code for `AbstractStockRabbitConfiguration`: + +``` +@Configuration +public abstract class AbstractStockAppRabbitConfiguration { + + @Bean + public CachingConnectionFactory connectionFactory() { + CachingConnectionFactory connectionFactory = + new CachingConnectionFactory("localhost"); + connectionFactory.setUsername("guest"); + connectionFactory.setPassword("guest"); + return connectionFactory; + } + + @Bean + public RabbitTemplate rabbitTemplate() { + RabbitTemplate template = new RabbitTemplate(connectionFactory()); + template.setMessageConverter(jsonMessageConverter()); + configureRabbitTemplate(template); + return template; + } + + @Bean + public Jackson2JsonMessageConverter jsonMessageConverter() { + return new Jackson2JsonMessageConverter(); + } + + @Bean + public TopicExchange marketDataExchange() { + return new TopicExchange("app.stock.marketdata"); + } + + // additional code omitted for brevity + +} +``` + +In the Stock application, the server is configured by using the following `@Configuration` class: + +``` +@Configuration +public class RabbitServerConfiguration extends AbstractStockAppRabbitConfiguration 
{ + + @Bean + public Queue stockRequestQueue() { + return new Queue("app.stock.request"); + } +} +``` + +This is the end of the whole inheritance chain of `@Configuration` classes. +The end result is that `TopicExchange` and `Queue` are declared to the broker upon application startup. +There is no binding of `TopicExchange` to a queue in the server configuration, as that is done in the client application. +The stock request queue, however, is automatically bound to the AMQP default exchange. +This behavior is defined by the specification. + +The client `@Configuration` class is a little more interesting. +Its declaration follows: + +``` +@Configuration +public class RabbitClientConfiguration extends AbstractStockAppRabbitConfiguration { + + @Value("${stocks.quote.pattern}") + private String marketDataRoutingKey; + + @Bean + public Queue marketDataQueue() { + return amqpAdmin().declareQueue(); + } + + /** + * Binds to the market data exchange. + * Interested in any stock quotes + * that match its routing key. + */ + @Bean + public Binding marketDataBinding() { + return BindingBuilder.bind( + marketDataQueue()).to(marketDataExchange()).with(marketDataRoutingKey); + } + + // additional code omitted for brevity + +} +``` + +The client declares another queue through the `declareQueue()` method on the `AmqpAdmin`. +It binds that queue to the market data exchange with a routing pattern that is externalized in a properties file. + +##### Builder API for Queues and Exchanges + +Version 1.6 introduces a convenient fluent API for configuring `Queue` and `Exchange` objects when using Java configuration. 
+The following example shows how to use it: + +``` +@Bean +public Queue queue() { + return QueueBuilder.nonDurable("foo") + .autoDelete() + .exclusive() + .withArgument("foo", "bar") + .build(); +} + +@Bean +public Exchange exchange() { + return ExchangeBuilder.directExchange("foo") + .autoDelete() + .internal() + .withArgument("foo", "bar") + .build(); +} +``` + +See the Javadoc for [`org.springframework.amqp.core.QueueBuilder`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/core/QueueBuilder.html) and [`org.springframework.amqp.core.ExchangeBuilder`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/core/ExchangeBuilder.html) for more information. + +Starting with version 2.0, the `ExchangeBuilder` now creates durable exchanges by default, to be consistent with the simple constructors on the individual `AbstractExchange` classes. +To make a non-durable exchange with the builder, use `.durable(false)` before invoking `.build()`. +The `durable()` method with no parameter is no longer provided. + +Version 2.2 introduced fluent APIs to add "well known" exchange and queue arguments…​ + +``` +@Bean +public Queue allArgs1() { + return QueueBuilder.nonDurable("all.args.1") + .ttl(1000) + .expires(200_000) + .maxLength(42) + .maxLengthBytes(10_000) + .overflow(Overflow.rejectPublish) + .deadLetterExchange("dlx") + .deadLetterRoutingKey("dlrk") + .maxPriority(4) + .lazy() + .leaderLocator(LeaderLocator.minLeaders) + .singleActiveConsumer() + .build(); +} + +@Bean +public DirectExchange ex() { + return ExchangeBuilder.directExchange("ex.with.alternate") + .durable(true) + .alternate("alternate") + .build(); +} +``` + +##### Declaring Collections of Exchanges, Queues, and Bindings + +You can wrap collections of `Declarable` objects (`Queue`, `Exchange`, and `Binding`) in `Declarables` objects. 
+The `RabbitAdmin` detects such beans (as well as discrete `Declarable` beans) in the application context, and declares the contained objects on the broker whenever a connection is established (initially and after a connection failure). +The following example shows how to do so: + +``` +@Configuration +public static class Config { + + @Bean + public CachingConnectionFactory cf() { + return new CachingConnectionFactory("localhost"); + } + + @Bean + public RabbitAdmin admin(ConnectionFactory cf) { + return new RabbitAdmin(cf); + } + + @Bean + public DirectExchange e1() { + return new DirectExchange("e1", false, true); + } + + @Bean + public Queue q1() { + return new Queue("q1", false, false, true); + } + + @Bean + public Binding b1() { + return BindingBuilder.bind(q1()).to(e1()).with("k1"); + } + + @Bean + public Declarables es() { + return new Declarables( + new DirectExchange("e2", false, true), + new DirectExchange("e3", false, true)); + } + + @Bean + public Declarables qs() { + return new Declarables( + new Queue("q2", false, false, true), + new Queue("q3", false, false, true)); + } + + @Bean + @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) + public Declarables prototypes() { + return new Declarables(new Queue(this.prototypeQueueName, false, false, true)); + } + + @Bean + public Declarables bs() { + return new Declarables( + new Binding("q2", DestinationType.QUEUE, "e2", "k2", null), + new Binding("q3", DestinationType.QUEUE, "e3", "k3", null)); + } + + @Bean + public Declarables ds() { + return new Declarables( + new DirectExchange("e4", false, true), + new Queue("q4", false, false, true), + new Binding("q4", DestinationType.QUEUE, "e4", "k4", null)); + } + +} +``` + +| |In versions prior to 2.1, you could declare multiple `Declarable` instances by defining beans of type `Collection`.
This can cause undesirable side effects in some cases, because the admin has to iterate over all `Collection` beans.
This feature is now disabled in favor of `Declarables`, as discussed earlier in this section.
You can revert to the previous behavior by setting the `RabbitAdmin` property called `declareCollections` to `true`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Version 2.2 added the `getDeclarablesByType` method to `Declarables`; this can be used as a convenience, for example, when declaring the listener container bean(s). + +``` +public SimpleMessageListenerContainer container(ConnectionFactory connectionFactory, + Declarables mixedDeclarables, MessageListener listener) { + + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(connectionFactory); + container.setQueues(mixedDeclarables.getDeclarablesByType(Queue.class).toArray(new Queue[0])); + container.setMessageListener(listener); + return container; +} +``` + +##### Conditional Declaration + +By default, all queues, exchanges, and bindings are declared by all `RabbitAdmin` instances (assuming they have `auto-startup="true"`) in the application context. + +Starting with version 2.1.9, the `RabbitAdmin` has a new property `explicitDeclarationsOnly` (which is `false` by default); when this is set to `true`, the admin will only declare beans that are explicitly configured to be declared by that admin. + +| |Starting with the 1.2 release, you can conditionally declare these elements.
This is particularly useful when an application connects to multiple brokers and needs to specify with which brokers a particular element should be declared.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The classes representing these elements implement `Declarable`, which has two methods: `shouldDeclare()` and `getDeclaringAdmins()`. +The `RabbitAdmin` uses these methods to determine whether a particular instance should actually process the declarations on its `Connection`. + +The properties are available as attributes in the namespace, as shown in the following examples: + +``` + + + + + + + + + + + + + + + + + + + +``` + +| |By default, the `auto-declare` attribute is `true` and, if the `declared-by` is not supplied (or is empty), then all `RabbitAdmin` instances declare the object (as long as the admin’s `auto-startup` attribute is `true`, the default, and the admin’s `explicit-declarations-only` attribute is false).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Similarly, you can use Java-based `@Configuration` to achieve the same effect. 
+In the following example, the components are declared by `admin1` but not by`admin2`: + +``` +@Bean +public RabbitAdmin admin1() { + return new RabbitAdmin(cf1()); +} + +@Bean +public RabbitAdmin admin2() { + return new RabbitAdmin(cf2()); +} + +@Bean +public Queue queue() { + Queue queue = new Queue("foo"); + queue.setAdminsThatShouldDeclare(admin1()); + return queue; +} + +@Bean +public Exchange exchange() { + DirectExchange exchange = new DirectExchange("bar"); + exchange.setAdminsThatShouldDeclare(admin1()); + return exchange; +} + +@Bean +public Binding binding() { + Binding binding = new Binding("foo", DestinationType.QUEUE, exchange().getName(), "foo", null); + binding.setAdminsThatShouldDeclare(admin1()); + return binding; +} +``` + +##### A Note On the `id` and `name` Attributes + +The `name` attribute on `` and `` elements reflects the name of the entity in the broker. +For queues, if the `name` is omitted, an anonymous queue is created (see [`AnonymousQueue`](#anonymous-queue)). + +In versions prior to 2.0, the `name` was also registered as a bean name alias (similar to `name` on `` elements). + +This caused two problems: + +* It prevented the declaration of a queue and exchange with the same name. + +* The alias was not resolved if it contained a SpEL expression (`#{…​}`). + +Starting with version 2.0, if you declare one of these elements with both an `id` *and* a `name` attribute, the name is no longer declared as a bean name alias. +If you wish to declare a queue and exchange with the same `name`, you must provide an `id`. + +There is no change if the element has only a `name` attribute. +The bean can still be referenced by the `name` — for example, in binding declarations. +However, you still cannot reference it if the name contains SpEL — you must provide an `id` for reference purposes. 
+ +##### `AnonymousQueue` + +In general, when you need a uniquely-named, exclusive, auto-delete queue, we recommend that you use the `AnonymousQueue`instead of broker-defined queue names (using `""` as a `Queue` name causes the broker to generate the queue +name). + +This is because: + +1. The queues are actually declared when the connection to the broker is established. + This is long after the beans are created and wired together. + Beans that use the queue need to know its name. + In fact, the broker might not even be running when the application is started. + +2. If the connection to the broker is lost for some reason, the admin re-declares the `AnonymousQueue` with the same name. + If we used broker-declared queues, the queue name would change. + +You can control the format of the queue name used by `AnonymousQueue` instances. + +By default, the queue name is prefixed by `spring.gen-` followed by a base64 representation of the `UUID` — for example: `spring.gen-MRBv9sqISkuCiPfOYfpo4g`. + +You can provide an `AnonymousQueue.NamingStrategy` implementation in a constructor argument. +The following example shows how to do so: + +``` +@Bean +public Queue anon1() { + return new AnonymousQueue(); +} + +@Bean +public Queue anon2() { + return new AnonymousQueue(new AnonymousQueue.Base64UrlNamingStrategy("something-")); +} + +@Bean +public Queue anon3() { + return new AnonymousQueue(AnonymousQueue.UUIDNamingStrategy.DEFAULT); +} +``` + +The first bean generates a queue name prefixed by `spring.gen-` followed by a base64 representation of the `UUID` — for +example: `spring.gen-MRBv9sqISkuCiPfOYfpo4g`. +The second bean generates a queue name prefixed by `something-` followed by a base64 representation of the `UUID`. +The third bean generates a name by using only the UUID (no base64 conversion) — for example, `f20c818a-006b-4416-bf91-643590fedb0e`. + +The base64 encoding uses the “URL and Filename Safe Alphabet” from RFC 4648. +Trailing padding characters (`=`) are removed. 
+ +You can provide your own naming strategy, whereby you can include other information (such as the application name or client host) in the queue name. + +You can specify the naming strategy when you use XML configuration. +The `naming-strategy` attribute is present on the `` element +for a bean reference that implements `AnonymousQueue.NamingStrategy`. +The following examples show how to specify the naming strategy in various ways: + +``` + + + + + + + + + + + +``` + +The first example creates names such as `spring.gen-MRBv9sqISkuCiPfOYfpo4g`. +The second example creates names with a String representation of a UUID. +The third example creates names such as `custom.gen-MRBv9sqISkuCiPfOYfpo4g`. + +You can also provide your own naming strategy bean. + +Starting with version 2.1, anonymous queues are declared with argument `Queue.X_QUEUE_LEADER_LOCATOR` set to `client-local` by default. +This ensures that the queue is declared on the node to which the application is connected. +You can revert to the previous behavior by calling `queue.setLeaderLocator(null)` after constructing the instance. + +##### Recovering Auto-Delete Declarations + +Normally, the `RabbitAdmin` (s) only recover queues/exchanges/bindings that are declared as beans in the application context; if any such declarations are auto-delete, they will be removed by the broker if the connection is lost. +When the connection is re-established, the admin will redeclare the entities. +Normally, entities created by calling `admin.declareQueue(…​)`, `admin.declareExchange(…​)` and `admin.declareBinding(…​)` will not be recovered. + +Starting with version 2.4, the admin has a new property `redeclareManualDeclarations`; when true, the admin will recover these entities in addition to the beans in the application context. + +Recovery of individual declarations will not be performed if `deleteQueue(…​)`, `deleteExchange(…​)` or `removeBinding(…​)` is called. 
+Associated bindings are removed from the recoverable entities when queues and exchanges are deleted. + +Finally, calling `resetAllManualDeclarations()` will prevent the recovery of any previously declared entities. + +#### 4.1.12. Broker Event Listener + +When the [Event Exchange Plugin](https://www.rabbitmq.com/event-exchange.html) is enabled, if you add a bean of type `BrokerEventListener` to the application context, it publishes selected broker events as `BrokerEvent` instances, which can be consumed with a normal Spring `ApplicationListener` or `@EventListener` method. +Events are published by the broker to a topic exchange `amq.rabbitmq.event` with a different routing key for each event type. +The listener uses event keys, which are used to bind an `AnonymousQueue` to the exchange so the listener receives only selected events. +Since it is a topic exchange, wildcards can be used (as well as explicitly requesting specific events), as the following example shows: + +``` +@Bean +public BrokerEventListener eventListener() { + return new BrokerEventListener(connectionFactory(), "user.deleted", "channel.#", "queue.#"); +} +``` + +You can further narrow the received events in individual event listeners, by using normal Spring techniques, as the following example shows: + +``` +@EventListener(condition = "event.eventType == 'queue.created'") +public void listener(BrokerEvent event) { + ... +} +``` + +#### 4.1.13. Delayed Message Exchange + +Version 1.6 introduces support for the[Delayed Message Exchange Plugin](https://www.rabbitmq.com/blog/2015/04/16/scheduling-messages-with-rabbitmq/) + +| |The plugin is currently marked as experimental but has been available for over a year (at the time of writing).
If changes to the plugin make it necessary, we plan to add support for such changes as soon as practical.
For that reason, this support in Spring AMQP should be considered experimental, too.
This functionality was tested with RabbitMQ 3.6.0 and version 0.0.1 of the plugin.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To use a `RabbitAdmin` to declare an exchange as delayed, you can set the `delayed` property on the exchange bean to`true`. +The `RabbitAdmin` uses the exchange type (`Direct`, `Fanout`, and so on) to set the `x-delayed-type` argument and +declare the exchange with type `x-delayed-message`. + +The `delayed` property (default: `false`) is also available when configuring exchange beans using XML. +The following example shows how to use it: + +``` + +``` + +To send a delayed message, you can set the `x-delay` header through `MessageProperties`, as the following examples show: + +``` +MessageProperties properties = new MessageProperties(); +properties.setDelay(15000); +template.send(exchange, routingKey, + MessageBuilder.withBody("foo".getBytes()).andProperties(properties).build()); +``` + +``` +rabbitTemplate.convertAndSend(exchange, routingKey, "foo", new MessagePostProcessor() { + + @Override + public Message postProcessMessage(Message message) throws AmqpException { + message.getMessageProperties().setDelay(15000); + return message; + } + +}); +``` + +To check if a message was delayed, use the `getReceivedDelay()` method on the `MessageProperties`. +It is a separate property to avoid unintended propagation to an output message generated from an input message. + +#### 4.1.14. RabbitMQ REST API + +When the management plugin is enabled, the RabbitMQ server exposes a REST API to monitor and configure the broker. 
+A [Java Binding for the API](https://github.com/rabbitmq/hop) is now provided. +The `com.rabbitmq.http.client.Client` is a standard, immediate, and, therefore, blocking API. +It is based on the [Spring Web](https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html#spring-web) module and its `RestTemplate` implementation. +On the other hand, the `com.rabbitmq.http.client.ReactorNettyClient` is a reactive, non-blocking implementation based on the [Reactor Netty](https://projectreactor.io/docs/netty/release/reference/docs/index.html) project. + +The hop dependency (`com.rabbitmq:http-client`) is now also `optional`. + +See their Javadoc for more information. + +#### 4.1.15. Exception Handling + +Many operations with the RabbitMQ Java client can throw checked exceptions. +For example, there are a lot of cases where `IOException` instances may be thrown. +The `RabbitTemplate`, `SimpleMessageListenerContainer`, and other Spring AMQP components catch those exceptions and convert them into one of the exceptions within `AmqpException` hierarchy. +Those are defined in the 'org.springframework.amqp' package, and `AmqpException` is the base of the hierarchy. + +When a listener throws an exception, it is wrapped in a `ListenerExecutionFailedException`. +Normally the message is rejected and requeued by the broker. +Setting `defaultRequeueRejected` to `false` causes messages to be discarded (or routed to a dead letter exchange). +As discussed in [Message Listeners and the Asynchronous Case](#async-listeners), the listener can throw an `AmqpRejectAndDontRequeueException` (or `ImmediateRequeueAmqpException`) to conditionally control this behavior. + +However, there is a class of errors where the listener cannot control the behavior. +When a message that cannot be converted is encountered (for example, an invalid `content_encoding` header), some exceptions are thrown before the message reaches user code. 
+With `defaultRequeueRejected` set to `true` (default) (or throwing an `ImmediateRequeueAmqpException`), such messages would be redelivered over and over. +Before version 1.3.2, users needed to write a custom `ErrorHandler`, as discussed in [Exception Handling](#exception-handling), to avoid this situation. + +Starting with version 1.3.2, the default `ErrorHandler` is now a `ConditionalRejectingErrorHandler` that rejects (and does not requeue) messages that fail with an irrecoverable error. +Specifically, it rejects messages that fail with the following errors: + +* `o.s.amqp…​MessageConversionException`: Can be thrown when converting the incoming message payload using a `MessageConverter`. + +* `o.s.messaging…​MessageConversionException`: Can be thrown by the conversion service if additional conversion is required when mapping to a `@RabbitListener` method. + +* `o.s.messaging…​MethodArgumentNotValidException`: Can be thrown if validation (for example, `@Valid`) is used in the listener and the validation fails. + +* `o.s.messaging…​MethodArgumentTypeMismatchException`: Can be thrown if the inbound message was converted to a type that is not correct for the target method. + For example, the parameter is declared as `Message` but `Message` is received. + +* `java.lang.NoSuchMethodException`: Added in version 1.6.3. + +* `java.lang.ClassCastException`: Added in version 1.6.3. + +You can configure an instance of this error handler with a `FatalExceptionStrategy` so that users can provide their own rules for conditional message rejection — for example, a delegate implementation to the `BinaryExceptionClassifier` from Spring Retry ([Message Listeners and the Asynchronous Case](#async-listeners)). +In addition, the `ListenerExecutionFailedException` now has a `failedMessage` property that you can use in the decision. +If the `FatalExceptionStrategy.isFatal()` method returns `true`, the error handler throws an `AmqpRejectAndDontRequeueException`. 
+The default `FatalExceptionStrategy` logs a warning message when an exception is determined to be fatal. + +Since version 1.6.3, a convenient way to add user exceptions to the fatal list is to subclass `ConditionalRejectingErrorHandler.DefaultExceptionStrategy` and override the `isUserCauseFatal(Throwable cause)` method to return `true` for fatal exceptions. + +A common pattern for handling DLQ messages is to set a `time-to-live` on those messages as well as additional DLQ configuration such that these messages expire and are routed back to the main queue for retry. +The problem with this technique is that messages that cause fatal exceptions loop forever. +Starting with version 2.1, the `ConditionalRejectingErrorHandler` detects an `x-death` header on a message that causes a fatal exception to be thrown. +The message is logged and discarded. +You can revert to the previous behavior by setting the `discardFatalsWithXDeath` property on the `ConditionalRejectingErrorHandler` to `false`. + +| |Starting with version 2.1.9, messages with these fatal exceptions are rejected and NOT requeued by default, even if the container acknowledge mode is MANUAL.
These exceptions generally occur before the listener is invoked, so the listener does not have a chance to ack or nack the message, so it remains in the queue in an un-acked state.
To revert to the previous behavior, set the `rejectManual` property on the `ConditionalRejectingErrorHandler` to `false`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.16. Transactions + +The Spring Rabbit framework has support for automatic transaction management in the synchronous and asynchronous use cases with a number of different semantics that can be selected declaratively, as is familiar to existing users of Spring transactions. +This makes many if not most common messaging patterns easy to implement. + +There are two ways to signal the desired transaction semantics to the framework. +In both the `RabbitTemplate` and `SimpleMessageListenerContainer`, there is a flag `channelTransacted` which, if `true`, tells the framework to use a transactional channel and to end all operations (send or receive) with a commit or rollback (depending on the outcome), with an exception signaling a rollback. +Another signal is to provide an external transaction with one of Spring’s `PlatformTransactionManager` implementations as a context for the ongoing operation. +If there is already a transaction in progress when the framework is sending or receiving a message, and the `channelTransacted` flag is `true`, the commit or rollback of the messaging transaction is deferred until the end of the current transaction. +If the `channelTransacted` flag is `false`, no transaction semantics apply to the messaging operation (it is auto-acked). + +The `channelTransacted` flag is a configuration time setting. 
+It is declared and processed once when the AMQP components are created, usually at application startup. +The external transaction is more dynamic in principle because the system responds to the current thread state at runtime. +However, in practice, it is often also a configuration setting, when the transactions are layered onto an application declaratively. + +For synchronous use cases with `RabbitTemplate`, the external transaction is provided by the caller, either declaratively or imperatively according to taste (the usual Spring transaction model). +The following example shows a declarative approach (usually preferred because it is non-invasive), where the template has been configured with `channelTransacted=true`: + +``` +@Transactional +public void doSomething() { + String incoming = rabbitTemplate.receiveAndConvert(); + // do some more database processing... + String outgoing = processInDatabaseAndExtractReply(incoming); + rabbitTemplate.convertAndSend(outgoing); +} +``` + +In the preceding example, a `String` payload is received, converted, and sent as a message body inside a method marked as `@Transactional`. +If the database processing fails with an exception, the incoming message is returned to the broker, and the outgoing message is not sent. +This applies to any operations with the `RabbitTemplate` inside a chain of transactional methods (unless, for instance, the `Channel` is directly manipulated to commit the transaction early). + +For asynchronous use cases with `SimpleMessageListenerContainer`, if an external transaction is needed, it has to be requested by the container when it sets up the listener. +To signal that an external transaction is required, the user provides an implementation of `PlatformTransactionManager` to the container when it is configured. 
+The following example shows how to do so: + +``` +@Configuration +public class ExampleExternalTransactionAmqpConfiguration { + + @Bean + public SimpleMessageListenerContainer messageListenerContainer() { + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(); + container.setConnectionFactory(rabbitConnectionFactory()); + container.setTransactionManager(transactionManager()); + container.setChannelTransacted(true); + container.setQueueName("some.queue"); + container.setMessageListener(exampleListener()); + return container; + } + +} +``` + +In the preceding example, the transaction manager is added as a dependency injected from another bean definition (not shown), and the `channelTransacted` flag is also set to `true`. +The effect is that if the listener fails with an exception, the transaction is rolled back, and the message is also returned to the broker. +Significantly, if the transaction fails to commit (for example, because of +a database constraint error or connectivity problem), the AMQP transaction is also rolled back, and the message is returned to the broker. +This is sometimes known as a “Best Efforts 1 Phase Commit”, and is a very powerful pattern for reliable messaging. +If the `channelTransacted` flag was set to `false` (the default) in the preceding example, the external transaction would still be provided for the listener, but all messaging operations would be auto-acked, so the effect is to commit the messaging operations even on a rollback of the business operation. + +##### Conditional Rollback + +Prior to version 1.6.6, adding a rollback rule to a container’s `transactionAttribute` when using an external transaction manager (such as JDBC) had no effect. +Exceptions always rolled back the transaction. 
+ +Also, when using a [transaction advice](https://docs.spring.io/spring-framework/docs/current/spring-framework-reference/html/transaction.html#transaction-declarative) in the container’s advice chain, conditional rollback was not very useful, because all listener exceptions are wrapped in a `ListenerExecutionFailedException`. + +The first problem has been corrected, and the rules are now applied properly. +Further, the `ListenerFailedRuleBasedTransactionAttribute` is now provided. +It is a subclass of `RuleBasedTransactionAttribute`, with the only difference being that it is aware of the `ListenerExecutionFailedException` and uses the cause of such exceptions for the rule. +This transaction attribute can be used directly in the container or through a transaction advice. + +The following example uses this rule: + +``` +@Bean +public AbstractMessageListenerContainer container() { + ... + container.setTransactionManager(transactionManager); + RuleBasedTransactionAttribute transactionAttribute = + new ListenerFailedRuleBasedTransactionAttribute(); + transactionAttribute.setRollbackRules(Collections.singletonList( + new NoRollbackRuleAttribute(DontRollBackException.class))); + container.setTransactionAttribute(transactionAttribute); + ... +} +``` + +##### A note on Rollback of Received Messages + +AMQP transactions apply only to messages and acks sent to the broker. +Consequently, when there is a rollback of a Spring transaction and a message has been received, Spring AMQP has to not only rollback the transaction but also manually reject the message (sort of a nack, but that is not what the specification calls it). +The action taken on message rejection is independent of transactions and depends on the `defaultRequeueRejected` property (default: `true`). +For more information about rejecting failed messages, see [Message Listeners and the Asynchronous Case](#async-listeners). 
+ +For more information about RabbitMQ transactions and their limitations, see [RabbitMQ Broker Semantics](https://www.rabbitmq.com/semantics.html). + +| |Prior to RabbitMQ 2.7.0, such messages (and any that are unacked when a channel is closed or aborts) went to the back of the queue on a Rabbit broker.
Since 2.7.0, rejected messages go to the front of the queue, in a similar manner to JMS rolled back messages.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Previously, message requeue on transaction rollback was inconsistent between local transactions and when a `TransactionManager` was provided.
In the former case, the normal requeue logic (`AmqpRejectAndDontRequeueException` or `defaultRequeueRejected=false`) applied (see [Message Listeners and the Asynchronous Case](#async-listeners)).
With a transaction manager, the message was unconditionally requeued on rollback.
Starting with version 2.0, the behavior is consistent and the normal requeue logic is applied in both cases.
To revert to the previous behavior, you can set the container’s `alwaysRequeueWithTxManagerRollback` property to `true`.
See [Message Listener Container Configuration](#containerAttributes).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Using `RabbitTransactionManager` + +The [RabbitTransactionManager](https://docs.spring.io/spring-amqp/docs/latest_ga/api/org/springframework/amqp/rabbit/transaction/RabbitTransactionManager.html) is an alternative to executing Rabbit operations within, and synchronized with, external transactions. +This transaction manager is an implementation of the [`PlatformTransactionManager`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/transaction/PlatformTransactionManager.html) interface and should be used with a single Rabbit `ConnectionFactory`. + +| |This strategy is not able to provide XA transactions — for example, in order to share transactions between messaging and database access.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------| + +Application code is required to retrieve the transactional Rabbit resources through `ConnectionFactoryUtils.getTransactionalResourceHolder(ConnectionFactory, boolean)` instead of a standard `Connection.createChannel()` call with subsequent channel creation. 
+When using Spring AMQP’s [RabbitTemplate](https://docs.spring.io/spring-amqp/docs/latest_ga/api/org/springframework/amqp/rabbit/core/RabbitTemplate.html), it will autodetect a thread-bound Channel and automatically participate in its transaction.
+
+With Java Configuration, you can set up a new RabbitTransactionManager by using the following bean:
+
+```
+@Bean
+public RabbitTransactionManager rabbitTransactionManager() {
+    return new RabbitTransactionManager(connectionFactory);
+}
+```
+
+If you prefer XML configuration, you can declare the following bean in your XML Application Context file:
+
+```
+<bean id="rabbitTxManager"
+    class="org.springframework.amqp.rabbit.transaction.RabbitTransactionManager">
+    <property name="connectionFactory" ref="connectionFactory"/>
+</bean>
+```
+
+##### Transaction Synchronization
+
+Synchronizing a RabbitMQ transaction with some other (e.g. DBMS) transaction provides "Best Effort One Phase Commit" semantics.
+It is possible that the RabbitMQ transaction fails to commit during the after completion phase of transaction synchronization.
+This is logged by the `spring-tx` infrastructure as an error, but no exception is thrown to the calling code.
+Starting with version 2.3.10, you can call `ConnectionUtils.checkAfterCompletion()` after the transaction has committed on the same thread that processed the transaction.
+It will simply return if no exception occurred; otherwise it will throw an `AfterCompletionFailedException` which will have a property representing the synchronization status of the completion.
+
+Enable this feature by calling `ConnectionFactoryUtils.enableAfterCompletionFailureCapture(true)`; this is a global flag and applies to all threads.
+
+#### 4.1.17. Message Listener Container Configuration
+
+There are quite a few options for configuring a `SimpleMessageListenerContainer` (SMLC) and a `DirectMessageListenerContainer` (DMLC) related to transactions and quality of service, and some of them interact with each other.
+Properties that apply to the SMLC, DMLC, or `StreamListenerContainer` (StLC) (see [Using the RabbitMQ Stream Plugin](#stream-support)) are indicated by the check mark in the appropriate column.
+See [Choosing a Container](#choose-container) for information to help you decide which container is appropriate for your application.
+
+The following table shows the container property names and their equivalent attribute names (in parentheses) when using the namespace to configure a `<rabbit:listener-container/>`.
+The `type` attribute on that element can be `simple` (default) or `direct` to specify an `SMLC` or `DMLC` respectively.
+Some properties are not exposed by the namespace.
+These are indicated by `N/A` for the attribute.
+
+| Property
(Attribute) | Description | SMLC | DMLC | StLC | +|-----------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------|--------------------------------|--------------------------------| +| | | +| | | +| | | +| | | +| | | +| | | +| | Prior to version 1.6, if there was more than one admin in the context, the container would randomly select one.
If there were no admins, it would create one internally.
In either case, this could cause unexpected results.
Starting with version 1.6, for `autoDeclare` to work, there must be exactly one `RabbitAdmin` in the context, or a reference to a specific instance must be configured on the container using the `rabbitAdmin` property. | | | | +| | +| | | | +| | | +| | | +| | | | +| | | | +| | | +| | | | +| | | | +| | | | +| | +| | | | +| | | +| | | +| | | +| | | +| | | | +| | | +| | | +| | | +| | | +| | | +| | | +| | | +| (group) | This is available only when using the namespace.
When specified, a bean of type `Collection<MessageListenerContainer>` is registered with this name, and the
container for each `<listener/>` element is added to the collection.
This allows, for example, starting and stopping the group of containers by iterating over the collection.
If multiple `<listener-container/>` elements have the same group value, the containers in the collection form
an aggregate of all containers so designated. |![tickmark](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/tickmark.png)| | +| | | +| | | +| | | | +| | | +| | | +| | If the broker is not available during initial startup, the container starts and the conditions are checked when the connection is established. | | | | +| | The check is done against all queues in the context, not just the queues that a particular listener is configured to use.
If you wish to limit the checks to just those queues used by a container, you should configure a separate `RabbitAdmin` for the container, and provide a reference to it using the `rabbitAdmin` property.
See [Conditional Declaration](#conditional-declaration) for more information. | | | | +| | Mismatched queue argument detection is disabled while starting a container for a `@RabbitListener` in a bean that is marked `@Lazy`.
This is to avoid a potential deadlock which can delay the start of such containers for up to 60 seconds.
Applications using lazy listener beans should check the queue arguments before getting a reference to the lazy bean. | | | | +| | | +| | Missing queue detection is disabled while starting a container for a `@RabbitListener` in a bean that is marked `@Lazy`.
This is to avoid a potential deadlock which can delay the start of such containers for up to 60 seconds.
Applications using lazy listener beans should check the queue(s) before getting a reference to the lazy bean. | | | | +| | | +| | | +| | | +|| | +| | | +| | There are scenarios where the prefetch value should
be low — for example, with large messages, especially if the processing is slow (messages could add up
to a large amount of memory in the client process), and if strict message ordering is necessary
(the prefetch value should be set back to 1 in this case).
Also, with low-volume messaging and multiple consumers (including concurrency within a single listener container instance), you may wish to reduce the prefetch to get a more even distribution of messages across consumers. | | | | +| | | +| | | | +| | | +| | | +| | | | +| | | +| | | | +| | | +| | | | +| | +| | | +| | | +| | | + +#### 4.1.18. Listener Concurrency + +##### SimpleMessageListenerContainer + +By default, the listener container starts a single consumer that receives messages from the queues. + +When examining the table in the previous section, you can see a number of properties and attributes that control concurrency. +The simplest is `concurrentConsumers`, which creates that (fixed) number of consumers that concurrently process messages. + +Prior to version 1.3.0, this was the only setting available and the container had to be stopped and started again to change the setting. + +Since version 1.3.0, you can now dynamically adjust the `concurrentConsumers` property. +If it is changed while the container is running, consumers are added or removed as necessary to adjust to the new setting. + +In addition, a new property called `maxConcurrentConsumers` has been added and the container dynamically adjusts the concurrency based on workload. +This works in conjunction with four additional properties: `consecutiveActiveTrigger`, `startConsumerMinInterval`, `consecutiveIdleTrigger`, and `stopConsumerMinInterval`. +With the default settings, the algorithm to increase consumers works as follows: + +If the `maxConcurrentConsumers` has not been reached and an existing consumer is active for ten consecutive cycles AND at least 10 seconds has elapsed since the last consumer was started, a new consumer is started. +A consumer is considered active if it received at least one message in `batchSize` \* `receiveTimeout` milliseconds. 
+ +With the default settings, the algorithm to decrease consumers works as follows: + +If there are more than `concurrentConsumers` running and a consumer detects ten consecutive timeouts (idle) AND the last consumer was stopped at least 60 seconds ago, a consumer is stopped. +The timeout depends on the `receiveTimeout` and the `batchSize` properties. +A consumer is considered idle if it receives no messages in `batchSize` \* `receiveTimeout` milliseconds. +So, with the default timeout (one second) and a `batchSize` of four, stopping a consumer is considered after 40 seconds of idle time (four timeouts correspond to one idle detection). + +| |Practically, consumers can be stopped only if the whole container is idle for some time.
This is because the broker shares its work across all the active consumers.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Each consumer uses a single channel, regardless of the number of configured queues. + +Starting with version 2.0, the `concurrentConsumers` and `maxConcurrentConsumers` properties can be set with the `concurrency` property — for example, `2-4`. + +##### Using `DirectMessageListenerContainer` + +With this container, concurrency is based on the configured queues and `consumersPerQueue`. +Each consumer for each queue uses a separate channel, and the concurrency is controlled by the rabbit client library. +By default, at the time of writing, it uses a pool of `DEFAULT_NUM_THREADS = Runtime.getRuntime().availableProcessors() * 2` threads. + +You can configure a `taskExecutor` to provide the required maximum concurrency. + +#### 4.1.19. Exclusive Consumer + +Starting with version 1.3, you can configure the listener container with a single exclusive consumer. +This prevents other containers from consuming from the queues until the current consumer is cancelled. +The concurrency of such a container must be `1`. + +When using exclusive consumers, other containers try to consume from the queues according to the `recoveryInterval` property and log a `WARN` message if the attempt fails. + +#### 4.1.20. Listener Container Queues + +Version 1.3 introduced a number of improvements for handling multiple queues in a listener container. + +The container must be configured to listen on at least one queue. +This was the case previously, too, but now queues can be added and removed at runtime. +The container recycles (cancels and re-creates) the consumers when any pre-fetched messages have been processed. 
+See the [Javadoc](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/rabbit/listener/AbstractMessageListenerContainer.html) for the `addQueues`, `addQueueNames`, `removeQueues` and `removeQueueNames` methods. +When removing queues, at least one queue must remain. + +A consumer now starts if any of its queues are available. +Previously, the container would stop if any queues were unavailable. +Now, this is only the case if none of the queues are available. +If not all queues are available, the container tries to passively declare (and consume from) the missing queues every 60 seconds. + +Also, if a consumer receives a cancel from the broker (for example, if a queue is deleted) the consumer tries to recover, and the recovered consumer continues to process messages from any other configured queues. +Previously, a cancel on one queue cancelled the entire consumer and, eventually, the container would stop due to the missing queue. + +If you wish to permanently remove a queue, you should update the container before or after deleting to queue, to avoid future attempts trying to consume from it. + +#### 4.1.21. Resilience: Recovering from Errors and Broker Failures + +Some of the key (and most popular) high-level features that Spring AMQP provides are to do with recovery and automatic re-connection in the event of a protocol error or broker failure. +We have seen all the relevant components already in this guide, but it should help to bring them all together here and call out the features and recovery scenarios individually. + +The primary reconnection features are enabled by the `CachingConnectionFactory` itself. +It is also often beneficial to use the `RabbitAdmin` auto-declaration features. 
+In addition, if you care about guaranteed delivery, you probably also need to use the `channelTransacted` flag in `RabbitTemplate` and `SimpleMessageListenerContainer` and the `AcknowledgeMode.AUTO` (or manual if you do the acks yourself) in the `SimpleMessageListenerContainer`. + +##### Automatic Declaration of Exchanges, Queues, and Bindings + +The `RabbitAdmin` component can declare exchanges, queues, and bindings on startup. +It does this lazily, through a `ConnectionListener`. +Consequently, if the broker is not present on startup, it does not matter. +The first time a `Connection` is used (for example, +by sending a message) the listener fires and the admin features is applied. +A further benefit of doing the auto declarations in a listener is that, if the connection is dropped for any reason (for example, +broker death, network glitch, and others), they are applied again when the connection is re-established. + +| |Queues declared this way must have fixed names — either explicitly declared or generated by the framework for `AnonymousQueue` instances.
Anonymous queues are non-durable, exclusive, and auto-deleting.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Automatic declaration is performed only when the `CachingConnectionFactory` cache mode is `CHANNEL` (the default).
This limitation exists because exclusive and auto-delete queues are bound to the connection.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.2.2, the `RabbitAdmin` will detect beans of type `DeclarableCustomizer` and apply the function before actually processing the declaration. +This is useful, for example, to set a new argument (property) before it has first class support within the framework. + +``` +@Bean +public DeclarableCustomizer customizer() { + return dec -> { + if (dec instanceof Queue && ((Queue) dec).getName().equals("my.queue")) { + dec.addArgument("some.new.queue.argument", true); + } + return dec; + }; +} +``` + +It is also useful in projects that don’t provide direct access to the `Declarable` bean definitions. + +See also [RabbitMQ Automatic Connection/Topology recovery](#auto-recovery). + +##### Failures in Synchronous Operations and Options for Retry + +If you lose your connection to the broker in a synchronous sequence when using `RabbitTemplate` (for instance), Spring AMQP throws an `AmqpException` (usually, but not always, `AmqpIOException`). +We do not try to hide the fact that there was a problem, so you have to be able to catch and respond to the exception. +The easiest thing to do if you suspect that the connection was lost (and it was not your fault) is to try the operation again. +You can do this manually, or you could look at using Spring Retry to handle the retry (imperatively or declaratively). + +Spring Retry provides a couple of AOP interceptors and a great deal of flexibility to specify the parameters of the retry (number of attempts, exception types, backoff algorithm, and others). 
+Spring AMQP also provides some convenience factory beans for creating Spring Retry interceptors in a convenient form for AMQP use cases, with strongly typed callback interfaces that you can use to implement custom recovery logic. +See the Javadoc and properties of `StatefulRetryOperationsInterceptor` and `StatelessRetryOperationsInterceptor` for more detail. +Stateless retry is appropriate if there is no transaction or if a transaction is started inside the retry callback. +Note that stateless retry is simpler to configure and analyze than stateful retry, but it is not usually appropriate if there is an ongoing transaction that must be rolled back or definitely is going to roll back. +A dropped connection in the middle of a transaction should have the same effect as a rollback. +Consequently, for reconnections where the transaction is started higher up the stack, stateful retry is usually the best choice. +Stateful retry needs a mechanism to uniquely identify a message. +The simplest approach is to have the sender put a unique value in the `MessageId` message property. +The provided message converters provide an option to do this: you can set `createMessageIds` to `true`. +Otherwise, you can inject a `MessageKeyGenerator` implementation into the interceptor. +The key generator must return a unique key for each message. +In versions prior to version 2.0, a `MissingMessageIdAdvice` was provided. +It enabled messages without a `messageId` property to be retried exactly once (ignoring the retry settings). +This advice is no longer provided, since, along with `spring-retry` version 1.2, its functionality is built into the interceptor and message listener containers. + +| |For backwards compatibility, a message with a null message ID is considered fatal for the consumer (consumer is stopped) by default (after one retry).
To replicate the functionality provided by the `MissingMessageIdAdvice`, you can set the `statefulRetryFatalWithNullMessageId` property to `false` on the listener container.
With that setting, the consumer continues to run and the message is rejected (after one retry).
It is discarded or routed to the dead letter queue (if one is configured).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 1.3, a builder API is provided to aid in assembling these interceptors by using Java (in `@Configuration` classes). +The following example shows how to do so: + +``` +@Bean +public StatefulRetryOperationsInterceptor interceptor() { + return RetryInterceptorBuilder.stateful() + .maxAttempts(5) + .backOffOptions(1000, 2.0, 10000) // initialInterval, multiplier, maxInterval + .build(); +} +``` + +Only a subset of retry capabilities can be configured this way. +More advanced features would need the configuration of a `RetryTemplate` as a Spring bean. +See the [Spring Retry Javadoc](https://docs.spring.io/spring-retry/docs/api/current/) for complete information about available policies and their configuration. + +##### Retry with Batch Listeners + +It is not recommended to configure retry with a batch listener, unless the batch was created by the producer, in a single record. +See [Batched Messages](#de-batching) for information about consumer and producer-created batches. +With a consumer-created batch, the framework has no knowledge about which message in the batch caused the failure so recovery after the retries are exhausted is not possible. +With producer-created batches, since there is only one message that actually failed, the whole message can be recovered. 
+Applications may want to inform a custom recoverer where in the batch the failure occurred, perhaps by setting an index property of the thrown exception. + +A retry recoverer for a batch listener must implement `MessageBatchRecoverer`. + +##### Message Listeners and the Asynchronous Case + +If a `MessageListener` fails because of a business exception, the exception is handled by the message listener container, which then goes back to listening for another message. +If the failure is caused by a dropped connection (not a business exception), the consumer that is collecting messages for the listener has to be cancelled and restarted. +The `SimpleMessageListenerContainer` handles this seamlessly, and it leaves a log to say that the listener is being restarted. +In fact, it loops endlessly, trying to restart the consumer. +Only if the consumer is very badly behaved indeed will it give up. +One side effect is that if the broker is down when the container starts, it keeps trying until a connection can be established. + +Business exception handling, as opposed to protocol errors and dropped connections, might need more thought and some custom configuration, especially if transactions or container acks are in use. +Prior to 2.8.x, RabbitMQ had no definition of dead letter behavior. +Consequently, by default, a message that is rejected or rolled back because of a business exception can be redelivered endlessly. +To put a limit on the client on the number of re-deliveries, one choice is a `StatefulRetryOperationsInterceptor` in the advice chain of the listener. +The interceptor can have a recovery callback that implements a custom dead letter action — whatever is appropriate for your particular environment. + +Another alternative is to set the container’s `defaultRequeueRejected` property to `false`. +This causes all failed messages to be discarded. +When using RabbitMQ 2.8.x or higher, this also facilitates delivering the message to a dead letter exchange. 
+ +Alternatively, you can throw a `AmqpRejectAndDontRequeueException`. +Doing so prevents message requeuing, regardless of the setting of the `defaultRequeueRejected` property. + +Starting with version 2.1, an `ImmediateRequeueAmqpException` is introduced to perform exactly the opposite logic: the message will be requeued, regardless of the setting of the `defaultRequeueRejected` property. + +Often, a combination of both techniques is used. +You can use a `StatefulRetryOperationsInterceptor` in the advice chain with a `MessageRecoverer` that throws an `AmqpRejectAndDontRequeueException`. +The `MessageRecover` is called when all retries have been exhausted. +The `RejectAndDontRequeueRecoverer` does exactly that. +The default `MessageRecoverer` consumes the errant message and emits a `WARN` message. + +Starting with version 1.3, a new `RepublishMessageRecoverer` is provided, to allow publishing of failed messages after retries are exhausted. + +When a recoverer consumes the final exception, the message is ack’d and is not sent to the dead letter exchange, if any. + +| |When `RepublishMessageRecoverer` is used on the consumer side, the received message has `deliveryMode` in the `receivedDeliveryMode` message property.
In this case the `deliveryMode` is `null`.
That means a `NON_PERSISTENT` delivery mode on the broker.
Starting with version 2.0, you can configure the `RepublishMessageRecoverer` for the `deliveryMode` to set into the message to republish if it is `null`.
By default, it uses `MessageProperties` default value - `MessageDeliveryMode.PERSISTENT`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to set a `RepublishMessageRecoverer` as the recoverer: + +``` +@Bean +RetryOperationsInterceptor interceptor() { + return RetryInterceptorBuilder.stateless() + .maxAttempts(5) + .recoverer(new RepublishMessageRecoverer(amqpTemplate(), "something", "somethingelse")) + .build(); +} +``` + +The `RepublishMessageRecoverer` publishes the message with additional information in message headers, such as the exception message, stack trace, original exchange, and routing key. +Additional headers can be added by creating a subclass and overriding `additionalHeaders()`. +The `deliveryMode` (or any other properties) can also be changed in the `additionalHeaders()`, as the following example shows: + +``` +RepublishMessageRecoverer recoverer = new RepublishMessageRecoverer(amqpTemplate, "error") { + + protected Map additionalHeaders(Message message, Throwable cause) { + message.getMessageProperties() + .setDeliveryMode(message.getMessageProperties().getReceivedDeliveryMode()); + return null; + } + +}; +``` + +Starting with version 2.0.5, the stack trace may be truncated if it is too large; this is because all headers have to fit in a single frame. +By default, if the stack trace would cause less than 20,000 bytes ('headroom') to be available for other headers, it will be truncated. 
+This can be adjusted by setting the recoverer’s `frameMaxHeadroom` property, if you need more or less space for other headers. +Starting with versions 2.1.13, 2.2.3, the exception message is included in this calculation, and the amount of stack trace will be maximized using the following algorithm: + +* if the stack trace alone would exceed the limit, the exception message header will be truncated to 97 bytes plus `…​` and the stack trace is truncated too. + +* if the stack trace is small, the message will be truncated (plus `…​`) to fit in the available bytes (but the message within the stack trace itself is truncated to 97 bytes plus `…​`). + +Whenever a truncation of any kind occurs, the original exception will be logged to retain the complete information. + +Starting with version 2.3.3, a new subclass `RepublishMessageRecovererWithConfirms` is provided; this supports both styles of publisher confirms and will wait for the confirmation before returning (or throw an exception if not confirmed or the message is returned). + +If the confirm type is `CORRELATED`, the subclass will also detect if a message is returned and throw an `AmqpMessageReturnedException`; if the publication is negatively acknowledged, it will throw an `AmqpNackReceivedException`. + +If the confirm type is `SIMPLE`, the subclass will invoke the `waitForConfirmsOrDie` method on the channel. + +See [Publisher Confirms and Returns](#cf-pub-conf-ret) for more information about confirms and returns. + +Starting with version 2.1, an `ImmediateRequeueMessageRecoverer` is added to throw an `ImmediateRequeueAmqpException`, which notifies a listener container to requeue the current failed message. + +##### Exception Classification for Spring Retry + +Spring Retry has a great deal of flexibility for determining which exceptions can invoke retry. +The default configuration retries for all exceptions. 
+Given that user exceptions are wrapped in a `ListenerExecutionFailedException`, we need to ensure that the classification examines the exception causes. +The default classifier looks only at the top level exception. + +Since Spring Retry 1.0.3, the `BinaryExceptionClassifier` has a property called `traverseCauses` (default: `false`). +When `true`, it travers exception causes until it finds a match or there is no cause. + +To use this classifier for retry, you can use a `SimpleRetryPolicy` created with the constructor that takes the max attempts, the `Map` of `Exception` instances, and the boolean (`traverseCauses`) and inject this policy into the `RetryTemplate`. + +#### Support + +Version 2.3 added more convenience when communicating between a single application and multiple brokers or broker clusters. +The main benefit, on the consumer side, is that the infrastructure can automatically associate auto-declared queues with the appropriate broker. + +This is best illustrated with an example: + +``` +@SpringBootApplication(exclude = RabbitAutoConfiguration.class) +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + CachingConnectionFactory cf1() { + return new CachingConnectionFactory("localhost"); + } + + @Bean + CachingConnectionFactory cf2() { + return new CachingConnectionFactory("otherHost"); + } + + @Bean + CachingConnectionFactory cf3() { + return new CachingConnectionFactory("thirdHost"); + } + + @Bean + SimpleRoutingConnectionFactory rcf(CachingConnectionFactory cf1, + CachingConnectionFactory cf2, CachingConnectionFactory cf3) { + + SimpleRoutingConnectionFactory rcf = new SimpleRoutingConnectionFactory(); + rcf.setDefaultTargetConnectionFactory(cf1); + rcf.setTargetConnectionFactories(Map.of("one", cf1, "two", cf2, "three", cf3)); + return rcf; + } + + @Bean("factory1-admin") + RabbitAdmin admin1(CachingConnectionFactory cf1) { + return new RabbitAdmin(cf1); + } + + 
@Bean("factory2-admin") + RabbitAdmin admin2(CachingConnectionFactory cf2) { + return new RabbitAdmin(cf2); + } + + @Bean("factory3-admin") + RabbitAdmin admin3(CachingConnectionFactory cf3) { + return new RabbitAdmin(cf3); + } + + @Bean + public RabbitListenerEndpointRegistry rabbitListenerEndpointRegistry() { + return new RabbitListenerEndpointRegistry(); + } + + @Bean + public RabbitListenerAnnotationBeanPostProcessor postProcessor(RabbitListenerEndpointRegistry registry) { + MultiRabbitListenerAnnotationBeanPostProcessor postProcessor + = new MultiRabbitListenerAnnotationBeanPostProcessor(); + postProcessor.setEndpointRegistry(registry); + postProcessor.setContainerFactoryBeanName("defaultContainerFactory"); + return postProcessor; + } + + @Bean + public SimpleRabbitListenerContainerFactory factory1(CachingConnectionFactory cf1) { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(cf1); + return factory; + } + + @Bean + public SimpleRabbitListenerContainerFactory factory2(CachingConnectionFactory cf2) { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(cf2); + return factory; + } + + @Bean + public SimpleRabbitListenerContainerFactory factory3(CachingConnectionFactory cf3) { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(cf3); + return factory; + } + + @Bean + RabbitTemplate template(RoutingConnectionFactory rcf) { + return new RabbitTemplate(rcf); + } + + @Bean + ConnectionFactoryContextWrapper wrapper(SimpleRoutingConnectionFactory rcf) { + return new ConnectionFactoryContextWrapper(rcf); + } + +} + +@Component +class Listeners { + + @RabbitListener(queuesToDeclare = @Queue("q1"), containerFactory = "factory1") + public void listen1(String in) { + + } + + @RabbitListener(queuesToDeclare = @Queue("q2"), containerFactory = "factory2") + 
public void listen2(String in) { + + } + + @RabbitListener(queuesToDeclare = @Queue("q3"), containerFactory = "factory3") + public void listen3(String in) { + + } + +} +``` + +As you can see, we have declared 3 sets of infrastructure (connection factories, admins, container factories). +As discussed earlier, `@RabbitListener` can define which container factory to use; in this case, they also use `queuesToDeclare` which causes the queue(s) to be declared on the broker, if it doesn’t exist. +By naming the `RabbitAdmin` beans with the convention `-admin`, the infrastructure is able to determine which admin should declare the queue. +This will also work with `bindings = @QueueBinding(…​)` whereby the exchange and binding will also be declared. +It will NOT work with `queues`, since that expects the queue(s) to already exist. + +On the producer side, a convenient `ConnectionFactoryContextWrapper` class is provided, to make using the `RoutingConnectionFactory` (see [Routing Connection Factory](#routing-connection-factory)) simpler. + +As you can see above, a `SimpleRoutingConnectionFactory` bean has been added with routing keys `one`, `two` and `three`. +There is also a `RabbitTemplate` that uses that factory. +Here is an example of using that template with the wrapper to route to one of the broker clusters. + +``` +@Bean +public ApplicationRunner runner(RabbitTemplate template, ConnectionFactoryContextWrapper wrapper) { + return args -> { + wrapper.run("one", () -> template.convertAndSend("q1", "toCluster1")); + wrapper.run("two", () -> template.convertAndSend("q2", "toCluster2")); + wrapper.run("three", () -> template.convertAndSend("q3", "toCluster3")); + }; +} +``` + +#### 4.1.23. Debugging + +Spring AMQP provides extensive logging, especially at the `DEBUG` level. + +If you wish to monitor the AMQP protocol between the application and broker, you can use a tool such as WireShark, which has a plugin to decode the protocol. 
+Alternatively, the RabbitMQ Java client comes with a very useful class called `Tracer`. +When run as a `main`, by default, it listens on port 5673 and connects to port 5672 on localhost. +You can run it and change your connection factory configuration to connect to port 5673 on localhost. +It displays the decoded protocol on the console. +Refer to the `Tracer` Javadoc for more information. + +### 4.2. Using the RabbitMQ Stream Plugin + +Version 2.4 introduces initial support for the [RabbitMQ Stream Plugin Java Client](https://github.com/rabbitmq/rabbitmq-stream-java-client) for the [RabbitMQ Stream Plugin](https://rabbitmq.com/stream.html). + +* `RabbitStreamTemplate` + +* `StreamListenerContainer` + +#### 4.2.1. Sending Messages + +The `RabbitStreamTemplate` provides a subset of the `RabbitTemplate` (AMQP) functionality. + +Example 1. RabbitStreamOperations + +``` +public interface RabbitStreamOperations extends AutoCloseable { + + ListenableFuture send(Message message); + + ListenableFuture convertAndSend(Object message); + + ListenableFuture convertAndSend(Object message, @Nullable MessagePostProcessor mpp); + + ListenableFuture send(com.rabbitmq.stream.Message message); + + MessageBuilder messageBuilder(); + + MessageConverter messageConverter(); + + StreamMessageConverter streamMessageConverter(); + + @Override + void close() throws AmqpException; + +} +``` + +The `RabbitStreamTemplate` implementation has the following constructor and properties: + +Example 2. RabbitStreamTemplate + +``` +public RabbitStreamTemplate(Environment environment, String streamName) { +} + +public void setMessageConverter(MessageConverter messageConverter) { +} + +public void setStreamConverter(StreamMessageConverter streamConverter) { +} + +public synchronized void setProducerCustomizer(ProducerCustomizer producerCustomizer) { +} +``` + +The `MessageConverter` is used in the `convertAndSend` methods to convert the object to a Spring AMQP `Message`. 
+ +The `StreamMessageConverter` is used to convert from a Spring AMQP `Message` to a native stream `Message`. + +You can also send native stream `Message` s directly; with the `messageBuilder()` method provding access to the `Producer` 's message builder. + +The `ProducerCustomizer` provides a mechanism to customize the producer before it is built. + +Refer to the [Java Client Documentation](https://rabbitmq.github.io/rabbitmq-stream-java-client/stable/htmlsingle/) about customizing the `Environment` and `Producer`. + +#### 4.2.2. Receiving Messages + +Asynchronous message reception is provided by the `StreamListenerContainer` (and the `StreamRabbitListenerContainerFactory` when using `@RabbitListener`). + +The listener container requires an `Environment` as well as a single stream name. + +You can either receive Spring AMQP `Message` s using the classic `MessageListener`, or you can receive native stream `Message` s using a new interface: + +``` +public interface StreamMessageListener extends MessageListener { + + void onStreamMessage(Message message, Context context); + +} +``` + +See [Message Listener Container Configuration](#containerAttributes) for information about supported properties. + +Similar the template, the container has a `ConsumerCustomizer` property. + +Refer to the [Java Client Documentation](https://rabbitmq.github.io/rabbitmq-stream-java-client/stable/htmlsingle/) about customizing the `Environment` and `Consumer`. + +When using `@RabbitListener`, configure a `StreamRabbitListenerContainerFactory`; at this time, most `@RabbitListener` properties (`concurrency`, etc) are ignored. Only `id`, `queues`, `autoStartup` and `containerFactory` are supported. +In addition, `queues` can only contain one stream name. + +#### 4.2.3. 
Examples + +``` +@Bean +RabbitStreamTemplate streamTemplate(Environment env) { + RabbitStreamTemplate template = new RabbitStreamTemplate(env, "test.stream.queue1"); + template.setProducerCustomizer((name, builder) -> builder.name("test")); + return template; +} + +@Bean +RabbitListenerContainerFactory rabbitListenerContainerFactory(Environment env) { + return new StreamRabbitListenerContainerFactory(env); +} + +@RabbitListener(queues = "test.stream.queue1") +void listen(String in) { + ... +} + +@Bean +RabbitListenerContainerFactory nativeFactory(Environment env) { + StreamRabbitListenerContainerFactory factory = new StreamRabbitListenerContainerFactory(env); + factory.setNativeListener(true); + factory.setConsumerCustomizer((id, builder) -> { + builder.name("myConsumer") + .offset(OffsetSpecification.first()) + .manualTrackingStrategy(); + }); + return factory; +} + +@RabbitListener(id = "test", queues = "test.stream.queue2", containerFactory = "nativeFactory") +void nativeMsg(Message in, Context context) { + ... + context.storeOffset(); +} +``` + +### 4.3. Logging Subsystem AMQP Appenders + +The framework provides logging appenders for some popular logging subsystems: + +* logback (since Spring AMQP version 1.4) + +* log4j2 (since Spring AMQP version 1.6) + +The appenders are configured by using the normal mechanisms for the logging subsystem, available properties are specified in the following sections. + +#### 4.3.1. 
Common properties + +The following properties are available with all appenders: + +| Property | Default | Description | +|-------------------------------------------|-------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ```
exchangeName
``` | ```
logs
``` | Name of the exchange to which to publish log events. | +| ```
exchangeType
``` | ```
topic
``` | Type of the exchange to which to publish log events — needed only if the appender declares the exchange.
See `declareExchange`. | +| ```
routingKeyPattern
``` | ```
%c.%p
``` | Logging subsystem pattern format to use to generate a routing key. | +| ```
applicationId
``` | ```

``` | Application ID — added to the routing key if the pattern includes `%X{applicationId}`. | +| ```
senderPoolSize
``` | ```
2
``` | The number of threads to use to publish log events. | +| ```
maxSenderRetries
``` | ```
30
``` | How many times to retry sending a message if the broker is unavailable or there is some other error.
Retries are delayed as follows: `N ^ log(N)`, where `N` is the retry number. | +| ```
addresses
``` | ```

``` | A comma-delimited list of broker addresses in the following form: `host:port[,host:port]*` - overrides `host` and `port`. | +| ```
host
``` | ```
localhost
``` | RabbitMQ host to which to connect. | +| ```
port
``` | ```
5672
``` | RabbitMQ port to which to connect. | +| ```
virtualHost
``` | ```
/
``` | RabbitMQ virtual host to which to connect. | +| ```
username
``` | ```
guest
``` | RabbitMQ user to use when connecting. | +| ```
password
``` | ```
guest
``` | RabbitMQ password for this user. | +| ```
useSsl
``` | ```
false
``` | Whether to use SSL for the RabbitMQ connection.
See [`RabbitConnectionFactoryBean` and Configuring SSL](#rabbitconnectionfactorybean-configuring-ssl) | +| ```
verifyHostname
``` | ```
true
``` | Enable server hostname verification for TLS connections.
See [`RabbitConnectionFactoryBean` and Configuring SSL](#rabbitconnectionfactorybean-configuring-ssl) | +| ```
sslAlgorithm
``` | ```
null
``` | The SSL algorithm to use. | +| ```
sslPropertiesLocation
``` | ```
null
``` | Location of the SSL properties file. | +| ```
keyStore
``` | ```
null
``` | Location of the keystore. | +| ```
keyStorePassphrase
``` | ```
null
``` | Passphrase for the keystore. | +| ```
keyStoreType
``` | ```
JKS
``` | The keystore type. | +| ```
trustStore
``` | ```
null
``` | Location of the truststore. | +| ```
trustStorePassphrase
``` | ```
null
``` | Passphrase for the truststore. | +| ```
trustStoreType
``` | ```
JKS
``` | The truststore type. | +| ```
saslConfig
``` |```
null (RabbitMQ client default applies)
```| The `saslConfig` - see the javadoc for `RabbitUtils.stringToSaslConfig` for valid values. | +| ```
contentType
``` | ```
text/plain
``` | `content-type` property of log messages. | +| ```
contentEncoding
``` | ```

``` | `content-encoding` property of log messages. | +| ```
declareExchange
``` | ```
false
``` | Whether or not to declare the configured exchange when this appender starts.
See also `durable` and `autoDelete`. | +| ```
durable
``` | ```
true
``` | When `declareExchange` is `true`, the durable flag is set to this value. | +| ```
autoDelete
``` | ```
false
``` | When `declareExchange` is `true`, the auto-delete flag is set to this value. | +| ```
charset
``` | ```
null
``` | Character set to use when converting `String` to `byte[]`.
Default: null (the system default charset is used).
If the character set is unsupported on the current platform, we fall back to using the system character set. | +| ```
deliveryMode
``` | ```
PERSISTENT
``` | `PERSISTENT` or `NON_PERSISTENT` to determine whether or not RabbitMQ should persist the messages. | +| ```
generateId
``` | ```
false
``` | Used to determine whether the `messageId` property is set to a unique value. | +|```
clientConnectionProperties
```| ```
null
``` | A comma-delimited list of `key:value` pairs for custom client properties to the RabbitMQ connection. | +| ```
addMdcAsHeaders
``` | ```
true
``` |MDC properties were always added into RabbitMQ message headers until this property was introduced.
It can lead to issues for a big MDC, since RabbitMQ has a limited buffer size for all headers and this buffer is pretty small.
This property was introduced to avoid issues in cases of a big MDC.
By default this value is set to `true` for backward compatibility.
Setting it to `false` turns off serializing the MDC into headers.
Please note, the `JsonLayout` adds MDC into the message by default.| + +#### 4.3.2. Log4j 2 Appender + +The following example shows how to configure a Log4j 2 appender: + +``` + + ... + + + +``` + +| |Starting with versions 1.6.10 and 1.7.3, by default, the log4j2 appender publishes the messages to RabbitMQ on the calling thread.
This is because Log4j 2 does not, by default, create thread-safe events.
If the broker is down, the `maxSenderRetries` is used to retry, with no delay between retries.
If you wish to restore the previous behavior of publishing the messages on separate threads (`senderPoolSize`), you can set the `async` property to `true`.
However, you also need to configure Log4j 2 to use the `DefaultLogEventFactory` instead of the `ReusableLogEventFactory`.
One way to do that is to set the system property `-Dlog4j2.enable.threadlocals=false`.
If you use asynchronous publishing with the `ReusableLogEventFactory`, events have a high likelihood of being corrupted due to cross-talk.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.3.3. Logback Appender + +The following example shows how to configure a logback appender: + +``` + + + %n ]]> + + foo:5672,bar:5672 + 36 + false + myApplication + %property{applicationId}.%c.%p + true + UTF-8 + false + NON_PERSISTENT + true + false + +``` + +Starting with version 1.7.1, the Logback `AmqpAppender` provides an `includeCallerData` option, which is `false` by default. +Extracting caller data can be rather expensive, because the log event has to create a throwable and inspect it to determine the calling location. +Therefore, by default, caller data associated with an event is not extracted when the event is added to the event queue. +You can configure the appender to include caller data by setting the `includeCallerData` property to `true`. + +Starting with version 2.0.0, the Logback `AmqpAppender` supports [Logback encoders](https://logback.qos.ch/manual/encoders.html) with the `encoder` option. +The `encoder` and `layout` options are mutually exclusive. + +#### 4.3.4. 
Customizing the Messages + +By default, AMQP appenders populate the following message properties: + +* `deliveryMode` + +* contentType + +* `contentEncoding`, if configured + +* `messageId`, if `generateId` is configured + +* `timestamp` of the log event + +* `appId`, if applicationId is configured + +In addition they populate headers with the following values: + +* `categoryName` of the log event + +* The level of the log event + +* `thread`: the name of the thread where log event happened + +* The location of the stack trace of the log event call + +* A copy of all the MDC properties (unless `addMdcAsHeaders` is set to `false`) + +Each of the appenders can be subclassed, letting you modify the messages before publishing. +The following example shows how to customize log messages: + +``` +public class MyEnhancedAppender extends AmqpAppender { + + @Override + public Message postProcessMessageBeforeSend(Message message, Event event) { + message.getMessageProperties().setHeader("foo", "bar"); + return message; + } + +} +``` + +Starting with 2.2.4, the log4j2 `AmqpAppender` can be extended using `@PluginBuilderFactory` and extending also `AmqpAppender.Builder` + +``` +@Plugin(name = "MyEnhancedAppender", category = "Core", elementType = "appender", printObject = true) +public class MyEnhancedAppender extends AmqpAppender { + + public MyEnhancedAppender(String name, Filter filter, Layout layout, + boolean ignoreExceptions, AmqpManager manager, BlockingQueue eventQueue, String foo, String bar) { + super(name, filter, layout, ignoreExceptions, manager, eventQueue); + + @Override + public Message postProcessMessageBeforeSend(Message message, Event event) { + message.getMessageProperties().setHeader("foo", "bar"); + return message; + } + + @PluginBuilderFactory + public static Builder newBuilder() { + return new Builder(); + } + + protected static class Builder extends AmqpAppender.Builder { + + @Override + protected AmqpAppender buildInstance(String name, Filter filter, 
Layout layout, + boolean ignoreExceptions, AmqpManager manager, BlockingQueue eventQueue) { + return new MyEnhancedAppender(name, filter, layout, ignoreExceptions, manager, eventQueue); + } + } + +} +``` + +#### 4.3.5. Customizing the Client Properties + +You can add custom client properties by adding either string properties or more complex properties. + +##### Simple String Properties + +Each appender supports adding client properties to the RabbitMQ connection. + +The following example shows how to add a custom client property for logback: + +``` + + ... + thing1:thing2,cat:hat + ... + +``` + +Example 3. log4j2 + +``` + + ... + + +``` + +The properties are a comma-delimited list of `key:value` pairs. +Keys and values cannot contain commas or colons. + +These properties appear on the RabbitMQ Admin UI when the connection is viewed. + +##### Advanced Technique for Logback + +You can subclass the Logback appender. +Doing so lets you modify the client connection properties before the connection is established. +The following example shows how to do so: + +``` +public class MyEnhancedAppender extends AmqpAppender { + + private String thing1; + + @Override + protected void updateConnectionClientProperties(Map clientProperties) { + clientProperties.put("thing1", this.thing1); + } + + public void setThing1(String thing1) { + this.thing1 = thing1; + } + +} +``` + +Then you can add `thing2` to logback.xml. + +For String properties such as those shown in the preceding example, the previous technique can be used. +Subclasses allow for adding richer properties (such as adding a `Map` or numeric property). + +#### 4.3.6. Providing a Custom Queue Implementation + +The `AmqpAppenders` use a `BlockingQueue` to asynchronously publish logging events to RabbitMQ. +By default, a `LinkedBlockingQueue` is used. +However, you can supply any kind of custom `BlockingQueue` implementation. 
+ +The following example shows how to do so for Logback: + +``` +public class MyEnhancedAppender extends AmqpAppender { + + @Override + protected BlockingQueue createEventQueue() { + return new ArrayBlockingQueue(); + } + +} +``` + +The Log4j 2 appender supports using a [`BlockingQueueFactory`](https://logging.apache.org/log4j/2.x/manual/appenders.html#BlockingQueueFactory), as the following example shows: + +``` + + ... + + + + +``` + +### 4.4. Sample Applications + +The [Spring AMQP Samples](https://github.com/SpringSource/spring-amqp-samples) project includes two sample applications. +The first is a simple “Hello World” example that demonstrates both synchronous and asynchronous message reception. +It provides an excellent starting point for acquiring an understanding of the essential components. +The second sample is based on a stock-trading use case to demonstrate the types of interaction that would be common in real world applications. +In this chapter, we provide a quick walk-through of each sample so that you can focus on the most important components. +The samples are both Maven-based, so you should be able to import them directly into any Maven-aware IDE (such as [SpringSource Tool Suite](https://www.springsource.org/sts)). + +#### 4.4.1. The “Hello World” Sample + +The “Hello World” sample demonstrates both synchronous and asynchronous message reception. +You can import the `spring-rabbit-helloworld` sample into the IDE and then follow the discussion below. + +##### Synchronous Example + +Within the `src/main/java` directory, navigate to the `org.springframework.amqp.helloworld` package. +Open the `HelloWorldConfiguration` class and notice that it contains the `@Configuration` annotation at the class level and notice some `@Bean` annotations at method-level. +This is an example of Spring’s Java-based configuration. +You can read more about that [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/beans.html#beans-java). 
+ +The following listing shows how the connection factory is created: + +``` +@Bean +public CachingConnectionFactory connectionFactory() { + CachingConnectionFactory connectionFactory = + new CachingConnectionFactory("localhost"); + connectionFactory.setUsername("guest"); + connectionFactory.setPassword("guest"); + return connectionFactory; +} +``` + +The configuration also contains an instance of `RabbitAdmin`, which, by default, looks for any beans of type exchange, queue, or binding and then declares them on the broker. +In fact, the `helloWorldQueue` bean that is generated in `HelloWorldConfiguration` is an example because it is an instance of `Queue`. + +The following listing shows the `helloWorldQueue` bean definition: + +``` +@Bean +public Queue helloWorldQueue() { + return new Queue(this.helloWorldQueueName); +} +``` + +Looking back at the `rabbitTemplate` bean configuration, you can see that it has the name of `helloWorldQueue` set as its `queue` property (for receiving messages) and for its `routingKey` property (for sending messages). + +Now that we have explored the configuration, we can look at the code that actually uses these components. +First, open the `Producer` class from within the same package. +It contains a `main()` method where the Spring `ApplicationContext` is created. + +The following listing shows the `main` method: + +``` +public static void main(String[] args) { + ApplicationContext context = + new AnnotationConfigApplicationContext(RabbitConfiguration.class); + AmqpTemplate amqpTemplate = context.getBean(AmqpTemplate.class); + amqpTemplate.convertAndSend("Hello World"); + System.out.println("Sent: Hello World"); +} +``` + +In the preceding example, the `AmqpTemplate` bean is retrieved and used for sending a `Message`. +Since the client code should rely on interfaces whenever possible, the type is `AmqpTemplate` rather than `RabbitTemplate`. 
+Even though the bean created in `HelloWorldConfiguration` is an instance of `RabbitTemplate`, relying on the interface means that this code is more portable (you can change the configuration independently of the code). +Since the `convertAndSend()` method is invoked, the template delegates to its `MessageConverter` instance. +In this case, it uses the default `SimpleMessageConverter`, but a different implementation could be provided to the `rabbitTemplate` bean, as defined in `HelloWorldConfiguration`. + +Now open the `Consumer` class. +It actually shares the same configuration base class, which means it shares the `rabbitTemplate` bean. +That is why we configured that template with both a `routingKey` (for sending) and a `queue` (for receiving). +As we describe in [`AmqpTemplate`](#amqp-template), you could instead pass the 'routingKey' argument to the send method and the 'queue' argument to the receive method. +The `Consumer` code is basically a mirror image of the Producer, calling `receiveAndConvert()` rather than `convertAndSend()`. + +The following listing shows the main method for the `Consumer`: + +``` +public static void main(String[] args) { + ApplicationContext context = + new AnnotationConfigApplicationContext(RabbitConfiguration.class); + AmqpTemplate amqpTemplate = context.getBean(AmqpTemplate.class); + System.out.println("Received: " + amqpTemplate.receiveAndConvert()); +} +``` + +If you run the `Producer` and then run the `Consumer`, you should see `Received: Hello World` in the console output. + +##### Asynchronous Example + +[Synchronous Example](#hello-world-sync) walked through the synchronous Hello World sample. +This section describes a slightly more advanced but significantly more powerful option. +With a few modifications, the Hello World sample can provide an example of asynchronous reception, also known as message-driven POJOs. 
+In fact, there is a sub-package that provides exactly that: `org.springframework.amqp.samples.helloworld.async`. + +Again, we start with the sending side. +Open the `ProducerConfiguration` class and notice that it creates a `connectionFactory` and a `rabbitTemplate` bean. +This time, since the configuration is dedicated to the message sending side, we do not even need any queue definitions, and the `RabbitTemplate` has only the 'routingKey' property set. +Recall that messages are sent to an exchange rather than being sent directly to a queue. +The AMQP default exchange is a direct exchange with no name. +All queues are bound to that default exchange with their name as the routing key. +That is why we only need to provide the routing key here. + +The following listing shows the `rabbitTemplate` definition: + +``` +public RabbitTemplate rabbitTemplate() { + RabbitTemplate template = new RabbitTemplate(connectionFactory()); + template.setRoutingKey(this.helloWorldQueueName); + return template; +} +``` + +Since this sample demonstrates asynchronous message reception, the producing side is designed to continuously send messages (if it were a message-per-execution model like the synchronous version, it would not be quite so obvious that it is, in fact, a message-driven consumer). +The component responsible for continuously sending messages is defined as an inner class within the `ProducerConfiguration`. +It is configured to run every three seconds. + +The following listing shows the component: + +``` +static class ScheduledProducer { + + @Autowired + private volatile RabbitTemplate rabbitTemplate; + + private final AtomicInteger counter = new AtomicInteger(); + + @Scheduled(fixedRate = 3000) + public void sendMessage() { + rabbitTemplate.convertAndSend("Hello World " + counter.incrementAndGet()); + } +} +``` + +You do not need to understand all of the details, since the real focus should be on the receiving side (which we cover next). 
+However, if you are not yet familiar with Spring task scheduling support, you can learn more [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/scheduling.html#scheduling-annotation-support).
+The short story is that the `postProcessor` bean in the `ProducerConfiguration` registers the task with a scheduler.
+
+Now we can turn to the receiving side.
+To emphasize the message-driven POJO behavior, we start with the component that reacts to the messages.
+The class is called `HelloWorldHandler` and is shown in the following listing:
+
+```
+public class HelloWorldHandler {
+
+    public void handleMessage(String text) {
+        System.out.println("Received: " + text);
+    }
+
+}
+```
+
+That class is a POJO.
+It does not extend any base class, it does not implement any interfaces, and it does not even contain any imports.
+It is being “adapted” to the `MessageListener` interface by the Spring AMQP `MessageListenerAdapter`.
+You can then configure that adapter on a `SimpleMessageListenerContainer`.
+For this sample, the container is created in the `ConsumerConfiguration` class.
+You can see the POJO wrapped in the adapter there.
+
+The following listing shows how the `listenerContainer` is defined:
+
+```
+@Bean
+public SimpleMessageListenerContainer listenerContainer() {
+    SimpleMessageListenerContainer container = new SimpleMessageListenerContainer();
+    container.setConnectionFactory(connectionFactory());
+    container.setQueueName(this.helloWorldQueueName);
+    container.setMessageListener(new MessageListenerAdapter(new HelloWorldHandler()));
+    return container;
+}
+```
+
+The `SimpleMessageListenerContainer` is a Spring lifecycle component and, by default, starts automatically.
+If you look in the `Consumer` class, you can see that its `main()` method consists of nothing more than a one-line bootstrap to create the `ApplicationContext`.
+The Producer’s `main()` method is also a one-line bootstrap, since the component whose method is annotated with `@Scheduled` also starts automatically. +You can start the `Producer` and `Consumer` in any order, and you should see messages being sent and received every three seconds. + +#### 4.4.2. Stock Trading + +The Stock Trading sample demonstrates more advanced messaging scenarios than [the Hello World sample](#hello-world-sample). +However, the configuration is very similar, if a bit more involved. +Since we walked through the Hello World configuration in detail, here, we focus on what makes this sample different. +There is a server that pushes market data (stock quotations) to a topic exchange. +Then, clients can subscribe to the market data feed by binding a queue with a routing pattern (for example,`app.stock.quotes.nasdaq.*`). +The other main feature of this demo is a request-reply “stock trade” interaction that is initiated by the client and handled by the server. +That involves a private `replyTo` queue that is sent by the client within the order request message itself. + +The server’s core configuration is in the `RabbitServerConfiguration` class within the `org.springframework.amqp.rabbit.stocks.config.server` package. +It extends the `AbstractStockAppRabbitConfiguration`. +That is where the resources common to the server and client are defined, including the market data topic exchange (whose name is 'app.stock.marketdata') and the queue that the server exposes for stock trades (whose name is 'app.stock.request'). +In that common configuration file, you also see that a `Jackson2JsonMessageConverter` is configured on the `RabbitTemplate`. + +The server-specific configuration consists of two things. +First, it configures the market data exchange on the `RabbitTemplate` so that it does not need to provide that exchange name with every call to send a `Message`. +It does this within an abstract callback method defined in the base configuration class. 
+The following listing shows that method: + +``` +public void configureRabbitTemplate(RabbitTemplate rabbitTemplate) { + rabbitTemplate.setExchange(MARKET_DATA_EXCHANGE_NAME); +} +``` + +Second, the stock request queue is declared. +It does not require any explicit bindings in this case, because it is bound to the default no-name exchange with its own name as the routing key. +As mentioned earlier, the AMQP specification defines that behavior. +The following listing shows the definition of the `stockRequestQueue` bean: + +``` +@Bean +public Queue stockRequestQueue() { + return new Queue(STOCK_REQUEST_QUEUE_NAME); +} +``` + +Now that you have seen the configuration of the server’s AMQP resources, navigate to the `org.springframework.amqp.rabbit.stocks` package under the `src/test/java` directory. +There, you can see the actual `Server` class that provides a `main()` method. +It creates an `ApplicationContext` based on the `server-bootstrap.xml` config file. +There, you can see the scheduled task that publishes dummy market data. +That configuration relies upon Spring’s `task` namespace support. +The bootstrap config file also imports a few other files. +The most interesting one is `server-messaging.xml`, which is directly under `src/main/resources`. +There, you can see the `messageListenerContainer` bean that is responsible for handling the stock trade requests. +Finally, have a look at the `serverHandler` bean that is defined in `server-handlers.xml` (which is also in 'src/main/resources'). +That bean is an instance of the `ServerHandler` class and is a good example of a message-driven POJO that can also send reply messages. +Notice that it is not itself coupled to the framework or any of the AMQP concepts. +It accepts a `TradeRequest` and returns a `TradeResponse`. +The following listing shows the definition of the `handleMessage` method: + +``` +public TradeResponse handleMessage(TradeRequest tradeRequest) { ... 
+} +``` + +Now that we have seen the most important configuration and code for the server, we can turn to the client. +The best starting point is probably `RabbitClientConfiguration`, in the `org.springframework.amqp.rabbit.stocks.config.client` package. +Notice that it declares two queues without providing explicit names. +The following listing shows the bean definitions for the two queues: + +``` +@Bean +public Queue marketDataQueue() { + return amqpAdmin().declareQueue(); +} + +@Bean +public Queue traderJoeQueue() { + return amqpAdmin().declareQueue(); +} +``` + +Those are private queues, and unique names are generated automatically. +The first generated queue is used by the client to bind to the market data exchange that has been exposed by the server. +Recall that, in AMQP, consumers interact with queues while producers interact with exchanges. +The “binding” of queues to exchanges is what tells the broker to deliver (or route) messages from a given exchange to a queue. +Since the market data exchange is a topic exchange, the binding can be expressed with a routing pattern. +The `RabbitClientConfiguration` does so with a `Binding` object, and that object is generated with the `BindingBuilder` fluent API. +The following listing shows the `Binding`: + +``` +@Value("${stocks.quote.pattern}") +private String marketDataRoutingKey; + +@Bean +public Binding marketDataBinding() { + return BindingBuilder.bind( + marketDataQueue()).to(marketDataExchange()).with(marketDataRoutingKey); +} +``` + +Notice that the actual value has been externalized in a properties file (`client.properties` under `src/main/resources`), and that we use Spring’s `@Value` annotation to inject that value. +This is generally a good idea. +Otherwise, the value would have been hardcoded in a class and unmodifiable without recompilation. +In this case, it is much easier to run multiple versions of the client while making changes to the routing pattern used for binding. +We can try that now. 
+ +Start by running `org.springframework.amqp.rabbit.stocks.Server` and then `org.springframework.amqp.rabbit.stocks.Client`. +You should see dummy quotations for `NASDAQ` stocks, because the current value associated with the 'stocks.quote.pattern' key in client.properties is 'app.stock.quotes.nasdaq.**'. +Now, while keeping the existing `Server` and `Client` running, change that property value to 'app.stock.quotes.nyse.**' and start a second `Client` instance. +You should see that the first client still receives NASDAQ quotes while the second client receives NYSE quotes. +You could instead change the pattern to get all stocks or even an individual ticker. + +The final feature we explore is the request-reply interaction from the client’s perspective. +Recall that we have already seen the `ServerHandler` that accepts `TradeRequest` objects and returns `TradeResponse` objects. +The corresponding code on the `Client` side is `RabbitStockServiceGateway` in the `org.springframework.amqp.rabbit.stocks.gateway` package. +It delegates to the `RabbitTemplate` in order to send messages. +The following listing shows the `send` method: + +``` +public void send(TradeRequest tradeRequest) { + getRabbitTemplate().convertAndSend(tradeRequest, new MessagePostProcessor() { + public Message postProcessMessage(Message message) throws AmqpException { + message.getMessageProperties().setReplyTo(new Address(defaultReplyToQueue)); + try { + message.getMessageProperties().setCorrelationId( + UUID.randomUUID().toString().getBytes("UTF-8")); + } + catch (UnsupportedEncodingException e) { + throw new AmqpException(e); + } + return message; + } + }); +} +``` + +Notice that, prior to sending the message, it sets the `replyTo` address. +It provides the queue that was generated by the `traderJoeQueue` bean definition (shown earlier). 
+The following listing shows the `@Bean` definition for the `StockServiceGateway` class itself:
+
+```
+@Bean
+public StockServiceGateway stockServiceGateway() {
+    RabbitStockServiceGateway gateway = new RabbitStockServiceGateway();
+    gateway.setRabbitTemplate(rabbitTemplate());
+    gateway.setDefaultReplyToQueue(traderJoeQueue());
+    return gateway;
+}
+```
+
+If you are no longer running the server and client, start them now.
+Try sending a request with the format of '100 TCKR'.
+After a brief artificial delay that simulates “processing” of the request, you should see a confirmation message appear on the client.
+
+#### 4.4.3. Receiving JSON from Non-Spring Applications
+
+Spring applications, when sending JSON, set the `*TypeId*` header to the fully qualified class name to assist the receiving application in converting the JSON back to a Java object.
+
+The `spring-rabbit-json` sample explores several techniques to convert the JSON from a non-Spring application.
+
+See also [Jackson2JsonMessageConverter](#json-message-converter) as well as the [Javadoc for the `DefaultClassMapper`](https://docs.spring.io/spring-amqp/docs/current/api/index.html?org/springframework/amqp/support/converter/DefaultClassMapper.html).
+
+### 4.5. Testing Support
+
+Writing integration tests for asynchronous applications is necessarily more complex than testing simpler applications.
+This is made more complex when abstractions such as the `@RabbitListener` annotations come into the picture.
+The question is how to verify that, after sending a message, the listener received the message as expected.
+
+The framework itself has many unit and integration tests.
+Some use mocks, while others use integration testing with a live RabbitMQ broker.
+You can consult those tests for some ideas for testing scenarios.
+
+Spring AMQP version 1.6 introduced the `spring-rabbit-test` jar, which provides support for testing some of these more complex scenarios.
+It is anticipated that this project will expand over time, but we need community feedback to make suggestions for the features needed to help with testing.
+Please use [JIRA](https://jira.spring.io/browse/AMQP) or [GitHub Issues](https://github.com/spring-projects/spring-amqp/issues) to provide such feedback.
+
+#### 4.5.1. @SpringRabbitTest
+
+Use this annotation to add infrastructure beans to the Spring test `ApplicationContext`.
+This is not necessary when using, for example, `@SpringBootTest` since Spring Boot’s auto configuration will add the beans.
+
+Beans that are registered are:
+
+* `CachingConnectionFactory` (`autoConnectionFactory`). If `@RabbitEnabled` is present, its connection factory is used.
+
+* `RabbitTemplate` (`autoRabbitTemplate`)
+
+* `RabbitAdmin` (`autoRabbitAdmin`)
+
+* `RabbitListenerContainerFactory` (`autoContainerFactory`)
+
+In addition, the beans associated with `@EnableRabbit` (to support `@RabbitListener`) are added.
+
+Example 4. Junit5 example
+
+```
+@SpringJunitConfig
+@SpringRabbitTest
+public class MyRabbitTests {
+
+    @Autowired
+    private RabbitTemplate template;
+
+    @Autowired
+    private RabbitAdmin admin;
+
+    @Autowired
+    private RabbitListenerEndpointRegistry registry;
+
+    @Test
+    void test() {
+        ...
+    }
+
+    @Configuration
+    public static class Config {
+
+        ...
+
+    }
+
+}
+```
+
+With JUnit4, replace `@SpringJunitConfig` with `@RunWith(SpringRunner.class)`.
+
+#### 4.5.2. Mockito `Answer` Implementations
+
+There are currently two `Answer` implementations to help with testing.
+
+The first, `LatchCountDownAndCallRealMethodAnswer`, provides an `Answer` that returns `null` and counts down a latch.
+The following example shows how to use `LatchCountDownAndCallRealMethodAnswer`:
+
+```
+LatchCountDownAndCallRealMethodAnswer answer = this.harness.getLatchAnswerFor("myListener", 2);
+doAnswer(answer)
+    .when(listener).foo(anyString(), anyString());
+
+...
+ +assertThat(answer.await(10)).isTrue(); +``` + +The second, `LambdaAnswer` provides a mechanism to optionally call the real method and provides an opportunity +to return a custom result, based on the `InvocationOnMock` and the result (if any). + +Consider the following POJO: + +``` +public class Thing { + + public String thing(String thing) { + return thing.toUpperCase(); + } + +} +``` + +The following class tests the `Thing` POJO: + +``` +Thing thing = spy(new Thing()); + +doAnswer(new LambdaAnswer(true, (i, r) -> r + r)) + .when(thing).thing(anyString()); +assertEquals("THINGTHING", thing.thing("thing")); + +doAnswer(new LambdaAnswer(true, (i, r) -> r + i.getArguments()[0])) + .when(thing).thing(anyString()); +assertEquals("THINGthing", thing.thing("thing")); + +doAnswer(new LambdaAnswer(false, (i, r) -> + "" + i.getArguments()[0] + i.getArguments()[0])).when(thing).thing(anyString()); +assertEquals("thingthing", thing.thing("thing")); +``` + +Starting with version 2.2.3, the answers capture any exceptions thrown by the method under test. +Use `answer.getExceptions()` to get a reference to them. + +When used in conjunction with the [`@RabbitListenerTest` and `RabbitListenerTestHarness`](#test-harness) use `harness.getLambdaAnswerFor("listenerId", true, …​)` to get a properly constructed answer for the listener. + +#### 4.5.3. `@RabbitListenerTest` and `RabbitListenerTestHarness` + +Annotating one of your `@Configuration` classes with `@RabbitListenerTest` causes the framework to replace the +standard `RabbitListenerAnnotationBeanPostProcessor` with a subclass called `RabbitListenerTestHarness` (it also enables`@RabbitListener` detection through `@EnableRabbit`). + +The `RabbitListenerTestHarness` enhances the listener in two ways. +First, it wraps the listener in a `Mockito Spy`, enabling normal `Mockito` stubbing and verification operations. 
+It can also add an `Advice` to the listener, enabling access to the arguments, result, and any exceptions that are thrown. +You can control which (or both) of these are enabled with attributes on the `@RabbitListenerTest`. +The latter is provided for access to lower-level data about the invocation. +It also supports blocking the test thread until the async listener is called. + +| |`final` `@RabbitListener` methods cannot be spied or advised.
Also, only listeners with an `id` attribute can be spied or advised.| +|---|--------------------------------------------------------------------------------------------------------------------------------------| + +Consider some examples. + +The following example uses spy: + +``` +@Configuration +@RabbitListenerTest +public class Config { + + @Bean + public Listener listener() { + return new Listener(); + } + + ... + +} + +public class Listener { + + @RabbitListener(id="foo", queues="#{queue1.name}") + public String foo(String foo) { + return foo.toUpperCase(); + } + + @RabbitListener(id="bar", queues="#{queue2.name}") + public void foo(@Payload String foo, @Header("amqp_receivedRoutingKey") String rk) { + ... + } + +} + +public class MyTests { + + @Autowired + private RabbitListenerTestHarness harness; (1) + + @Test + public void testTwoWay() throws Exception { + assertEquals("FOO", this.rabbitTemplate.convertSendAndReceive(this.queue1.getName(), "foo")); + + Listener listener = this.harness.getSpy("foo"); (2) + assertNotNull(listener); + verify(listener).foo("foo"); + } + + @Test + public void testOneWay() throws Exception { + Listener listener = this.harness.getSpy("bar"); + assertNotNull(listener); + + LatchCountDownAndCallRealMethodAnswer answer = this.harness.getLatchAnswerFor("bar", 2); (3) + doAnswer(answer).when(listener).foo(anyString(), anyString()); (4) + + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "bar"); + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "baz"); + + assertTrue(answer.await(10)); + verify(listener).foo("bar", this.queue2.getName()); + verify(listener).foo("baz", this.queue2.getName()); + } + +} +``` + +|**1**| Inject the harness into the test case so we can get access to the spy. 
| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Get a reference to the spy so we can verify it was invoked as expected.
Since this is a send and receive operation, there is no need to suspend the test thread because it was already
suspended in the `RabbitTemplate` waiting for the reply. | +|**3**|In this case, we’re only using a send operation so we need a latch to wait for the asynchronous call to the listener
on the container thread.
We use one of the [Answer](#mockito-answer) implementations to help with that.
IMPORTANT: Due to the way the listener is spied, it is important to use `harness.getLatchAnswerFor()` to get a properly configured answer for the spy.| +|**4**| Configure the spy to invoke the `Answer`. | + +The following example uses the capture advice: + +``` +@Configuration +@ComponentScan +@RabbitListenerTest(spy = false, capture = true) +public class Config { + +} + +@Service +public class Listener { + + private boolean failed; + + @RabbitListener(id="foo", queues="#{queue1.name}") + public String foo(String foo) { + return foo.toUpperCase(); + } + + @RabbitListener(id="bar", queues="#{queue2.name}") + public void foo(@Payload String foo, @Header("amqp_receivedRoutingKey") String rk) { + if (!failed && foo.equals("ex")) { + failed = true; + throw new RuntimeException(foo); + } + failed = false; + } + +} + +public class MyTests { + + @Autowired + private RabbitListenerTestHarness harness; (1) + + @Test + public void testTwoWay() throws Exception { + assertEquals("FOO", this.rabbitTemplate.convertSendAndReceive(this.queue1.getName(), "foo")); + + InvocationData invocationData = + this.harness.getNextInvocationDataFor("foo", 0, TimeUnit.SECONDS); (2) + assertThat(invocationData.getArguments()[0], equalTo("foo")); (3) + assertThat((String) invocationData.getResult(), equalTo("FOO")); + } + + @Test + public void testOneWay() throws Exception { + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "bar"); + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "baz"); + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "ex"); + + InvocationData invocationData = + this.harness.getNextInvocationDataFor("bar", 10, TimeUnit.SECONDS); (4) + Object[] args = invocationData.getArguments(); + assertThat((String) args[0], equalTo("bar")); + assertThat((String) args[1], equalTo(queue2.getName())); + + invocationData = this.harness.getNextInvocationDataFor("bar", 10, TimeUnit.SECONDS); + args = invocationData.getArguments(); + assertThat((String) args[0], 
equalTo("baz")); + + invocationData = this.harness.getNextInvocationDataFor("bar", 10, TimeUnit.SECONDS); + args = invocationData.getArguments(); + assertThat((String) args[0], equalTo("ex")); + assertEquals("ex", invocationData.getThrowable().getMessage()); (5) + } + +} +``` + +|**1**| Inject the harness into the test case so we can get access to the spy. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Use `harness.getNextInvocationDataFor()` to retrieve the invocation data - in this case since it was a request/reply
scenario there is no need to wait for any time because the test thread was suspended in the `RabbitTemplate` waiting
for the result.| +|**3**| We can then verify that the argument and result was as expected. | +|**4**| This time we need some time to wait for the data, since it’s an async operation on the container thread and we need
to suspend the test thread. | +|**5**| When the listener throws an exception, it is available in the `throwable` property of the invocation data. | + +| |When using custom `Answer` s with the harness, in order to operate properly, such answers should subclass `ForwardsInvocation` and get the actual listener (not the spy) from the harness (`getDelegate("myListener")`) and call `super.answer(invocation)`.
See the provided [Mockito `Answer` Implementations](#mockito-answer) source code for examples.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.5.4. Using `TestRabbitTemplate` + +The `TestRabbitTemplate` is provided to perform some basic integration testing without the need for a broker. +When you add it as a `@Bean` in your test case, it discovers all the listener containers in the context, whether declared as `@Bean` or `` or using the `@RabbitListener` annotation. +It currently only supports routing by queue name. +The template extracts the message listener from the container and invokes it directly on the test thread. +Request-reply messaging (`sendAndReceive` methods) is supported for listeners that return replies. 
+ +The following test case uses the template: + +``` +@RunWith(SpringRunner.class) +public class TestRabbitTemplateTests { + + @Autowired + private TestRabbitTemplate template; + + @Autowired + private Config config; + + @Test + public void testSimpleSends() { + this.template.convertAndSend("foo", "hello1"); + assertThat(this.config.fooIn, equalTo("foo:hello1")); + this.template.convertAndSend("bar", "hello2"); + assertThat(this.config.barIn, equalTo("bar:hello2")); + assertThat(this.config.smlc1In, equalTo("smlc1:")); + this.template.convertAndSend("foo", "hello3"); + assertThat(this.config.fooIn, equalTo("foo:hello1")); + this.template.convertAndSend("bar", "hello4"); + assertThat(this.config.barIn, equalTo("bar:hello2")); + assertThat(this.config.smlc1In, equalTo("smlc1:hello3hello4")); + + this.template.setBroadcast(true); + this.template.convertAndSend("foo", "hello5"); + assertThat(this.config.fooIn, equalTo("foo:hello1foo:hello5")); + this.template.convertAndSend("bar", "hello6"); + assertThat(this.config.barIn, equalTo("bar:hello2bar:hello6")); + assertThat(this.config.smlc1In, equalTo("smlc1:hello3hello4hello5hello6")); + } + + @Test + public void testSendAndReceive() { + assertThat(this.template.convertSendAndReceive("baz", "hello"), equalTo("baz:hello")); + } +``` + +``` + @Configuration + @EnableRabbit + public static class Config { + + public String fooIn = ""; + + public String barIn = ""; + + public String smlc1In = "smlc1:"; + + @Bean + public TestRabbitTemplate template() throws IOException { + return new TestRabbitTemplate(connectionFactory()); + } + + @Bean + public ConnectionFactory connectionFactory() throws IOException { + ConnectionFactory factory = mock(ConnectionFactory.class); + Connection connection = mock(Connection.class); + Channel channel = mock(Channel.class); + willReturn(connection).given(factory).createConnection(); + willReturn(channel).given(connection).createChannel(anyBoolean()); + given(channel.isOpen()).willReturn(true); + 
return factory; + } + + @Bean + public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() throws IOException { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(connectionFactory()); + return factory; + } + + @RabbitListener(queues = "foo") + public void foo(String in) { + this.fooIn += "foo:" + in; + } + + @RabbitListener(queues = "bar") + public void bar(String in) { + this.barIn += "bar:" + in; + } + + @RabbitListener(queues = "baz") + public String baz(String in) { + return "baz:" + in; + } + + @Bean + public SimpleMessageListenerContainer smlc1() throws IOException { + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(connectionFactory()); + container.setQueueNames("foo", "bar"); + container.setMessageListener(new MessageListenerAdapter(new Object() { + + @SuppressWarnings("unused") + public void handleMessage(String in) { + smlc1In += in; + } + + })); + return container; + } + + } + +} +``` + +#### 4.5.5. JUnit4 `@Rules` + +Spring AMQP version 1.7 and later provide an additional jar called `spring-rabbit-junit`. +This jar contains a couple of utility `@Rule` instances for use when running JUnit4 tests. +See [JUnit5 Conditions](#junit5-conditions) for JUnit5 testing. + +##### Using `BrokerRunning` + +`BrokerRunning` provides a mechanism to let tests succeed when a broker is not running (on `localhost`, by default). + +It also has utility methods to initialize and empty queues and delete queues and exchanges. 
+ +The following example shows its usage: + +``` +@ClassRule +public static BrokerRunning brokerRunning = BrokerRunning.isRunningWithEmptyQueues("foo", "bar"); + +@AfterClass +public static void tearDown() { + brokerRunning.removeTestQueues("some.other.queue.too") // removes foo, bar as well +} +``` + +There are several `isRunning…​` static methods, such as `isBrokerAndManagementRunning()`, which verifies the broker has the management plugin enabled. + +###### Configuring the Rule + +There are times when you want tests to fail if there is no broker, such as a nightly CI build. +To disable the rule at runtime, set an environment variable called `RABBITMQ_SERVER_REQUIRED` to `true`. + +You can override the broker properties, such as hostname with either setters or environment variables: + +The following example shows how to override properties with setters: + +``` +@ClassRule +public static BrokerRunning brokerRunning = BrokerRunning.isRunningWithEmptyQueues("foo", "bar"); + +static { + brokerRunning.setHostName("10.0.0.1") +} + +@AfterClass +public static void tearDown() { + brokerRunning.removeTestQueues("some.other.queue.too") // removes foo, bar as well +} +``` + +You can also override properties by setting the following environment variables: + +``` +public static final String BROKER_ADMIN_URI = "RABBITMQ_TEST_ADMIN_URI"; +public static final String BROKER_HOSTNAME = "RABBITMQ_TEST_HOSTNAME"; +public static final String BROKER_PORT = "RABBITMQ_TEST_PORT"; +public static final String BROKER_USER = "RABBITMQ_TEST_USER"; +public static final String BROKER_PW = "RABBITMQ_TEST_PASSWORD"; +public static final String BROKER_ADMIN_USER = "RABBITMQ_TEST_ADMIN_USER"; +public static final String BROKER_ADMIN_PW = "RABBITMQ_TEST_ADMIN_PASSWORD"; +``` + +These environment variables override the default settings (`localhost:5672` for amqp and `[localhost:15672/api/](http://localhost:15672/api/)` for the management REST API). 
 + +Changing the host name affects both the `amqp` and `management` REST API connection (unless the admin uri is explicitly set). + +`BrokerRunning` also provides a `static` method called `setEnvironmentVariableOverrides` that lets you pass in a map containing these variables. +They override system environment variables. +This might be useful if you wish to use different configuration for tests in multiple test suites. +IMPORTANT: The method must be called before invoking any of the `isRunning()` static methods that create the rule instance. +Variable values are applied to all instances created after this invocation. +Invoke `clearEnvironmentVariableOverrides()` to reset the rule to use defaults (including any actual environment variables). + +In your test cases, you can use the `brokerRunning` when creating the connection factory; `getConnectionFactory()` returns the rule’s RabbitMQ `ConnectionFactory`. +The following example shows how to do so: + +``` +@Bean +public CachingConnectionFactory rabbitConnectionFactory() { + return new CachingConnectionFactory(brokerRunning.getConnectionFactory()); +} +``` + +##### Using `LongRunningIntegrationTest` + +`LongRunningIntegrationTest` is a rule that disables long running tests. +You might want to use this on a developer system but ensure that the rule is disabled on, for example, nightly CI builds. + +The following example shows its usage: + +``` +@Rule +public LongRunningIntegrationTest longTests = new LongRunningIntegrationTest(); +``` + +To disable the rule at runtime, set an environment variable called `RUN_LONG_INTEGRATION_TESTS` to `true`. + +#### 4.5.6. JUnit5 Conditions + +Version 2.0.2 introduced support for JUnit5. + +##### Using the `@RabbitAvailable` Annotation + +This class-level annotation is similar to the `BrokerRunning` `@Rule` discussed in [JUnit4 `@Rules`](#junit-rules). +It is processed by the `RabbitAvailableCondition`. 
+ +The annotation has three properties: + +* `queues`: An array of queues that are declared (and purged) before each test and deleted when all tests are complete. + +* `management`: Set this to `true` if your tests also require the management plugin installed on the broker. + +* `purgeAfterEach`: (Since version 2.2) when `true` (default), the `queues` will be purged between tests. + +It is used to check whether the broker is available and skip the tests if not. +As discussed in [Configuring the Rule](#brokerRunning-configure), the environment variable called `RABBITMQ_SERVER_REQUIRED`, if `true`, causes the tests to fail fast if there is no broker. +You can configure the condition by using environment variables as discussed in [Configuring the Rule](#brokerRunning-configure). + +In addition, the `RabbitAvailableCondition` supports argument resolution for parameterized test constructors and methods. +Two argument types are supported: + +* `BrokerRunningSupport`: The instance (before 2.2, this was a JUnit 4 `BrokerRunning` instance) + +* `ConnectionFactory`: The `BrokerRunningSupport` instance’s RabbitMQ connection factory + +The following example shows both: + +``` +@RabbitAvailable(queues = "rabbitAvailableTests.queue") +public class RabbitAvailableCTORInjectionTests { + + private final ConnectionFactory connectionFactory; + + public RabbitAvailableCTORInjectionTests(BrokerRunningSupport brokerRunning) { + this.connectionFactory = brokerRunning.getConnectionFactory(); + } + + @Test + public void test(ConnectionFactory cf) throws Exception { + assertSame(cf, this.connectionFactory); + Connection conn = this.connectionFactory.newConnection(); + Channel channel = conn.createChannel(); + DeclareOk declareOk = channel.queueDeclarePassive("rabbitAvailableTests.queue"); + assertEquals(0, declareOk.getConsumerCount()); + channel.close(); + conn.close(); + } + +} +``` + +The preceding test is in the framework itself and verifies the argument injection and that the condition 
created the queue properly. + +A practical user test might be as follows: + +``` +@RabbitAvailable(queues = "rabbitAvailableTests.queue") +public class RabbitAvailableCTORInjectionTests { + + private final CachingConnectionFactory connectionFactory; + + public RabbitAvailableCTORInjectionTests(BrokerRunningSupport brokerRunning) { + this.connectionFactory = + new CachingConnectionFactory(brokerRunning.getConnectionFactory()); + } + + @Test + public void test() throws Exception { + RabbitTemplate template = new RabbitTemplate(this.connectionFactory); + ... + } +} +``` + +When you use a Spring annotation application context within a test class, you can get a reference to the condition’s connection factory through a static method called `RabbitAvailableCondition.getBrokerRunning()`. + +| |Starting with version 2.2, `getBrokerRunning()` returns a `BrokerRunningSupport` object; previously, the JUnit 4 `BrokerRunning` instance was returned.
The new class has the same API as `BrokerRunning`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following test comes from the framework and demonstrates the usage: + +``` +@RabbitAvailable(queues = { + RabbitTemplateMPPIntegrationTests.QUEUE, + RabbitTemplateMPPIntegrationTests.REPLIES }) +@SpringJUnitConfig +@DirtiesContext(classMode = ClassMode.AFTER_EACH_TEST_METHOD) +public class RabbitTemplateMPPIntegrationTests { + + public static final String QUEUE = "mpp.tests"; + + public static final String REPLIES = "mpp.tests.replies"; + + @Autowired + private RabbitTemplate template; + + @Autowired + private Config config; + + @Test + public void test() { + + ... + + } + + @Configuration + @EnableRabbit + public static class Config { + + @Bean + public CachingConnectionFactory cf() { + return new CachingConnectionFactory(RabbitAvailableCondition + .getBrokerRunning() + .getConnectionFactory()); + } + + @Bean + public RabbitTemplate template() { + + ... + + } + + @Bean + public SimpleRabbitListenerContainerFactory + rabbitListenerContainerFactory() { + + ... + + } + + @RabbitListener(queues = QUEUE) + public byte[] foo(byte[] in) { + return in; + } + + } + +} +``` + +##### Using the `@LongRunning` Annotation + +Similar to the `LongRunningIntegrationTest` JUnit4 `@Rule`, this annotation causes tests to be skipped unless an environment variable (or system property) is set to `true`. +The following example shows how to use it: + +``` +@RabbitAvailable(queues = SimpleMessageListenerContainerLongTests.QUEUE) +@LongRunning +public class SimpleMessageListenerContainerLongTests { + + public static final String QUEUE = "SimpleMessageListenerContainerLongTests.queue"; + +... 
+ +} +``` + +By default, the variable is `RUN_LONG_INTEGRATION_TESTS`, but you can specify the variable name in the annotation’s `value` attribute. + +## 5. Spring Integration - Reference + +This part of the reference documentation provides a quick introduction to the AMQP support within the Spring Integration project. + +### 5.1. Spring Integration AMQP Support + +This brief chapter covers the relationship between the Spring Integration and the Spring AMQP projects. + +#### 5.1.1. Introduction + +The [Spring Integration](https://www.springsource.org/spring-integration) project includes AMQP Channel Adapters and Gateways that build upon the Spring AMQP project. +Those adapters are developed and released in the Spring Integration project. +In Spring Integration, “Channel Adapters” are unidirectional (one-way), whereas “Gateways” are bidirectional (request-reply). +We provide an inbound-channel-adapter, an outbound-channel-adapter, an inbound-gateway, and an outbound-gateway. + +Since the AMQP adapters are part of the Spring Integration release, the documentation is available as part of the Spring Integration distribution. +We provide a quick overview of the main features here. +See the [Spring Integration Reference Guide](https://docs.spring.io/spring-integration/reference/htmlsingle/) for much more detail. + +#### 5.1.2. Inbound Channel Adapter + +To receive AMQP Messages from a queue, you can configure an ``. +The following example shows how to configure an inbound channel adapter: + +``` + +``` + +#### 5.1.3. Outbound Channel Adapter + +To send AMQP Messages to an exchange, you can configure an ``. +You can optionally provide a 'routing-key' in addition to the exchange name. +The following example shows how to define an outbound channel adapter: + +``` + +``` + +#### 5.1.4. Inbound Gateway + +To receive an AMQP Message from a queue and respond to its reply-to address, you can configure an ``. 
+The following example shows how to define an inbound gateway: + +``` + +``` + +#### 5.1.5. Outbound Gateway + +To send AMQP Messages to an exchange and receive back a response from a remote client, you can configure an ``. +You can optionally provide a 'routing-key' in addition to the exchange name. +The following example shows how to define an outbound gateway: + +``` + +``` + +## 6. Other Resources + +In addition to this reference documentation, there exist a number of other resources that may help you learn about AMQP. + +### 6.1. Further Reading + +For those who are not familiar with AMQP, the [specification](https://www.amqp.org/resources/download) is actually quite readable. +It is, of course, the authoritative source of information, and the Spring AMQP code should be easy to understand for anyone who is familiar with the spec. +Our current implementation of the RabbitMQ support is based on their 2.8.x version, and it officially supports AMQP 0.8 and 0.9.1. +We recommend reading the 0.9.1 document. + +There are many great articles, presentations, and blogs available on the RabbitMQ [Getting Started](https://www.rabbitmq.com/how.html) page. +Since that is currently the only supported implementation for Spring AMQP, we also recommend that as a general starting point for all broker-related concerns. + +## Appendix A: Change History + +This section describes what changes have been made as versions have changed. + +### A.1. Current Release + +See [What’s New](#whats-new). + +### A.2. Previous Releases + +#### A.2.1. Changes in 2.3 Since 2.2 + +This section describes the changes between version 2.2 and version 2.3. +See [Change History](#change-history) for changes in previous versions. + +##### Connection Factory Changes + +Two additional connection factories are now provided. +See [Choosing a Connection Factory](#choosing-factory) for more information. + +##### `@RabbitListener` Changes + +You can now specify a reply content type. 
+See [Reply ContentType](#reply-content-type) for more information. + +##### Message Converter Changes + +The `Jackson2JsonMessageConverter` and `Jackson2XmlMessageConverter` can now deserialize abstract classes (including interfaces) if the `ObjectMapper` is configured with a custom deserializer. +See [Deserializing Abstract Classes](#jackson-abstract) for more information. + +##### Testing Changes + +A new annotation `@SpringRabbitTest` is provided to automatically configure some infrastructure beans for when you are not using `SpringBootTest`. +See [@SpringRabbitTest](#spring-rabbit-test) for more information. + +##### RabbitTemplate Changes + +The template’s `ReturnCallback` has been refactored as `ReturnsCallback` for simpler use in lambda expressions. +See [Correlated Publisher Confirms and Returns](#template-confirms) for more information. + +When using returns and correlated confirms, the `CorrelationData` now requires a unique `id` property. +See [Correlated Publisher Confirms and Returns](#template-confirms) for more information. + +When using direct reply-to, you can now configure the template such that the server does not need to return correlation data with the reply. +See [RabbitMQ Direct reply-to](#direct-reply-to) for more information. + +##### Listener Container Changes + +A new listener container property `consumeDelay` is now available; it is helpful when using the [RabbitMQ Sharding Plugin](https://github.com/rabbitmq/rabbitmq-sharding). + +The default `JavaLangErrorHandler` now calls `System.exit(99)`. +To revert to the previous behavior (do nothing), add a no-op handler. + +The containers now support the `globalQos` property to apply the `prefetchCount` globally for the channel rather than for each consumer on the channel. + +See [Message Listener Container Configuration](#containerAttributes) for more information. + +##### MessagePostProcessor Changes + +The compressing `MessagePostProcessor` s now use a comma to separate multiple content encodings instead of a colon. 
+The decompressors can handle both formats but, if you produce messages with this version that are consumed by versions earlier than 2.2.12, you should configure the compressor to use the old delimiter. +See the IMPORTANT note in [Modifying Messages - Compression and More](#post-processing) for more information. + +##### Multiple Broker Support Improvements + +See [Multiple Broker (or Cluster) Support](#multi-rabbit) for more information. + +##### RepublishMessageRecoverer Changes + +A new subclass of this recoverer is now provided that supports publisher confirms. +See [Message Listeners and the Asynchronous Case](#async-listeners) for more information. + +#### A.2.2. Changes in 2.2 Since 2.1 + +This section describes the changes between version 2.1 and version 2.2. + +##### Package Changes + +The following classes/interfaces have been moved from `org.springframework.amqp.rabbit.core.support` to `org.springframework.amqp.rabbit.batch`: + +* `BatchingStrategy` + +* `MessageBatch` + +* `SimpleBatchingStrategy` + +In addition, `ListenerExecutionFailedException` has been moved from `org.springframework.amqp.rabbit.listener.exception` to `org.springframework.amqp.rabbit.support`. + +##### Dependency Changes + +JUnit (4) is now an optional dependency and will no longer appear as a transitive dependency. + +The `spring-rabbit-junit` module is now a **compile** dependency in the `spring-rabbit-test` module for a better target application development experience: with only a single `spring-rabbit-test` dependency, you get the full stack of testing utilities for AMQP components. + +##### "Breaking" API Changes + +The JUnit (5) `RabbitAvailableCondition.getBrokerRunning()` now returns a `BrokerRunningSupport` instance instead of a `BrokerRunning`, which depends on JUnit 4. +It has the same API so it’s just a matter of changing the class name of any references. +See [JUnit5 Conditions](#junit5-conditions) for more information. 
 + +##### ListenerContainer Changes + +Messages with fatal exceptions are now rejected and NOT requeued, by default, even if the acknowledge mode is manual. +See [Exception Handling](#exception-handling) for more information. + +Listener performance can now be monitored using Micrometer `Timer` s. +See [Monitoring Listener Performance](#micrometer) for more information. + +##### @RabbitListener Changes + +You can now configure an `executor` on each listener, overriding the factory configuration, to more easily identify threads associated with the listener. +You can now override the container factory’s `acknowledgeMode` property with the annotation’s `ackMode` property. +See [overriding container factory properties](#listener-property-overrides) for more information. + +When using [batching](#receiving-batch), `@RabbitListener` methods can now receive a complete batch of messages in one call instead of getting them one-at-a-time. + +When receiving batched messages one-at-a-time, the last message has the `isLastInBatch` message property set to true. + +In addition, received batched messages now contain the `amqp_batchSize` header. + +Listeners can also consume batches created in the `SimpleMessageListenerContainer`, even if the batch is not created by the producer. +See [Choosing a Container](#choose-container) for more information. + +Spring Data Projection interfaces are now supported by the `Jackson2JsonMessageConverter`. +See [Using Spring Data Projection Interfaces](#data-projection) for more information. + +The `Jackson2JsonMessageConverter` now assumes the content is JSON if there is no `contentType` property, or it is the default (`application/octet-stream`). +See [Converting from a `Message`](#Jackson2JsonMessageConverter-from-message) for more information. + +Similarly, the `Jackson2XmlMessageConverter` now assumes the content is XML if there is no `contentType` property, or it is the default (`application/octet-stream`). 
+See [`Jackson2XmlMessageConverter`](#jackson2xml) for more information. + +When a `@RabbitListener` method returns a result, the bean and `Method` are now available in the reply message properties. +This allows configuration of a `beforeSendReplyMessagePostProcessor` to, for example, set a header in the reply to indicate which method was invoked on the server. +See [Reply Management](#async-annotation-driven-reply) for more information. + +You can now configure a `ReplyPostProcessor` to make modifications to a reply message before it is sent. +See [Reply Management](#async-annotation-driven-reply) for more information. + +##### AMQP Logging Appenders Changes + +The Log4J and Logback `AmqpAppender` s now support a `verifyHostname` SSL option. + +Also these appenders now can be configured to not add MDC entries as headers. +The `addMdcAsHeaders` boolean option has been introduced to configure such a behavior. + +The appenders now support the `SaslConfig` property. + +See [Logging Subsystem AMQP Appenders](#logging) for more information. + +##### MessageListenerAdapter Changes + +The `MessageListenerAdapter` now provides a new `buildListenerArguments(Object, Channel, Message)` method to build an array of arguments to be passed into target listener and an old one is deprecated. +See [`MessageListenerAdapter`](#message-listener-adapter) for more information. + +##### Exchange/Queue Declaration Changes + +The `ExchangeBuilder` and `QueueBuilder` fluent APIs used to create `Exchange` and `Queue` objects for declaration by `RabbitAdmin` now support "well known" arguments. +See [Builder API for Queues and Exchanges](#builder-api) for more information. + +The `RabbitAdmin` has a new property `explicitDeclarationsOnly`. +See [Conditional Declaration](#conditional-declaration) for more information. + +##### Connection Factory Changes + +The `CachingConnectionFactory` has a new property `shuffleAddresses`. 
+When providing a list of broker node addresses, the list will be shuffled before creating a connection so that the order in which the connections are attempted is random. +See [Connecting to a Cluster](#cluster) for more information. + +When using Publisher confirms and returns, the callbacks are now invoked on the connection factory’s `executor`. +This avoids a possible deadlock in the `amqp-clients` library if you perform rabbit operations from within the callback. +See [Correlated Publisher Confirms and Returns](#template-confirms) for more information. + +Also, the publisher confirm type is now specified with the `ConfirmType` enum instead of the two mutually exclusive setter methods. + +The `RabbitConnectionFactoryBean` now uses TLS 1.2 by default when SSL is enabled. +See [`RabbitConnectionFactoryBean` and Configuring SSL](#rabbitconnectionfactorybean-configuring-ssl) for more information. + +##### New MessagePostProcessor Classes + +Classes `DeflaterPostProcessor` and `InflaterPostProcessor` were added to support compression and decompression, respectively, when the message content-encoding is set to `deflate`. + +##### Other Changes + +The `Declarables` object (for declaring multiple queues, exchanges, bindings) now has a filtered getter for each type. +See [Declaring Collections of Exchanges, Queues, and Bindings](#collection-declaration) for more information. + +You can now customize each `Declarable` bean before the `RabbitAdmin` processes the declaration thereof. +See [Automatic Declaration of Exchanges, Queues, and Bindings](#automatic-declaration) for more information. + +`singleActiveConsumer()` has been added to the `QueueBuilder` to set the `x-single-active-consumer` queue argument. +See [Builder API for Queues and Exchanges](#builder-api) for more information. + +Outbound headers with values of type `Class` are now mapped using `getName()` instead of `toString()`. 
+See [Message Properties Converters](#message-properties-converters) for more information. + +Recovery of failed producer-created batches is now supported. +See [Retry with Batch Listeners](#batch-retry) for more information. + +#### A.2.3. Changes in 2.1 Since 2.0 + +##### AMQP Client library + +Spring AMQP now uses the 5.4.x version of the `amqp-client` library provided by the RabbitMQ team. +This client has auto-recovery configured by default. +See [RabbitMQ Automatic Connection/Topology recovery](#auto-recovery). + +| |As of version 4.0, the client enables automatic recovery by default.
While compatible with this feature, Spring AMQP has its own recovery mechanisms and the client recovery feature generally is not needed.
We recommend disabling `amqp-client` automatic recovery, to avoid getting `AutoRecoverConnectionNotCurrentlyOpenException` instances when the broker is available but the connection has not yet recovered.
Starting with version 1.7.1, Spring AMQP disables it unless you explicitly create your own RabbitMQ connection factory and provide it to the `CachingConnectionFactory`.
RabbitMQ `ConnectionFactory` instances created by the `RabbitConnectionFactoryBean` also have the option disabled by default.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Package Changes + +Certain classes have moved to different packages. +Most are internal classes and do not affect user applications. +Two exceptions are `ChannelAwareMessageListener` and `RabbitListenerErrorHandler`. +These interfaces are now in `org.springframework.amqp.rabbit.listener.api`. + +##### Publisher Confirms Changes + +Channels enabled for publisher confirmations are not returned to the cache while there are outstanding confirmations. +See [Correlated Publisher Confirms and Returns](#template-confirms) for more information. + +##### Listener Container Factory Improvements + +You can now use the listener container factories to create any listener container, not only those for use with `@RabbitListener` annotations or the `@RabbitListenerEndpointRegistry`. +See [Using Container Factories](#using-container-factories) for more information. + +`ChannelAwareMessageListener` now inherits from `MessageListener`. + +##### Broker Event Listener + +A `BrokerEventListener` is introduced to publish selected broker events as `ApplicationEvent` instances. +See [Broker Event Listener](#broker-events) for more information. 
 + +##### RabbitAdmin Changes + +The `RabbitAdmin` discovers beans of type `Declarables` (which is a container for `Declarable` - `Queue`, `Exchange`, and `Binding` objects) and declares the contained objects on the broker. +Users are discouraged from using the old mechanism of declaring `List<Queue>` (and others) and should use `Declarables` beans instead. +By default, the old mechanism is disabled. +See [Declaring Collections of Exchanges, Queues, and Bindings](#collection-declaration) for more information. + +`AnonymousQueue` instances are now declared with `x-queue-master-locator` set to `client-local` by default, to ensure the queues are created on the node the application is connected to. +See [Configuring the Broker](#broker-configuration) for more information. + +##### RabbitTemplate Changes + +You can now configure the `RabbitTemplate` with the `noLocalReplyConsumer` option to control a `noLocal` flag for reply consumers in the `sendAndReceive()` operations. +See [Request/Reply Messaging](#request-reply) for more information. + +`CorrelationData` for publisher confirmations now has a `ListenableFuture`, which you can use to get the acknowledgment instead of using a callback. +When returns and confirmations are enabled, the correlation data, if provided, is populated with the returned message. +See [Correlated Publisher Confirms and Returns](#template-confirms) for more information. + +A method called `replyTimedOut` is now provided to notify subclasses that a reply has timed out, allowing for any state cleanup. +See [Reply Timeout](#reply-timeout) for more information. + +You can now specify an `ErrorHandler` to be invoked when using request/reply with a `DirectReplyToMessageListenerContainer` (the default) when exceptions occur when replies are delivered (for example, late replies). +See `setReplyErrorHandler` on the `RabbitTemplate`. +(Also since 2.0.11). 
+ +##### Message Conversion + +We introduced a new `Jackson2XmlMessageConverter` to support converting messages from and to XML format. +See [`Jackson2XmlMessageConverter`](#jackson2xml) for more information. + +##### Management REST API + +The `RabbitManagementTemplate` is now deprecated in favor of the direct `com.rabbitmq.http.client.Client` (or `com.rabbitmq.http.client.ReactorNettyClient`) usage. +See [RabbitMQ REST API](#management-rest-api) for more information. + +##### `@RabbitListener` Changes + +The listener container factory can now be configured with a `RetryTemplate` and, optionally, a `RecoveryCallback` used when sending replies. +See [Enable Listener Endpoint Annotations](#async-annotation-driven-enable) for more information. + +##### Async `@RabbitListener` Return + +`@RabbitListener` methods can now return `ListenableFuture` or `Mono`. +See [Asynchronous `@RabbitListener` Return Types](#async-returns) for more information. + +##### Connection Factory Bean Changes + +By default, the `RabbitConnectionFactoryBean` now calls `enableHostnameVerification()`. +To revert to the previous behavior, set the `enableHostnameVerification` property to `false`. + +##### Connection Factory Changes + +The `CachingConnectionFactory` now unconditionally disables auto-recovery in the underlying RabbitMQ `ConnectionFactory`, even if a pre-configured instance is provided in a constructor. +While steps have been taken to make Spring AMQP compatible with auto recovery, certain corner cases have arisen where issues remain. +Spring AMQP has had its own recovery mechanism since 1.0.0 and does not need to use the recovery provided by the client. +While it is still possible to enable the feature (using `cachingConnectionFactory.getRabbitConnectionFactory()` `.setAutomaticRecoveryEnabled()`) after the `CachingConnectionFactory` is constructed, **we strongly recommend that you not do so**. 
+We recommend that you use a separate RabbitMQ `ConnectionFactory` if you need auto recovery connections when using the client factory directly (rather than using Spring AMQP components). + +##### Listener Container Changes + +The default `ConditionalRejectingErrorHandler` now completely discards messages that cause fatal errors if an `x-death` header is present. +See [Exception Handling](#exception-handling) for more information. + +##### Immediate requeue + +A new `ImmediateRequeueAmqpException` is introduced to notify a listener container that the message has to be re-queued. +To use this feature, a new `ImmediateRequeueMessageRecoverer` implementation is added. + +See [Message Listeners and the Asynchronous Case](#async-listeners) for more information. + +#### A.2.4. Changes in 2.0 Since 1.7 + +##### Using `CachingConnectionFactory` + +Starting with version 2.0.2, you can configure the `RabbitTemplate` to use a different connection to that used by listener containers. +This change avoids deadlocked consumers when producers are blocked for any reason. +See [Using a Separate Connection](#separate-connection) for more information. + +##### AMQP Client library + +Spring AMQP now uses the new 5.0.x version of the `amqp-client` library provided by the RabbitMQ team. +This client has auto recovery configured by default. +See [RabbitMQ Automatic Connection/Topology recovery](#auto-recovery). + +| |As of version 4.0, the client enables automatic recovery by default.
While compatible with this feature, Spring AMQP has its own recovery mechanisms, and the client recovery feature generally is not needed.
We recommend that you disable `amqp-client` automatic recovery, to avoid getting `AutoRecoverConnectionNotCurrentlyOpenException` instances when the broker is available but the connection has not yet recovered.
Starting with version 1.7.1, Spring AMQP disables it unless you explicitly create your own RabbitMQ connection factory and provide it to the `CachingConnectionFactory`.
RabbitMQ `ConnectionFactory` instances created by the `RabbitConnectionFactoryBean` also have the option disabled by default.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### General Changes + +The `ExchangeBuilder` now builds durable exchanges by default. +The `@Exchange` annotation used within a `@QeueueBinding` also declares durable exchanges by default. +The `@Queue` annotation used within a `@RabbitListener` by default declares durable queues if named and non-durable if anonymous. +See [Builder API for Queues and Exchanges](#builder-api) and [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information. + +##### Deleted Classes + +`UniquelyNameQueue` is no longer provided. +It is unusual to create a durable non-auto-delete queue with a unique name. +This class has been deleted. +If you require its functionality, use `new Queue(UUID.randomUUID().toString())`. + +##### New Listener Container + +The `DirectMessageListenerContainer` has been added alongside the existing `SimpleMessageListenerContainer`. +See [Choosing a Container](#choose-container) and [Message Listener Container Configuration](#containerAttributes) for information about choosing which container to use as well as how to configure them. 
+ +##### Log4j Appender + +This appender is no longer available due to the end-of-life of log4j. +See [Logging Subsystem AMQP Appenders](#logging) for information about the available log appenders. + +##### `RabbitTemplate` Changes + +| |Previously, a non-transactional `RabbitTemplate` participated in an existing transaction if it ran on a transactional listener container thread.
This was a serious bug.
However, users might have relied on this behavior.
Starting with version 1.6.2, you must set the `channelTransacted` boolean on the template for it to participate in the container transaction.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `RabbitTemplate` now uses a `DirectReplyToMessageListenerContainer` (by default) instead of creating a new consumer for each request. +See [RabbitMQ Direct reply-to](#direct-reply-to) for more information. + +The `AsyncRabbitTemplate` now supports direct reply-to. +See [Async Rabbit Template](#async-template) for more information. + +The `RabbitTemplate` and `AsyncRabbitTemplate` now have `receiveAndConvert` and `convertSendAndReceiveAsType` methods that take a `ParameterizedTypeReference` argument, letting the caller specify the type to which to convert the result. +This is particularly useful for complex types or when type information is not conveyed in message headers. +It requires a `SmartMessageConverter` such as the `Jackson2JsonMessageConverter`. +See [Receiving Messages](#receiving-messages), [Request/Reply Messaging](#request-reply), [Async Rabbit Template](#async-template), and [Converting From a `Message` With `RabbitTemplate`](#json-complex) for more information. + +You can now use a `RabbitTemplate` to perform multiple operations on a dedicated channel. +See [Scoped Operations](#scoped-operations) for more information. + +##### Listener Adapter + +A convenient `FunctionalInterface` is available for using lambdas with the `MessageListenerAdapter`. +See [`MessageListenerAdapter`](#message-listener-adapter) for more information. 
+ +##### Listener Container Changes + +###### Prefetch Default Value + +The prefetch default value used to be 1, which could lead to under-utilization of efficient consumers. +The default prefetch value is now 250, which should keep consumers busy in most common scenarios and, +thus, improve throughput. + +| |There are scenarios where the prefetch value should
be low — for example, with large messages, especially if the processing is slow (messages could add up
to a large amount of memory in the client process), and if strict message ordering is necessary
(the prefetch value should be set back to 1 in this case).
Also, with low-volume messaging and multiple consumers (including concurrency within a single listener container instance), you may wish to reduce the prefetch to get a more even distribution of messages across consumers.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For more background about prefetch, see this post about [consumer utilization in RabbitMQ](https://www.rabbitmq.com/blog/2014/04/14/finding-bottlenecks-with-rabbitmq-3-3/)and this post about [queuing theory](https://www.rabbitmq.com/blog/2012/05/11/some-queuing-theory-throughput-latency-and-bandwidth/). + +###### Message Count + +Previously, `MessageProperties.getMessageCount()` returned `0` for messages emitted by the container. +This property applies only when you use `basicGet` (for example, from `RabbitTemplate.receive()` methods) and is now initialized to `null` for container messages. + +###### Transaction Rollback Behavior + +Message re-queue on transaction rollback is now consistent, regardless of whether or not a transaction manager is configured. +See [A note on Rollback of Received Messages](#transaction-rollback) for more information. + +###### Shutdown Behavior + +If the container threads do not respond to a shutdown within `shutdownTimeout`, the channels are forced closed by default. +See [Message Listener Container Configuration](#containerAttributes) for more information. 
+ +###### After Receive Message Post Processors + +If a `MessagePostProcessor` in the `afterReceiveMessagePostProcessors` property returns `null`, the message is discarded (and acknowledged if appropriate). + +##### Connection Factory Changes + +The connection and channel listener interfaces now provide a mechanism to obtain information about exceptions. +See [Connection and Channel Listeners](#connection-channel-listeners) and [Publishing is Asynchronous — How to Detect Successes and Failures](#publishing-is-async) for more information. + +A new `ConnectionNameStrategy` is now provided to populate the application-specific identification of the target RabbitMQ connection from the `AbstractConnectionFactory`. +See [Connection and Resource Management](#connections) for more information. + +##### Retry Changes + +The `MissingMessageIdAdvice` is no longer provided. +Its functionality is now built-in. +See [Failures in Synchronous Operations and Options for Retry](#retry) for more information. + +##### Anonymous Queue Naming + +By default, `AnonymousQueues` are now named with the default `Base64UrlNamingStrategy` instead of a simple `UUID` string. +See [`AnonymousQueue`](#anonymous-queue) for more information. + +##### `@RabbitListener` Changes + +You can now provide simple queue declarations (bound only to the default exchange) in `@RabbitListener` annotations. +See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information. + +You can now configure `@RabbitListener` annotations so that any exceptions are returned to the sender. +You can also configure a `RabbitListenerErrorHandler` to handle exceptions. +See [Handling Exceptions](#annotation-error-handling) for more information. + +You can now bind a queue with multiple routing keys when you use the `@QueueBinding` annotation. +Also `@QueueBinding.exchange()` now supports custom exchange types and declares durable exchanges by default. 
+ +You can now set the `concurrency` of the listener container at the annotation level rather than having to configure a different container factory for different concurrency settings. + +You can now set the `autoStartup` property of the listener container at the annotation level, overriding the default setting in the container factory. + +You can now set after receive and before send (reply) `MessagePostProcessor` instances in the `RabbitListener` container factories. + +See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information. + +Starting with version 2.0.3, one of the `@RabbitHandler` annotations on a class-level `@RabbitListener` can be designated as the default. +See [Multi-method Listeners](#annotation-method-selection) for more information. + +##### Container Conditional Rollback + +When using an external transaction manager (such as JDBC), rule-based rollback is now supported when you provide the container with a transaction attribute. +It is also now more flexible when you use a transaction advice. +See [Conditional Rollback](#conditional-rollback) for more information. + +##### Remove Jackson 1.x support + +Deprecated in previous versions, Jackson `1.x` converters and related components have now been deleted. +You can use similar components based on Jackson 2.x. +See [Jackson2JsonMessageConverter](#json-message-converter) for more information. + +##### JSON Message Converter + +When the `*TypeId*` is set to `Hashtable` for an inbound JSON message, the default conversion type is now `LinkedHashMap`. +Previously, it was `Hashtable`. +To revert to a `Hashtable`, you can use `setDefaultMapType` on the `DefaultClassMapper`. + +##### XML Parsers + +When parsing `Queue` and `Exchange` XML components, the parsers no longer register the `name` attribute value as a bean alias if an `id` attribute is present. +See [A Note On the `id` and `name` Attributes](#note-id-name) for more information. 
+ +##### Blocked Connection + +You can now inject the `com.rabbitmq.client.BlockedListener` into the `org.springframework.amqp.rabbit.connection.Connection` object. +Also, the `ConnectionBlockedEvent` and `ConnectionUnblockedEvent` events are emitted by the `ConnectionFactory` when the connection is blocked or unblocked by the Broker. + +See [Connection and Resource Management](#connections) for more information. + +#### A.2.5. Changes in 1.7 Since 1.6 + +##### AMQP Client library + +Spring AMQP now uses the new 4.0.x version of the `amqp-client` library provided by the RabbitMQ team. +This client has auto-recovery configured by default. +See [RabbitMQ Automatic Connection/Topology recovery](#auto-recovery). + +| |The 4.0.x client enables automatic recovery by default.
While compatible with this feature, Spring AMQP has its own recovery mechanisms, and the client recovery feature generally is not needed.
We recommend disabling `amqp-client` automatic recovery, to avoid getting `AutoRecoverConnectionNotCurrentlyOpenException` instances when the broker is available but the connection has not yet recovered.
Starting with version 1.7.1, Spring AMQP disables it unless you explicitly create your own RabbitMQ connection factory and provide it to the `CachingConnectionFactory`.
RabbitMQ `ConnectionFactory` instances created by the `RabbitConnectionFactoryBean` also have the option disabled by default.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Log4j 2 upgrade + +The minimum Log4j 2 version (for the `AmqpAppender`) is now `2.7`. +The framework is no longer compatible with previous versions. +See [Logging Subsystem AMQP Appenders](#logging) for more information. + +##### Logback Appender + +This appender no longer captures caller data (method, line number) by default. +You can re-enable it by setting the `includeCallerData` configuration option. +See [Logging Subsystem AMQP Appenders](#logging) for information about the available log appenders. + +##### Spring Retry Upgrade + +The minimum Spring Retry version is now `1.2`. +The framework is no longer compatible with previous versions. + +###### Shutdown Behavior + +You can now set `forceCloseChannel` to `true` so that, if the container threads do not respond to a shutdown within `shutdownTimeout`, the channels are forced closed, +causing any unacked messages to be re-queued. +See [Message Listener Container Configuration](#containerAttributes) for more information. + +##### FasterXML Jackson upgrade + +The minimum Jackson version is now `2.8`. +The framework is no longer compatible with previous versions. 
+ +##### JUnit `@Rules` + +Rules that have previously been used internally by the framework have now been made available in a separate jar called `spring-rabbit-junit`. +See [JUnit4 `@Rules`](#junit-rules) for more information. + +##### Container Conditional Rollback + +When you use an external transaction manager (such as JDBC), rule-based rollback is now supported when you provide the container with a transaction attribute. +It is also now more flexible when you use a transaction advice. + +##### Connection Naming Strategy + +A new `ConnectionNameStrategy` is now provided to populate the application-specific identification of the target RabbitMQ connection from the `AbstractConnectionFactory`. +See [Connection and Resource Management](#connections) for more information. + +##### Listener Container Changes + +###### Transaction Rollback Behavior + +You can now configure message re-queue on transaction rollback to be consistent, regardless of whether or not a transaction manager is configured. +See [A note on Rollback of Received Messages](#transaction-rollback) for more information. + +#### A.2.6. Earlier Releases + +See [Previous Releases](#previous-whats-new) for changes in previous versions. + +#### A.2.7. Changes in 1.6 Since 1.5 + +##### Testing Support + +A new testing support library is now provided. +See [Testing Support](#testing) for more information. + +##### Builder + +Builders that provide a fluent API for configuring `Queue` and `Exchange` objects are now available. +See [Builder API for Queues and Exchanges](#builder-api) for more information. + +##### Namespace Changes + +###### Connection Factory + +You can now add a `thread-factory` to a connection factory bean declaration — for example, to name the threads +created by the `amqp-client` library. +See [Connection and Resource Management](#connections) for more information. + +When you use `CacheMode.CONNECTION`, you can now limit the total number of connections allowed. 
+See [Connection and Resource Management](#connections) for more information. + +###### Queue Definitions + +You can now provide a naming strategy for anonymous queues. +See [`AnonymousQueue`](#anonymous-queue) for more information. + +##### Listener Container Changes + +###### Idle Message Listener Detection + +You can now configure listener containers to publish `ApplicationEvent` instances when idle. +See [Detecting Idle Asynchronous Consumers](#idle-containers) for more information. + +###### Mismatched Queue Detection + +By default, when a listener container starts, if queues with mismatched properties or arguments are detected, +the container logs the exception but continues to listen. +The container now has a property called `mismatchedQueuesFatal`, which prevents the container (and context) from +starting if the problem is detected during startup. +It also stops the container if the problem is detected later, such as after recovering from a connection failure. +See [Message Listener Container Configuration](#containerAttributes) for more information. + +###### Listener Container Logging + +Now, listener container provides its `beanName` to the internal `SimpleAsyncTaskExecutor` as a `threadNamePrefix`. +It is useful for logs analysis. + +###### Default Error Handler + +The default error handler (`ConditionalRejectingErrorHandler`) now considers irrecoverable `@RabbitListener`exceptions as fatal. +See [Exception Handling](#exception-handling) for more information. + +##### `AutoDeclare` and `RabbitAdmin` Instances + +See [Message Listener Container Configuration](#containerAttributes) (`autoDeclare`) for some changes to the semantics of that option with respect to the use +of `RabbitAdmin` instances in the application context. + +##### `AmqpTemplate`: Receive with Timeout + +A number of new `receive()` methods with `timeout` have been introduced for the `AmqpTemplate`and its `RabbitTemplate` implementation. 
+See [Polling Consumer](#polling-consumer) for more information. + +##### Using `AsyncRabbitTemplate` + +A new `AsyncRabbitTemplate` has been introduced. +This template provides a number of send and receive methods, where the return value is a `ListenableFuture`, which can +be used later to obtain the result either synchronously or asynchronously. +See [Async Rabbit Template](#async-template) for more information. + +##### `RabbitTemplate` Changes + +1.4.1 introduced the ability to use [direct reply-to](https://www.rabbitmq.com/direct-reply-to.html) when the broker supports it. +It is more efficient than using a temporary queue for each reply. +This version lets you override this default behavior and use a temporary queue by setting the `useTemporaryReplyQueues` property to `true`. +See [RabbitMQ Direct reply-to](#direct-reply-to) for more information. + +The `RabbitTemplate` now supports a `user-id-expression` (`userIdExpression` when using Java configuration). +See [Validated User-ID RabbitMQ documentation](https://www.rabbitmq.com/validated-user-id.html) and [Validated User Id](#template-user-id) for more information. + +##### Message Properties + +###### Using `CorrelationId` + +The `correlationId` message property can now be a `String`. +See [Message Properties Converters](#message-properties-converters) for more information. + +###### Long String Headers + +Previously, the `DefaultMessagePropertiesConverter` “converted” headers longer than the long string limit (default 1024) +to a `DataInputStream` (actually, it referenced the `LongString` instance’s `DataInputStream`). +On output, this header was not converted (except to a String — for example, `[[email protected]](/cdn-cgi/l/email-protection)` by calling`toString()` on the stream). + +With this release, long `LongString` instances are now left as `LongString` instances by default. +You can access the contents by using the `getBytes[]`, `toString()`, or `getStream()` methods. 
+A large incoming `LongString` is now correctly “converted” on output too. + +See [Message Properties Converters](#message-properties-converters) for more information. + +###### Inbound Delivery Mode + +The `deliveryMode` property is no longer mapped to the `MessageProperties.deliveryMode`. +This change avoids unintended propagation if the the same `MessageProperties` object is used to send an outbound message. +Instead, the inbound `deliveryMode` header is mapped to `MessageProperties.receivedDeliveryMode`. + +See [Message Properties Converters](#message-properties-converters) for more information. + +When using annotated endpoints, the header is provided in the header named `AmqpHeaders.RECEIVED_DELIVERY_MODE`. + +See [Annotated Endpoint Method Signature](#async-annotation-driven-enable-signature) for more information. + +###### Inbound User ID + +The `user_id` property is no longer mapped to the `MessageProperties.userId`. +This change avoids unintended propagation if the the same `MessageProperties` object is used to send an outbound message. +Instead, the inbound `userId` header is mapped to `MessageProperties.receivedUserId`. + +See [Message Properties Converters](#message-properties-converters) for more information. + +When you use annotated endpoints, the header is provided in the header named `AmqpHeaders.RECEIVED_USER_ID`. + +See [Annotated Endpoint Method Signature](#async-annotation-driven-enable-signature) for more information. + +##### `RabbitAdmin` Changes + +###### Declaration Failures + +Previously, the `ignoreDeclarationFailures` flag took effect only for `IOException` on the channel (such as mis-matched +arguments). +It now takes effect for any exception (such as `TimeoutException`). +In addition, a `DeclarationExceptionEvent` is now published whenever a declaration fails. +The `RabbitAdmin` last declaration event is also available as a property `lastDeclarationExceptionEvent`. 
+See [Configuring the Broker](#broker-configuration) for more information. + +##### `@RabbitListener` Changes + +###### Multiple Containers for Each Bean + +When you use Java 8 or later, you can now add multiple `@RabbitListener` annotations to `@Bean` classes or +their methods. +When using Java 7 or earlier, you can use the `@RabbitListeners` container annotation to provide the same +functionality. +See [`@Repeatable` `@RabbitListener`](#repeatable-rabbit-listener) for more information. + +###### `@SendTo` SpEL Expressions + +`@SendTo` for routing replies with no `replyTo` property can now be SpEL expressions evaluated against the +request/reply. +See [Reply Management](#async-annotation-driven-reply) for more information. + +###### `@QueueBinding` Improvements + +You can now specify arguments for queues, exchanges, and bindings in `@QueueBinding` annotations. +Header exchanges are now supported by `@QueueBinding`. +See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information. + +##### Delayed Message Exchange + +Spring AMQP now has first class support for the RabbitMQ Delayed Message Exchange plugin. +See [Delayed Message Exchange](#delayed-message-exchange) for more information. + +##### Exchange Internal Flag + +Any `Exchange` definitions can now be marked as `internal`, and `RabbitAdmin` passes the value to the broker when +declaring the exchange. +See [Configuring the Broker](#broker-configuration) for more information. + +##### `CachingConnectionFactory` Changes + +###### `CachingConnectionFactory` Cache Statistics + +The `CachingConnectionFactory` now provides cache properties at runtime and over JMX. +See [Runtime Cache Properties](#runtime-cache-properties) for more information. + +###### Accessing the Underlying RabbitMQ Connection Factory + +A new getter has been added to provide access to the underlying factory. +You can use this getter, for example, to add custom connection properties. 
+See [Adding Custom Client Connection Properties](#custom-client-props) for more information. + +###### Channel Cache + +The default channel cache size has been increased from 1 to 25. +See [Connection and Resource Management](#connections) for more information. + +In addition, the `SimpleMessageListenerContainer` no longer adjusts the cache size to be at least as large as the number +of `concurrentConsumers` — this was superfluous, since the container consumer channels are never cached. + +##### Using `RabbitConnectionFactoryBean` + +The factory bean now exposes a property to add client connection properties to connections made by the resulting +factory. + +##### Java Deserialization + +You can now configure a “allowed list” of allowable classes when you use Java deserialization. +You should consider creating an allowed list if you accept messages with serialized java objects from +untrusted sources. +See [Java Deserialization](#java-deserialization) for more information. + +##### JSON `MessageConverter` + +Improvements to the JSON message converter now allow the consumption of messages that do not have type information +in message headers. +See [Message Conversion for Annotated Methods](#async-annotation-conversion) and [Jackson2JsonMessageConverter](#json-message-converter) for more information. + +##### Logging Appenders + +###### Log4j 2 + +A log4j 2 appender has been added, and the appenders can now be configured with an `addresses` property to connect +to a broker cluster. + +###### Client Connection Properties + +You can now add custom client connection properties to RabbitMQ connections. + +See [Logging Subsystem AMQP Appenders](#logging) for more information. + +#### A.2.8. Changes in 1.5 Since 1.4 + +##### `spring-erlang` Is No Longer Supported + +The `spring-erlang` jar is no longer included in the distribution. +Use [the RabbitMQ REST API](#management-rest-api) instead. 
+ +##### `CachingConnectionFactory` Changes + +###### Empty Addresses Property in `CachingConnectionFactory` + +Previously, if the connection factory was configured with a host and port but an empty String was also supplied for`addresses`, the host and port were ignored. +Now, an empty `addresses` String is treated the same as a `null`, and the host and port are used. + +###### URI Constructor + +The `CachingConnectionFactory` has an additional constructor, with a `URI` parameter, to configure the broker connection. + +###### Connection Reset + +A new method called `resetConnection()` has been added to let users reset the connection (or connections). +You might use this, for example, to reconnect to the primary broker after failing over to the secondary broker. +This **does** impact in-process operations. +The existing `destroy()` method does exactly the same, but the new method has a less daunting name. + +##### Properties to Control Container Queue Declaration Behavior + +When the listener container consumers start, they attempt to passively declare the queues to ensure they are available +on the broker. +Previously, if these declarations failed (for example, because the queues didn’t exist) or when an HA queue was being +moved, the retry logic was fixed at three retry attempts at five-second intervals. +If the queues still do not exist, the behavior is controlled by the `missingQueuesFatal` property (default: `true`). +Also, for containers configured to listen from multiple queues, if only a subset of queues are available, the consumer +retried the missing queues on a fixed interval of 60 seconds. + +The `declarationRetries`, `failedDeclarationRetryInterval`, and `retryDeclarationInterval` properties are now configurable. +See [Message Listener Container Configuration](#containerAttributes) for more information. + +##### Class Package Change + +The `RabbitGatewaySupport` class has been moved from `o.s.amqp.rabbit.core.support` to `o.s.amqp.rabbit.core`. 
+ +##### `DefaultMessagePropertiesConverter` Changes + +You can now configure the `DefaultMessagePropertiesConverter` to +determine the maximum length of a `LongString` that is converted +to a `String` rather than to a `DataInputStream`. +The converter has an alternative constructor that takes the value as a limit. +Previously, this limit was hard-coded at `1024` bytes. +(Also available in 1.4.4). + +##### `@RabbitListener` Improvements + +###### `@QueueBinding` for `@RabbitListener` + +The `bindings` attribute has been added to the `@RabbitListener` annotation as mutually exclusive with the `queues`attribute to allow the specification of the `queue`, its `exchange`, and `binding` for declaration by a `RabbitAdmin` on +the Broker. + +###### SpEL in `@SendTo` + +The default reply address (`@SendTo`) for a `@RabbitListener` can now be a SpEL expression. + +###### Multiple Queue Names through Properties + +You can now use a combination of SpEL and property placeholders to specify multiple queues for a listener. + +See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information. + +##### Automatic Exchange, Queue, and Binding Declaration + +You can now declare beans that define a collection of these entities, and the `RabbitAdmin` adds the +contents to the list of entities that it declares when a connection is established. +See [Declaring Collections of Exchanges, Queues, and Bindings](#collection-declaration) for more information. + +##### `RabbitTemplate` Changes + +###### `reply-address` Added + +The `reply-address` attribute has been added to the `` component as an alternative `reply-queue`. +See [Request/Reply Messaging](#request-reply) for more information. +(Also available in 1.4.4 as a setter on the `RabbitTemplate`). + +###### Blocking `receive` Methods + +The `RabbitTemplate` now supports blocking in `receive` and `convertAndReceive` methods. +See [Polling Consumer](#polling-consumer) for more information. 
+ +###### Mandatory with `sendAndReceive` Methods + +When the `mandatory` flag is set when using the `sendAndReceive` and `convertSendAndReceive` methods, the calling thread +throws an `AmqpMessageReturnedException` if the request message cannot be deliverted. +See [Reply Timeout](#reply-timeout) for more information. + +###### Improper Reply Listener Configuration + +The framework tries to verify proper configuration of a reply listener container when using a named reply queue. + +See [Reply Listener Container](#reply-listener) for more information. + +##### `RabbitManagementTemplate` Added + +The `RabbitManagementTemplate` has been introduced to monitor and configure the RabbitMQ Broker by using the REST API provided by its [management plugin](https://www.rabbitmq.com/management.html). +See [RabbitMQ REST API](#management-rest-api) for more information. + +##### + +| |The `id` attribute on the `` element has been removed.
Starting with this release, the `id` on the `` child element is used alone to name the listener container bean created for each listener element.

Normal Spring bean name overrides are applied.
If a later `<listener/>` is parsed with the same `id` as an existing bean, the new definition overrides the existing one.
Previously, bean names were composed from the `id` attributes of the `<listener-container/>` and `<listener/>` elements.

When migrating to this release, if you have `id` attributes on your `` elements, remove them and set the `id` on the child `` element instead.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +However, to support starting and stopping containers as a group, a new `group` attribute has been added. +When this attribute is defined, the containers created by this element are added to a bean with this name, of type `Collection`. +You can iterate over this group to start and stop containers. + +##### Class-Level `@RabbitListener` + +The `@RabbitListener` annotation can now be applied at the class level. +Together with the new `@RabbitHandler` method annotation, this lets you select the handler method based on payload type. +See [Multi-method Listeners](#annotation-method-selection) for more information. + +##### `SimpleMessageListenerContainer`: BackOff Support + +The `SimpleMessageListenerContainer` can now be supplied with a `BackOff` instance for `consumer` startup recovery. +See [Message Listener Container Configuration](#containerAttributes) for more information. + +##### Channel Close Logging + +A mechanism to control the log levels of channel closure has been introduced. +See [Logging Channel Close Events](#channel-close-logging). 
+ +##### Application Events + +The `SimpleMessageListenerContainer` now emits application events when consumers fail. +See [Consumer Events](#consumer-events) for more information. + +##### Consumer Tag Configuration + +Previously, the consumer tags for asynchronous consumers were generated by the broker. +With this release, it is now possible to supply a naming strategy to the listener container. +See [Consumer Tags](#consumerTags). + +##### Using `MessageListenerAdapter` + +The `MessageListenerAdapter` now supports a map of queue names (or consumer tags) to method names, to determine +which delegate method to call based on the queue from which the message was received. + +##### `LocalizedQueueConnectionFactory` Added + +`LocalizedQueueConnectionFactory` is a new connection factory that connects to the node in a cluster where a mirrored queue actually resides. + +See [Queue Affinity and the `LocalizedQueueConnectionFactory`](#queue-affinity). + +##### Anonymous Queue Naming + +Starting with version 1.5.3, you can now control how `AnonymousQueue` names are generated. +See [`AnonymousQueue`](#anonymous-queue) for more information. + +#### A.2.9. Changes in 1.4 Since 1.3 + +##### `@RabbitListener` Annotation + +POJO listeners can be annotated with `@RabbitListener`, enabled by `@EnableRabbit` or ``. +Spring Framework 4.1 is required for this feature. +See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information. + +##### `RabbitMessagingTemplate` Added + +A new `RabbitMessagingTemplate` lets you interact with RabbitMQ by using `spring-messaging` `Message` instances. +Internally, it uses the `RabbitTemplate`, which you can configure as normal. +Spring Framework 4.1 is required for this feature. +See [Messaging Integration](#template-messaging) for more information. + +##### Listener Container `missingQueuesFatal` Attribute + +1.3.5 introduced the `missingQueuesFatal` property on the `SimpleMessageListenerContainer`. 
+This is now available on the listener container namespace element. +See [Message Listener Container Configuration](#containerAttributes). + +##### RabbitTemplate `ConfirmCallback` Interface + +The `confirm` method on this interface has an additional parameter called `cause`. +When available, this parameter contains the reason for a negative acknowledgement (nack). +See [Correlated Publisher Confirms and Returns](#template-confirms). + +##### `RabbitConnectionFactoryBean` Added + +`RabbitConnectionFactoryBean` creates the underlying RabbitMQ `ConnectionFactory` used by the `CachingConnectionFactory`. +This enables configuration of SSL options using Spring’s dependency injection. +See [Configuring the Underlying Client Connection Factory](#connection-factory). + +##### Using `CachingConnectionFactory` + +The `CachingConnectionFactory` now lets the `connectionTimeout` be set as a property or as an attribute in the namespace. +It sets the property on the underlying RabbitMQ `ConnectionFactory`. +See [Configuring the Underlying Client Connection Factory](#connection-factory). + +##### Log Appender + +The Logback `org.springframework.amqp.rabbit.logback.AmqpAppender` has been introduced. +It provides options similar to `org.springframework.amqp.rabbit.log4j.AmqpAppender`. +For more information, see the JavaDoc of these classes. + +The Log4j `AmqpAppender` now supports the `deliveryMode` property (`PERSISTENT` or `NON_PERSISTENT`, default: `PERSISTENT`). +Previously, all log4j messages were `PERSISTENT`. + +The appender also supports modification of the `Message` before sending — allowing, for example, the addition of custom headers. +Subclasses should override the `postProcessMessageBeforeSend()`. + +##### Listener Queues + +The listener container now, by default, redeclares any missing queues during startup. +A new `auto-declare` attribute has been added to the `` to prevent these re-declarations. +See [`auto-delete` Queues](#lc-auto-delete). 
+ +##### `RabbitTemplate`: `mandatory` and `connectionFactorySelector` Expressions + +The `mandatoryExpression`, `sendConnectionFactorySelectorExpression`, and `receiveConnectionFactorySelectorExpression` SpEL Expression`s properties have been added to `RabbitTemplate`. +The `mandatoryExpression` is used to evaluate a `mandatory` boolean value against each request message when a `ReturnCallback` is in use. +See [Correlated Publisher Confirms and Returns](#template-confirms). +The `sendConnectionFactorySelectorExpression` and `receiveConnectionFactorySelectorExpression` are used when an `AbstractRoutingConnectionFactory` is provided, to determine the `lookupKey` for the target `ConnectionFactory` at runtime on each AMQP protocol interaction operation. +See [Routing Connection Factory](#routing-connection-factory). + +##### Listeners and the Routing Connection Factory + +You can configure a `SimpleMessageListenerContainer` with a routing connection factory to enable connection selection based on the queue names. +See [Routing Connection Factory](#routing-connection-factory). + +##### `RabbitTemplate`: `RecoveryCallback` Option + +The `recoveryCallback` property has been added for use in the `retryTemplate.execute()`. +See [Adding Retry Capabilities](#template-retry). + +##### `MessageConversionException` Change + +This exception is now a subclass of `AmqpException`. +Consider the following code: + +``` +try { + template.convertAndSend("thing1", "thing2", "cat"); +} +catch (AmqpException e) { + ... +} +catch (MessageConversionException e) { + ... +} +``` + +The second catch block is no longer reachable and needs to be moved above the catch-all `AmqpException` catch block. + +##### RabbitMQ 3.4 Compatibility + +Spring AMQP is now compatible with the RabbitMQ 3.4, including direct reply-to. +See [Compatibility](#compatibility) and [RabbitMQ Direct reply-to](#direct-reply-to) for more information. 
+ +##### `ContentTypeDelegatingMessageConverter` Added + +The `ContentTypeDelegatingMessageConverter` has been introduced to select the `MessageConverter` to use, based on the `contentType` property in the `MessageProperties`. +See [Message Converters](#message-converters) for more information. + +#### A.2.10. Changes in 1.3 Since 1.2 + +##### Listener Concurrency + +The listener container now supports dynamic scaling of the number of consumers based on workload, or you can programmatically change the concurrency without stopping the container. +See [Listener Concurrency](#listener-concurrency). + +##### Listener Queues + +The listener container now permits the queues on which it listens to be modified at runtime. +Also, the container now starts if at least one of its configured queues is available for use. +See [Listener Container Queues](#listener-queues) + +This listener container now redeclares any auto-delete queues during startup. +See [`auto-delete` Queues](#lc-auto-delete). + +##### Consumer Priority + +The listener container now supports consumer arguments, letting the `x-priority` argument be set. +See [Consumer Priority](#consumer-priority). + +##### Exclusive Consumer + +You can now configure `SimpleMessageListenerContainer` with a single `exclusive` consumer, preventing other consumers from listening to the queue. +See [Exclusive Consumer](#exclusive-consumer). + +##### Rabbit Admin + +You can now have the broker generate the queue name, regardless of `durable`, `autoDelete`, and `exclusive` settings. +See [Configuring the Broker](#broker-configuration). + +##### Direct Exchange Binding + +Previously, omitting the `key` attribute from a `binding` element of a `direct-exchange` configuration caused the queue or exchange to be bound with an empty string as the routing key. +Now it is bound with the the name of the provided `Queue` or `Exchange`. +If you wish to bind with an empty string routing key, you need to specify `key=""`. 
+ +##### `AmqpTemplate` Changes + +The `AmqpTemplate` now provides several synchronous `receiveAndReply` methods. +These are implemented by the `RabbitTemplate`. +For more information see [Receiving Messages](#receiving-messages). + +The `RabbitTemplate` now supports configuring a `RetryTemplate` to attempt retries (with optional back-off policy) for when the broker is not available. +For more information see [Adding Retry Capabilities](#template-retry). + +##### Caching Connection Factory + +You can now configure the caching connection factory to cache `Connection` instances and their `Channel` instances instead of using a single connection and caching only `Channel` instances. +See [Connection and Resource Management](#connections). + +##### Binding Arguments + +The `` of the `` now supports parsing of the `` sub-element. +You can now configure the `` of the `` with a `key/value` attribute pair (to match on a single header) or with a `` sub-element (allowing matching on multiple headers). +These options are mutually exclusive. +See [Headers Exchange](#headers-exchange). + +##### Routing Connection Factory + +A new `SimpleRoutingConnectionFactory` has been introduced. +It allows configuration of `ConnectionFactories` mapping, to determine the target `ConnectionFactory` to use at runtime. +See [Routing Connection Factory](#routing-connection-factory). + +##### `MessageBuilder` and `MessagePropertiesBuilder` + +“Fluent APIs” for building messages or message properties are now provided. +See [Message Builder API](#message-builder). + +##### `RetryInterceptorBuilder` Change + +A “Fluent API” for building listener container retry interceptors is now provided. +See [Failures in Synchronous Operations and Options for Retry](#retry). + +##### `RepublishMessageRecoverer` Added + +This new `MessageRecoverer` is provided to allow publishing a failed message to another queue (including stack trace information in the header) when retries are exhausted. 
+See [Message Listeners and the Asynchronous Case](#async-listeners). + +##### + +A default `ConditionalRejectingErrorHandler` has been added to the listener container. +This error handler detects fatal message conversion problems and instructs the container to reject the message to prevent the broker from continually redelivering the unconvertible message. +See [Exception Handling](#exception-handling). + +##### + +The `SimpleMessageListenerContainer` now has a property called `missingQueuesFatal` (default: `true`). +Previously, missing queues were always fatal. +See [Message Listener Container Configuration](#containerAttributes). + +#### A.2.11. Changes to 1.2 Since 1.1 + +##### RabbitMQ Version + +Spring AMQP now uses RabbitMQ 3.1.x by default (but retains compatibility with earlier versions). +Certain deprecations have been added for features no longer supported by RabbitMQ 3.1.x — federated exchanges and the `immediate` property on the `RabbitTemplate`. + +##### Rabbit Admin + +`RabbitAdmin` now provides an option to let exchange, queue, and binding declarations continue when a declaration fails. +Previously, all declarations stopped on a failure. +By setting `ignore-declaration-exceptions`, such exceptions are logged (at the `WARN` level), but further declarations continue. +An example where this might be useful is when a queue declaration fails because of a slightly different `ttl` setting that would normally stop other declarations from proceeding. + +`RabbitAdmin` now provides an additional method called `getQueueProperties()`. +You can use this determine if a queue exists on the broker (returns `null` for a non-existent queue). +In addition, it returns the current number of messages in the queue as well as the current number of consumers. + +##### Rabbit Template + +Previously, when the `…​sendAndReceive()` methods were used with a fixed reply queue, two custom headers were used for correlation data and to retain and restore reply queue information. 
+With this release, the standard message property (`correlationId`) is used by default, although you can specify a custom property to use instead. +In addition, nested `replyTo` information is now retained internally in the template, instead of using a custom header. + +The `immediate` property is deprecated. +You must not set this property when using RabbitMQ 3.0.x or greater. + +##### JSON Message Converters + +A Jackson 2.x `MessageConverter` is now provided, along with the existing converter that uses Jackson 1.x. + +##### Automatic Declaration of Queues and Other Items + +Previously, when declaring queues, exchanges and bindings, you could not define which connection factory was used for the declarations. +Each `RabbitAdmin` declared all components by using its connection. + +Starting with this release, you can now limit declarations to specific `RabbitAdmin` instances. +See [Conditional Declaration](#conditional-declaration). + +##### AMQP Remoting + +Facilities are now provided for using Spring remoting techniques, using AMQP as the transport for the RPC calls. +For more information see [Spring Remoting with AMQP](#remoting) + +##### Requested Heart Beats + +Several users have asked for the underlying client connection factory’s `requestedHeartBeats` property to be exposed on the Spring AMQP `CachingConnectionFactory`. +This is now available. +Previously, it was necessary to configure the AMQP client factory as a separate bean and provide a reference to it in the `CachingConnectionFactory`. + +#### A.2.12. Changes to 1.1 Since 1.0 + +##### General + +Spring-AMQP is now built with Gradle. + +Adds support for publisher confirms and returns. + +Adds support for HA queues and broker failover. + +Adds support for dead letter exchanges and dead letter queues. + +##### AMQP Log4j Appender + +Adds an option to support adding a message ID to logged messages. 
+ +Adds an option to allow the specification of a `Charset` name to be used when converting `String` to `byte[]`. \ No newline at end of file diff --git a/docs/en/spring-batch/README.md b/docs/en/spring-batch/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-batch/appendix.md b/docs/en/spring-batch/appendix.md new file mode 100644 index 0000000000000000000000000000000000000000..10e84b5cc95996eff85bcf539d44c70e52435319 --- /dev/null +++ b/docs/en/spring-batch/appendix.md @@ -0,0 +1,48 @@ +## Appendix A: List of ItemReaders and ItemWriters + +### Item Readers + +| Item Reader | Description | +|----------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|AbstractItemCountingItemStreamItemReader| Abstract base class that provides basic
restart capabilities by counting the number of items returned from
an `ItemReader`. | +| AggregateItemReader |An `ItemReader` that delivers a list as its
item, storing up objects from the injected `ItemReader` until they
are ready to be packed out as a collection. This class must be used
as a wrapper for a custom `ItemReader` that can identify the record
boundaries. The custom reader should mark the beginning and end of
records by returning an `AggregateItem` which responds `true` to its
query methods `isHeader()` and `isFooter()`. Note that this reader
is not part of the library of readers provided by Spring Batch
but given as a sample in `spring-batch-samples`.| +| AmqpItemReader | Given a Spring `AmqpTemplate`, it provides
synchronous receive methods. The `receiveAndConvert()` method
lets you receive POJO objects. | +| KafkaItemReader | An `ItemReader` that reads messages from an Apache Kafka topic.
It can be configured to read messages from multiple partitions of the same topic.
This reader stores message offsets in the execution context to support restart capabilities. | +| FlatFileItemReader | Reads from a flat file. Includes `ItemStream`and `Skippable` functionality. See [`FlatFileItemReader`](readersAndWriters.html#flatFileItemReader). | +| HibernateCursorItemReader | Reads from a cursor based on an HQL query. See[`Cursor-based ItemReaders`](readersAndWriters.html#cursorBasedItemReaders). | +| HibernatePagingItemReader | Reads from a paginated HQL query | +| ItemReaderAdapter | Adapts any class to the`ItemReader` interface. | +| JdbcCursorItemReader | Reads from a database cursor via JDBC. See[`Cursor-based ItemReaders`](readersAndWriters.html#cursorBasedItemReaders). | +| JdbcPagingItemReader | Given an SQL statement, pages through the rows,
such that large datasets can be read without running out of
memory. | +| JmsItemReader | Given a Spring `JmsOperations` object and a JMS
Destination or destination name to which to send errors, provides items
received through the injected `JmsOperations#receive()`method. | +| JpaPagingItemReader | Given a JPQL statement, pages through the
rows, such that large datasets can be read without running out of
memory. | +| ListItemReader | Provides the items from a list, one at a
time. | +| MongoItemReader | Given a `MongoOperations` object and a JSON-based MongoDB
query, provides items received from the `MongoOperations#find()` method. | +| Neo4jItemReader | Given a `Neo4jOperations` object and the components of a
Cypher query, items are returned as the result of the Neo4jOperations.query
method. | +| RepositoryItemReader | Given a Spring Data `PagingAndSortingRepository` object,
a `Sort`, and the name of method to execute, returns items provided by the
Spring Data repository implementation. | +| StoredProcedureItemReader | Reads from a database cursor resulting from the
execution of a database stored procedure. See [`StoredProcedureItemReader`](readersAndWriters.html#StoredProcedureItemReader) | +| StaxEventItemReader | Reads via StAX. see [`StaxEventItemReader`](readersAndWriters.html#StaxEventItemReader). | +| JsonItemReader | Reads items from a Json document. see [`JsonItemReader`](readersAndWriters.html#JsonItemReader). | + +### Item Writers + +| Item Writer | Description | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| AbstractItemStreamItemWriter | Abstract base class that combines the`ItemStream` and`ItemWriter` interfaces. | +| AmqpItemWriter | Given a Spring `AmqpTemplate`, it provides
for a synchronous `send` method. The `convertAndSend(Object)`method lets you send POJO objects. | +| CompositeItemWriter | Passes an item to the `write` method of each
in an injected `List` of `ItemWriter` objects. | +| FlatFileItemWriter | Writes to a flat file. Includes `ItemStream` and
Skippable functionality. See [`FlatFileItemWriter`](readersAndWriters.html#flatFileItemWriter). | +| GemfireItemWriter | Using a `GemfireOperations` object, items are either written
or removed from the Gemfire instance based on the configuration of the delete
flag. | +| HibernateItemWriter | This item writer is Hibernate-session aware
and handles some transaction-related work that a non-"hibernate-aware"
item writer would not need to know about and then delegates
to another item writer to do the actual writing. | +| ItemWriterAdapter | Adapts any class to the`ItemWriter` interface. | +| JdbcBatchItemWriter | Uses batching features from a`PreparedStatement`, if available, and can
take rudimentary steps to locate a failure during a`flush`. | +| JmsItemWriter | Using a `JmsOperations` object, items are written
to the default queue through the `JmsOperations#convertAndSend()` method. | +| JpaItemWriter | This item writer is JPA EntityManager-aware
and handles some transaction-related work that a non-"JPA-aware"`ItemWriter` would not need to know about and
then delegates to another writer to do the actual writing. | +| KafkaItemWriter |Using a `KafkaTemplate` object, items are written to the default topic through the`KafkaTemplate#sendDefault(Object, Object)` method using a `Converter` to map the key from the item.
A delete flag can also be configured to send delete events to the topic.| +| MimeMessageItemWriter | Using Spring’s `JavaMailSender`, items of type `MimeMessage`are sent as mail messages. | +| MongoItemWriter | Given a `MongoOperations` object, items are written
through the `MongoOperations.save(Object)` method. The actual write is delayed
until the last possible moment before the transaction commits. | +| Neo4jItemWriter | Given a `Neo4jOperations` object, items are persisted through the`save(Object)` method or deleted through the `delete(Object)` per the`ItemWriter’s` configuration | +|PropertyExtractingDelegatingItemWriter| Extends `AbstractMethodInvokingDelegator`creating arguments on the fly. Arguments are created by retrieving
the values from the fields in the item to be processed (through a`SpringBeanWrapper`), based on an injected array of field
names. | +| RepositoryItemWriter | Given a Spring Data `CrudRepository` implementation,
items are saved through the method specified in the configuration. | +| StaxEventItemWriter | Uses a `Marshaller` implementation to
convert each item to XML and then writes it to an XML file using
StAX. | +| JsonFileItemWriter | Uses a `JsonObjectMarshaller` implementation to
convert each item to JSON and then writes it to a JSON file.
The following code snippet illustrates a listener that logs both read +and write failures: + +``` +public class ItemFailureLoggerListener extends ItemListenerSupport { + + private static Log logger = LogFactory.getLog("item.error"); + + public void onReadError(Exception ex) { + logger.error("Encountered error on read", e); + } + + public void onWriteError(Exception ex, List items) { + logger.error("Encountered error on write", ex); + } +} +``` + +Having implemented this listener, it must be registered with a step. + +The following example shows how to register a listener with a step in XML: + +XML Configuration + +``` + +... + + + + + + +``` + +The following example shows how to register a listener with a step Java: + +Java Configuration + +``` +@Bean +public Step simpleStep() { + return this.stepBuilderFactory.get("simpleStep") + ... + .listener(new ItemFailureLoggerListener()) + .build(); +} +``` + +| |if your listener does anything in an `onError()` method, it must be inside
a transaction that is going to be rolled back. If you need to use a transactional
resource, such as a database, inside an `onError()` method, consider adding a declarative
transaction to that method (see Spring Core Reference Guide for details), and giving its
propagation attribute a value of `REQUIRES_NEW`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### Stopping a Job Manually for Business Reasons + +Spring Batch provides a `stop()` method through the `JobOperator` interface, but this is +really for use by the operator rather than the application programmer. Sometimes, it is +more convenient or makes more sense to stop a job execution from within the business +logic. + +The simplest thing to do is to throw a `RuntimeException` (one that is neither retried +indefinitely nor skipped). For example, a custom exception type could be used, as shown +in the following example: + +``` +public class PoisonPillItemProcessor implements ItemProcessor { + + @Override + public T process(T item) throws Exception { + if (isPoisonPill(item)) { + throw new PoisonPillException("Poison pill detected: " + item); + } + return item; + } +} +``` + +Another simple way to stop a step from executing is to return `null` from the`ItemReader`, as shown in the following example: + +``` +public class EarlyCompletionItemReader implements ItemReader { + + private ItemReader delegate; + + public void setDelegate(ItemReader delegate) { ... } + + public T read() throws Exception { + T item = delegate.read(); + if (isEndItem(item)) { + return null; // end the step here + } + return item; + } + +} +``` + +The previous example actually relies on the fact that there is a default implementation +of the `CompletionPolicy` strategy that signals a complete batch when the item to be +processed is `null`. 
A more sophisticated completion policy could be implemented and +injected into the `Step` through the `SimpleStepFactoryBean`. + +The following example shows how to inject a completion policy into a step in XML: + +XML Configuration + +``` + + + + + + + +``` + +The following example shows how to inject a completion policy into a step in Java: + +Java Configuration + +``` +@Bean +public Step simpleStep() { + return this.stepBuilderFactory.get("simpleStep") + .chunk(new SpecialCompletionPolicy()) + .reader(reader()) + .writer(writer()) + .build(); +} +``` + +An alternative is to set a flag in the `StepExecution`, which is checked by the `Step`implementations in the framework in between item processing. To implement this +alternative, we need access to the current `StepExecution`, and this can be achieved by +implementing a `StepListener` and registering it with the `Step`. The following example +shows a listener that sets the flag: + +``` +public class CustomItemWriter extends ItemListenerSupport implements StepListener { + + private StepExecution stepExecution; + + public void beforeStep(StepExecution stepExecution) { + this.stepExecution = stepExecution; + } + + public void afterRead(Object item) { + if (isPoisonPill(item)) { + stepExecution.setTerminateOnly(); + } + } + +} +``` + +When the flag is set, the default behavior is for the step to throw a`JobInterruptedException`. This behavior can be controlled through the`StepInterruptionPolicy`. However, the only choice is to throw or not throw an exception, +so this is always an abnormal ending to a job. + +### Adding a Footer Record + +Often, when writing to flat files, a “footer” record must be appended to the end of the +file, after all processing has be completed. This can be achieved using the`FlatFileFooterCallback` interface provided by Spring Batch. 
The `FlatFileFooterCallback`(and its counterpart, the `FlatFileHeaderCallback`) are optional properties of the`FlatFileItemWriter` and can be added to an item writer. + +The following example shows how to use the `FlatFileHeaderCallback` and the`FlatFileFooterCallback` in XML: + +XML Configuration + +``` + + + + + + +``` + +The following example shows how to use the `FlatFileHeaderCallback` and the`FlatFileFooterCallback` in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) { + return new FlatFileItemWriterBuilder() + .name("itemWriter") + .resource(outputResource) + .lineAggregator(lineAggregator()) + .headerCallback(headerCallback()) + .footerCallback(footerCallback()) + .build(); +} +``` + +The footer callback interface has just one method that is called when the footer must be +written, as shown in the following interface definition: + +``` +public interface FlatFileFooterCallback { + + void writeFooter(Writer writer) throws IOException; + +} +``` + +#### Writing a Summary Footer + +A common requirement involving footer records is to aggregate information during the +output process and to append this information to the end of the file. This footer often +serves as a summarization of the file or provides a checksum. 
+ +For example, if a batch job is writing `Trade` records to a flat file, and there is a +requirement that the total amount from all the `Trades` is placed in a footer, then the +following `ItemWriter` implementation can be used: + +``` +public class TradeItemWriter implements ItemWriter, + FlatFileFooterCallback { + + private ItemWriter delegate; + + private BigDecimal totalAmount = BigDecimal.ZERO; + + public void write(List items) throws Exception { + BigDecimal chunkTotal = BigDecimal.ZERO; + for (Trade trade : items) { + chunkTotal = chunkTotal.add(trade.getAmount()); + } + + delegate.write(items); + + // After successfully writing all items + totalAmount = totalAmount.add(chunkTotal); + } + + public void writeFooter(Writer writer) throws IOException { + writer.write("Total Amount Processed: " + totalAmount); + } + + public void setDelegate(ItemWriter delegate) {...} +} +``` + +This `TradeItemWriter` stores a `totalAmount` value that is increased with the `amount`from each `Trade` item written. After the last `Trade` is processed, the framework calls`writeFooter`, which puts the `totalAmount` into the file. Note that the `write` method +makes use of a temporary variable, `chunkTotal`, that stores the total of the`Trade` amounts in the chunk. This is done to ensure that, if a skip occurs in the`write` method, the `totalAmount` is left unchanged. It is only at the end of the `write`method, once we are guaranteed that no exceptions are thrown, that we update the`totalAmount`. + +In order for the `writeFooter` method to be called, the `TradeItemWriter` (which +implements `FlatFileFooterCallback`) must be wired into the `FlatFileItemWriter` as the`footerCallback`. 
+ +The following example shows how to wire the `TradeItemWriter` in XML: + +XML Configuration + +``` + + + + + + + + + +``` + +The following example shows how to wire the `TradeItemWriter` in Java: + +Java Configuration + +``` +@Bean +public TradeItemWriter tradeItemWriter() { + TradeItemWriter itemWriter = new TradeItemWriter(); + + itemWriter.setDelegate(flatFileItemWriter(null)); + + return itemWriter; +} + +@Bean +public FlatFileItemWriter flatFileItemWriter(Resource outputResource) { + return new FlatFileItemWriterBuilder() + .name("itemWriter") + .resource(outputResource) + .lineAggregator(lineAggregator()) + .footerCallback(tradeItemWriter()) + .build(); +} +``` + +The way that the `TradeItemWriter` has been written so far functions correctly only if +the `Step` is not restartable. This is because the class is stateful (since it stores the`totalAmount`), but the `totalAmount` is not persisted to the database. Therefore, it +cannot be retrieved in the event of a restart. In order to make this class restartable, +the `ItemStream` interface should be implemented along with the methods `open` and`update`, as shown in the following example: + +``` +public void open(ExecutionContext executionContext) { + if (executionContext.containsKey("total.amount") { + totalAmount = (BigDecimal) executionContext.get("total.amount"); + } +} + +public void update(ExecutionContext executionContext) { + executionContext.put("total.amount", totalAmount); +} +``` + +The update method stores the most current version of `totalAmount` to the`ExecutionContext` just before that object is persisted to the database. The open method +retrieves any existing `totalAmount` from the `ExecutionContext` and uses it as the +starting point for processing, allowing the `TradeItemWriter` to pick up on restart where +it left off the previous time the `Step` was run. 
+ +### Driving Query Based ItemReaders + +In the [chapter on readers and writers](readersAndWriters.html), database input using +paging was discussed. Many database vendors, such as DB2, have extremely pessimistic +locking strategies that can cause issues if the table being read also needs to be used by +other portions of the online application. Furthermore, opening cursors over extremely +large datasets can cause issues on databases from certain vendors. Therefore, many +projects prefer to use a 'Driving Query' approach to reading in data. This approach works +by iterating over keys, rather than the entire object that needs to be returned, as the +following image illustrates: + +![Driving Query Job](https://docs.spring.io/spring-batch/docs/current/reference/html/images/drivingQueryExample.png) + +Figure 1. Driving Query Job + +As you can see, the example shown in the preceding image uses the same 'FOO' table as was +used in the cursor-based example. However, rather than selecting the entire row, only the +IDs were selected in the SQL statement. So, rather than a `FOO` object being returned +from `read`, an `Integer` is returned. This number can then be used to query for the +'details', which is a complete `Foo` object, as shown in the following image: + +![Driving Query Example](https://docs.spring.io/spring-batch/docs/current/reference/html/images/drivingQueryJob.png) + +Figure 2. Driving Query Example + +An `ItemProcessor` should be used to transform the key obtained from the driving query +into a full `Foo` object. An existing DAO can be used to query for the full object based +on the key. + +### Multi-Line Records + +While it is usually the case with flat files that each record is confined to a single +line, it is common that a file might have records spanning multiple lines with multiple +formats. 
The following excerpt from a file shows an example of such an arrangement: + +``` +HEA;0013100345;2007-02-15 +NCU;Smith;Peter;;T;20014539;F +BAD;;Oak Street 31/A;;Small Town;00235;IL;US +FOT;2;2;267.34 +``` + +Everything between the line starting with 'HEA' and the line starting with 'FOT' is +considered one record. There are a few considerations that must be made in order to +handle this situation correctly: + +* Instead of reading one record at a time, the `ItemReader` must read every line of the + multi-line record as a group, so that it can be passed to the `ItemWriter` intact. + +* Each line type may need to be tokenized differently. + +Because a single record spans multiple lines and because we may not know how many lines +there are, the `ItemReader` must be careful to always read an entire record. In order to +do this, a custom `ItemReader` should be implemented as a wrapper for the`FlatFileItemReader`. + +The following example shows how to implement a custom `ItemReader` in XML: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +The following example shows how to implement a custom `ItemReader` in Java: + +Java Configuration + +``` +@Bean +public MultiLineTradeItemReader itemReader() { + MultiLineTradeItemReader itemReader = new MultiLineTradeItemReader(); + + itemReader.setDelegate(flatFileItemReader()); + + return itemReader; +} + +@Bean +public FlatFileItemReader flatFileItemReader() { + FlatFileItemReader reader = new FlatFileItemReaderBuilder<>() + .name("flatFileItemReader") + .resource(new ClassPathResource("data/iosample/input/multiLine.txt")) + .lineTokenizer(orderFileTokenizer()) + .fieldSetMapper(orderFieldSetMapper()) + .build(); + return reader; +} +``` + +To ensure that each line is tokenized properly, which is especially important for +fixed-length input, the `PatternMatchingCompositeLineTokenizer` can be used on the +delegate `FlatFileItemReader`. 
See[`FlatFileItemReader` in the Readers and +Writers chapter](readersAndWriters.html#flatFileItemReader) for more details. The delegate reader then uses a`PassThroughFieldSetMapper` to deliver a `FieldSet` for each line back to the wrapping`ItemReader`. + +The following example shows how to ensure that each line is properly tokenized in XML: + +XML Content + +``` + + + + + + + + + + +``` + +The following example shows how to ensure that each line is properly tokenized in Java: + +Java Content + +``` +@Bean +public PatternMatchingCompositeLineTokenizer orderFileTokenizer() { + PatternMatchingCompositeLineTokenizer tokenizer = + new PatternMatchingCompositeLineTokenizer(); + + Map tokenizers = new HashMap<>(4); + + tokenizers.put("HEA*", headerRecordTokenizer()); + tokenizers.put("FOT*", footerRecordTokenizer()); + tokenizers.put("NCU*", customerLineTokenizer()); + tokenizers.put("BAD*", billingAddressLineTokenizer()); + + tokenizer.setTokenizers(tokenizers); + + return tokenizer; +} +``` + +This wrapper has to be able to recognize the end of a record so that it can continually +call `read()` on its delegate until the end is reached. For each line that is read, the +wrapper should build up the item to be returned. Once the footer is reached, the item can +be returned for delivery to the `ItemProcessor` and `ItemWriter`, as shown in the +following example: + +``` +private FlatFileItemReader

delegate; + +public Trade read() throws Exception { + Trade t = null; + + for (FieldSet line = null; (line = this.delegate.read()) != null;) { + String prefix = line.readString(0); + if (prefix.equals("HEA")) { + t = new Trade(); // Record must start with header + } + else if (prefix.equals("NCU")) { + Assert.notNull(t, "No header was found."); + t.setLast(line.readString(1)); + t.setFirst(line.readString(2)); + ... + } + else if (prefix.equals("BAD")) { + Assert.notNull(t, "No header was found."); + t.setCity(line.readString(4)); + t.setState(line.readString(6)); + ... + } + else if (prefix.equals("FOT")) { + return t; // Record must end with footer + } + } + Assert.isNull(t, "No 'END' was found."); + return null; +} +``` + +### Executing System Commands + +Many batch jobs require that an external command be called from within the batch job. +Such a process could be kicked off separately by the scheduler, but the advantage of +common metadata about the run would be lost. Furthermore, a multi-step job would also +need to be split up into multiple jobs as well. + +Because the need is so common, Spring Batch provides a `Tasklet` implementation for +calling system commands. + +The following example shows how to call an external command in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to call an external command in Java: + +Java Configuration + +``` +@Bean +public SystemCommandTasklet tasklet() { + SystemCommandTasklet tasklet = new SystemCommandTasklet(); + + tasklet.setCommand("echo hello"); + tasklet.setTimeout(5000); + + return tasklet; +} +``` + +### Handling Step Completion When No Input is Found + +In many batch scenarios, finding no rows in a database or file to process is not +exceptional. The `Step` is simply considered to have found no work and completes with 0 +items read. All of the `ItemReader` implementations provided out of the box in Spring +Batch default to this approach. 
This can lead to some confusion if nothing is written out +even when input is present (which usually happens if a file was misnamed or some similar +issue arises). For this reason, the metadata itself should be inspected to determine how +much work the framework found to be processed. However, what if finding no input is +considered exceptional? In this case, programmatically checking the metadata for no items +processed and causing failure is the best solution. Because this is a common use case, +Spring Batch provides a listener with exactly this functionality, as shown in +the class definition for `NoWorkFoundStepExecutionListener`: + +``` +public class NoWorkFoundStepExecutionListener extends StepExecutionListenerSupport { + + public ExitStatus afterStep(StepExecution stepExecution) { + if (stepExecution.getReadCount() == 0) { + return ExitStatus.FAILED; + } + return null; + } + +} +``` + +The preceding `StepExecutionListener` inspects the `readCount` property of the`StepExecution` during the 'afterStep' phase to determine if no items were read. If that +is the case, an exit code `FAILED` is returned, indicating that the `Step` should fail. +Otherwise, `null` is returned, which does not affect the status of the `Step`. + +### Passing Data to Future Steps + +It is often useful to pass information from one step to another. This can be done through +the `ExecutionContext`. The catch is that there are two `ExecutionContexts`: one at the`Step` level and one at the `Job` level. The `Step` `ExecutionContext` remains only as +long as the step, while the `Job` `ExecutionContext` remains through the whole `Job`. On +the other hand, the `Step` `ExecutionContext` is updated every time the `Step` commits a +chunk, while the `Job` `ExecutionContext` is updated only at the end of each `Step`. + +The consequence of this separation is that all data must be placed in the `Step``ExecutionContext` while the `Step` is executing. 
Doing so ensures that the data is +stored properly while the `Step` runs. If data is stored to the `Job` `ExecutionContext`, +then it is not persisted during `Step` execution. If the `Step` fails, that data is lost. + +``` +public class SavingItemWriter implements ItemWriter { + private StepExecution stepExecution; + + public void write(List items) throws Exception { + // ... + + ExecutionContext stepContext = this.stepExecution.getExecutionContext(); + stepContext.put("someKey", someObject); + } + + @BeforeStep + public void saveStepExecution(StepExecution stepExecution) { + this.stepExecution = stepExecution; + } +} +``` + +To make the data available to future `Steps`, it must be “promoted” to the `Job``ExecutionContext` after the step has finished. Spring Batch provides the`ExecutionContextPromotionListener` for this purpose. The listener must be configured +with the keys related to the data in the `ExecutionContext` that must be promoted. It can +also, optionally, be configured with a list of exit code patterns for which the promotion +should occur (`COMPLETED` is the default). As with all listeners, it must be registered +on the `Step`. + +The following example shows how to promote a step to the `Job` `ExecutionContext` in XML: + +XML Configuration + +``` + + + + + + + + + + + + ... 
+ + + + + + + someKey + + + +``` + +The following example shows how to promote a step to the `Job` `ExecutionContext` in Java: + +Java Configuration + +``` +@Bean +public Job job1() { + return this.jobBuilderFactory.get("job1") + .start(step1()) + .next(step1()) + .build(); +} + +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(10) + .reader(reader()) + .writer(savingWriter()) + .listener(promotionListener()) + .build(); +} + +@Bean +public ExecutionContextPromotionListener promotionListener() { + ExecutionContextPromotionListener listener = new ExecutionContextPromotionListener(); + + listener.setKeys(new String[] {"someKey"}); + + return listener; +} +``` + +Finally, the saved values must be retrieved from the `Job` `ExecutionContext`, as shown +in the following example: + +``` +public class RetrievingItemWriter implements ItemWriter { + private Object someObject; + + public void write(List items) throws Exception { + // ... + } + + @BeforeStep + public void retrieveInterstepData(StepExecution stepExecution) { + JobExecution jobExecution = stepExecution.getJobExecution(); + ExecutionContext jobContext = jobExecution.getExecutionContext(); + this.someObject = jobContext.get("someKey"); + } +} +``` \ No newline at end of file diff --git a/docs/en/spring-batch/domain.md b/docs/en/spring-batch/domain.md new file mode 100644 index 0000000000000000000000000000000000000000..a424d19cf8a333c5ec076481396d71aa0e5b39d8 --- /dev/null +++ b/docs/en/spring-batch/domain.md @@ -0,0 +1,434 @@ +# The Domain Language of Batch + +## The Domain Language of Batch + +XMLJavaBoth + +To any experienced batch architect, the overall concepts of batch processing used in +Spring Batch should be familiar and comfortable. There are "Jobs" and "Steps" and +developer-supplied processing units called `ItemReader` and `ItemWriter`. 
However, +because of the Spring patterns, operations, templates, callbacks, and idioms, there are +opportunities for the following: + +* Significant improvement in adherence to a clear separation of concerns. + +* Clearly delineated architectural layers and services provided as interfaces. + +* Simple and default implementations that allow for quick adoption and ease of use + out-of-the-box. + +* Significantly enhanced extensibility. + +The following diagram is a simplified version of the batch reference architecture that +has been used for decades. It provides an overview of the components that make up the +domain language of batch processing. This architecture framework is a blueprint that has +been proven through decades of implementations on the last several generations of +platforms (COBOL/Mainframe, C/Unix, and now Java/anywhere). JCL and COBOL developers +are likely to be as comfortable with the concepts as C, C#, and Java developers. Spring +Batch provides a physical implementation of the layers, components, and technical +services commonly found in the robust, maintainable systems that are used to address the +creation of simple to complex batch applications, with the infrastructure and extensions +to address very complex processing needs. + +![Figure 2.1: Batch Stereotypes](https://docs.spring.io/spring-batch/docs/current/reference/html/images/spring-batch-reference-model.png) + +Figure 1. Batch Stereotypes + +The preceding diagram highlights the key concepts that make up the domain language of +Spring Batch. A Job has one to many steps, each of which has exactly one `ItemReader`, +one `ItemProcessor`, and one `ItemWriter`. A job needs to be launched (with`JobLauncher`), and metadata about the currently running process needs to be stored (in`JobRepository`). + +### Job + +This section describes stereotypes relating to the concept of a batch job. A `Job` is an +entity that encapsulates an entire batch process. 
As is common with other Spring
+projects, a `Job` is wired together with either an XML configuration file or Java-based
+configuration. This configuration may be referred to as the "job configuration". However,
+`Job` is just the top of an overall hierarchy, as shown in the following diagram:
+
+![Job Hierarchy](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-heirarchy.png)
+
+Figure 2. Job Hierarchy
+
+In Spring Batch, a `Job` is simply a container for `Step` instances. It combines multiple
+steps that belong logically together in a flow and allows for configuration of properties
+global to all steps, such as restartability. The job configuration contains:
+
+* The simple name of the job.
+
+* Definition and ordering of `Step` instances.
+
+* Whether or not the job is restartable.
+
+For those who use Java configuration, Spring Batch provides a default implementation of
+the Job interface in the form of the `SimpleJob` class, which creates some standard
+functionality on top of `Job`. When using Java-based configuration, a collection of
+builders is made available for the instantiation of a `Job`, as shown in the following
+example:
+
+```
+@Bean
+public Job footballJob() {
+    return this.jobBuilderFactory.get("footballJob")
+                     .start(playerLoad())
+                     .next(gameLoad())
+                     .next(playerSummarization())
+                     .build();
+}
+```
+
+For those who use XML configuration, Spring Batch provides a default implementation of
+the `Job` interface in the form of the `SimpleJob` class, which creates some standard
+functionality on top of `Job`. However, the batch namespace abstracts away the need to
+instantiate it directly. Instead, the `<job>` element can be used, as shown in the
+following example:
+
+```
+<job id="footballJob">
+    <step id="playerload"          parent="s1" next="gameLoad"/>
+    <step id="gameLoad"            parent="s2" next="playerSummarization"/>
+    <step id="playerSummarization" parent="s3"/>
+</job>
+```
+
+#### JobInstance
+
+A `JobInstance` refers to the concept of a logical job run. Consider a batch job that
+should be run once at the end of the day, such as the 'EndOfDay' `Job` from the preceding
+diagram. 
There is one 'EndOfDay' job, but each individual run of the `Job` must be +tracked separately. In the case of this job, there is one logical `JobInstance` per day. +For example, there is a January 1st run, a January 2nd run, and so on. If the January 1st +run fails the first time and is run again the next day, it is still the January 1st run. +(Usually, this corresponds with the data it is processing as well, meaning the January +1st run processes data for January 1st). Therefore, each `JobInstance` can have multiple +executions (`JobExecution` is discussed in more detail later in this chapter), and only +one `JobInstance` corresponding to a particular `Job` and identifying `JobParameters` can +run at a given time. + +The definition of a `JobInstance` has absolutely no bearing on the data to be loaded. +It is entirely up to the `ItemReader` implementation to determine how data is loaded. For +example, in the EndOfDay scenario, there may be a column on the data that indicates the +'effective date' or 'schedule date' to which the data belongs. So, the January 1st run +would load only data from the 1st, and the January 2nd run would use only data from the +2nd. Because this determination is likely to be a business decision, it is left up to the`ItemReader` to decide. However, using the same `JobInstance` determines whether or not +the 'state' (that is, the `ExecutionContext`, which is discussed later in this chapter) +from previous executions is used. Using a new `JobInstance` means 'start from the +beginning', and using an existing instance generally means 'start from where you left +off'. + +#### JobParameters + +Having discussed `JobInstance` and how it differs from Job, the natural question to ask +is: "How is one `JobInstance` distinguished from another?" The answer is:`JobParameters`. A `JobParameters` object holds a set of parameters used to start a batch +job. 
They can be used for identification or even as reference data during the run, as
+shown in the following image:
+
+![Job Parameters](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-stereotypes-parameters.png)
+
+Figure 3. Job Parameters
+
+In the preceding example, where there are two instances, one for January 1st, and another
+for January 2nd, there is really only one `Job`, but it has two `JobParameter` objects:
+one that was started with a job parameter of 01-01-2017 and another that was started with
+a parameter of 01-02-2017. Thus, the contract can be defined as:
+`JobInstance` = `Job` + identifying `JobParameters`. This allows a developer to
+effectively control how a `JobInstance` is defined, since they control what parameters
+are passed in.
+
+| |Not all job parameters are required to contribute to the identification of a `JobInstance`. By default, they do so. However, the framework also allows the submission
of a `Job` with parameters that do not contribute to the identity of a `JobInstance`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### JobExecution + +A `JobExecution` refers to the technical concept of a single attempt to run a Job. An +execution may end in failure or success, but the `JobInstance` corresponding to a given +execution is not considered to be complete unless the execution completes successfully. +Using the EndOfDay `Job` described previously as an example, consider a `JobInstance` for +01-01-2017 that failed the first time it was run. If it is run again with the same +identifying job parameters as the first run (01-01-2017), a new `JobExecution` is +created. However, there is still only one `JobInstance`. + +A `Job` defines what a job is and how it is to be executed, and a `JobInstance` is a +purely organizational object to group executions together, primarily to enable correct +restart semantics. A `JobExecution`, however, is the primary storage mechanism for what +actually happened during a run and contains many more properties that must be controlled +and persisted, as shown in the following table: + +| Property | Definition | +|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Status | A `BatchStatus` object that indicates the status of the execution. While running, it is`BatchStatus#STARTED`. If it fails, it is `BatchStatus#FAILED`. If it finishes
successfully, it is `BatchStatus#COMPLETED` | +| startTime | A `java.util.Date` representing the current system time when the execution was started.
This field is empty if the job has yet to start. | +| endTime | A `java.util.Date` representing the current system time when the execution finished,
regardless of whether or not it was successful. The field is empty if the job has yet to
finish. | +| exitStatus | The `ExitStatus`, indicating the result of the run. It is most important, because it
contains an exit code that is returned to the caller. See chapter 5 for more details. The
field is empty if the job has yet to finish. | +| createTime |A `java.util.Date` representing the current system time when the `JobExecution` was
first persisted. The job may not have been started yet (and thus has no start time), but
it always has a createTime, which is required by the framework for managing job level`ExecutionContexts`.| +| lastUpdated | A `java.util.Date` representing the last time a `JobExecution` was persisted. This field
is empty if the job has yet to start. | +|executionContext | The "property bag" containing any user data that needs to be persisted between
executions. | +|failureExceptions| The list of exceptions encountered during the execution of a `Job`. These can be useful
if more than one exception is encountered during the failure of a `Job`. | + +These properties are important because they are persisted and can be used to completely +determine the status of an execution. For example, if the EndOfDay job for 01-01 is +executed at 9:00 PM and fails at 9:30, the following entries are made in the batch +metadata tables: + +|JOB\_INST\_ID| JOB\_NAME | +|-------------|-----------| +| 1 |EndOfDayJob| + +|JOB\_EXECUTION\_ID|TYPE\_CD| KEY\_NAME |DATE\_VAL |IDENTIFYING| +|------------------|--------|-------------|----------|-----------| +| 1 | DATE |schedule.Date|2017-01-01| TRUE | + +|JOB\_EXEC\_ID|JOB\_INST\_ID| START\_TIME | END\_TIME |STATUS| +|-------------|-------------|----------------|----------------|------| +| 1 | 1 |2017-01-01 21:00|2017-01-01 21:30|FAILED| + +| |Column names may have been abbreviated or removed for the sake of clarity and
formatting.| +|---|---------------------------------------------------------------------------------------------| + +Now that the job has failed, assume that it took the entire night for the problem to be +determined, so that the 'batch window' is now closed. Further assuming that the window +starts at 9:00 PM, the job is kicked off again for 01-01, starting where it left off and +completing successfully at 9:30. Because it is now the next day, the 01-02 job must be +run as well, and it is kicked off just afterwards at 9:31 and completes in its normal one +hour time at 10:30. There is no requirement that one `JobInstance` be kicked off after +another, unless there is potential for the two jobs to attempt to access the same data, +causing issues with locking at the database level. It is entirely up to the scheduler to +determine when a `Job` should be run. Since they are separate `JobInstances`, Spring +Batch makes no attempt to stop them from being run concurrently. (Attempting to run the +same `JobInstance` while another is already running results in a`JobExecutionAlreadyRunningException` being thrown). 
There should now be an extra entry +in both the `JobInstance` and `JobParameters` tables and two extra entries in the`JobExecution` table, as shown in the following tables: + +|JOB\_INST\_ID| JOB\_NAME | +|-------------|-----------| +| 1 |EndOfDayJob| +| 2 |EndOfDayJob| + +|JOB\_EXECUTION\_ID|TYPE\_CD| KEY\_NAME | DATE\_VAL |IDENTIFYING| +|------------------|--------|-------------|-------------------|-----------| +| 1 | DATE |schedule.Date|2017-01-01 00:00:00| TRUE | +| 2 | DATE |schedule.Date|2017-01-01 00:00:00| TRUE | +| 3 | DATE |schedule.Date|2017-01-02 00:00:00| TRUE | + +|JOB\_EXEC\_ID|JOB\_INST\_ID| START\_TIME | END\_TIME | STATUS | +|-------------|-------------|----------------|----------------|---------| +| 1 | 1 |2017-01-01 21:00|2017-01-01 21:30| FAILED | +| 2 | 1 |2017-01-02 21:00|2017-01-02 21:30|COMPLETED| +| 3 | 2 |2017-01-02 21:31|2017-01-02 22:29|COMPLETED| + +| |Column names may have been abbreviated or removed for the sake of clarity and
formatting.| +|---|---------------------------------------------------------------------------------------------| + +### Step + +A `Step` is a domain object that encapsulates an independent, sequential phase of a batch +job. Therefore, every Job is composed entirely of one or more steps. A `Step` contains +all of the information necessary to define and control the actual batch processing. This +is a necessarily vague description because the contents of any given `Step` are at the +discretion of the developer writing a `Job`. A `Step` can be as simple or complex as the +developer desires. A simple `Step` might load data from a file into the database, +requiring little or no code (depending upon the implementations used). A more complex`Step` may have complicated business rules that are applied as part of the processing. As +with a `Job`, a `Step` has an individual `StepExecution` that correlates with a unique`JobExecution`, as shown in the following image: + +![Figure 2.1: Job Hierarchy With Steps](https://docs.spring.io/spring-batch/docs/current/reference/html/images/jobHeirarchyWithSteps.png) + +Figure 4. Job Hierarchy With Steps + +#### StepExecution + +A `StepExecution` represents a single attempt to execute a `Step`. A new `StepExecution`is created each time a `Step` is run, similar to `JobExecution`. However, if a step fails +to execute because the step before it fails, no execution is persisted for it. A`StepExecution` is created only when its `Step` is actually started. + +`Step` executions are represented by objects of the `StepExecution` class. Each execution +contains a reference to its corresponding step and `JobExecution` and transaction related +data, such as commit and rollback counts and start and end times. Additionally, each step +execution contains an `ExecutionContext`, which contains any data a developer needs to +have persisted across batch runs, such as statistics or state information needed to +restart. 
The following table lists the properties for `StepExecution`: + +| Property | Definition | +|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Status |A `BatchStatus` object that indicates the status of the execution. While running, the
status is `BatchStatus.STARTED`. If it fails, the status is `BatchStatus.FAILED`. If it
finishes successfully, the status is `BatchStatus.COMPLETED`.| +| startTime | A `java.util.Date` representing the current system time when the execution was started.
This field is empty if the step has yet to start. | +| endTime | A `java.util.Date` representing the current system time when the execution finished,
regardless of whether or not it was successful. This field is empty if the step has yet to
exit. | +| exitStatus | The `ExitStatus` indicating the result of the execution. It is most important, because
it contains an exit code that is returned to the caller. See chapter 5 for more details.
This field is empty if the job has yet to exit. | +|executionContext| The "property bag" containing any user data that needs to be persisted between
executions. | +| readCount | The number of items that have been successfully read. | +| writeCount | The number of items that have been successfully written. | +| commitCount | The number of transactions that have been committed for this execution. | +| rollbackCount | The number of times the business transaction controlled by the `Step` has been rolled
back. | +| readSkipCount | The number of times `read` has failed, resulting in a skipped item. | +|processSkipCount| The number of times `process` has failed, resulting in a skipped item. | +| filterCount | The number of items that have been 'filtered' by the `ItemProcessor`. | +| writeSkipCount | The number of times `write` has failed, resulting in a skipped item. | + +### ExecutionContext + +An `ExecutionContext` represents a collection of key/value pairs that are persisted and +controlled by the framework in order to allow developers a place to store persistent +state that is scoped to a `StepExecution` object or a `JobExecution` object. For those +familiar with Quartz, it is very similar to JobDataMap. The best usage example is to +facilitate restart. Using flat file input as an example, while processing individual +lines, the framework periodically persists the `ExecutionContext` at commit points. Doing +so allows the `ItemReader` to store its state in case a fatal error occurs during the run +or even if the power goes out. All that is needed is to put the current number of lines +read into the context, as shown in the following example, and the framework will do the +rest: + +``` +executionContext.putLong(getKey(LINES_READ_COUNT), reader.getPosition()); +``` + +Using the EndOfDay example from the `Job` Stereotypes section as an example, assume there +is one step, 'loadData', that loads a file into the database. 
After the first failed run, +the metadata tables would look like the following example: + +|JOB\_INST\_ID| JOB\_NAME | +|-------------|-----------| +| 1 |EndOfDayJob| + +|JOB\_INST\_ID|TYPE\_CD| KEY\_NAME |DATE\_VAL | +|-------------|--------|-------------|----------| +| 1 | DATE |schedule.Date|2017-01-01| + +|JOB\_EXEC\_ID|JOB\_INST\_ID| START\_TIME | END\_TIME |STATUS| +|-------------|-------------|----------------|----------------|------| +| 1 | 1 |2017-01-01 21:00|2017-01-01 21:30|FAILED| + +|STEP\_EXEC\_ID|JOB\_EXEC\_ID|STEP\_NAME| START\_TIME | END\_TIME |STATUS| +|--------------|-------------|----------|----------------|----------------|------| +| 1 | 1 | loadData |2017-01-01 21:00|2017-01-01 21:30|FAILED| + +|STEP\_EXEC\_ID| SHORT\_CONTEXT | +|--------------|-------------------| +| 1 |{piece.count=40321}| + +In the preceding case, the `Step` ran for 30 minutes and processed 40,321 'pieces', which +would represent lines in a file in this scenario. This value is updated just before each +commit by the framework and can contain multiple rows corresponding to entries within the`ExecutionContext`. Being notified before a commit requires one of the various`StepListener` implementations (or an `ItemStream`), which are discussed in more detail +later in this guide. As with the previous example, it is assumed that the `Job` is +restarted the next day. When it is restarted, the values from the `ExecutionContext` of +the last run are reconstituted from the database. When the `ItemReader` is opened, it can +check to see if it has any stored state in the context and initialize itself from there, +as shown in the following example: + +``` +if (executionContext.containsKey(getKey(LINES_READ_COUNT))) { + log.debug("Initializing for restart. 
Restart data is: " + executionContext); + + long lineCount = executionContext.getLong(getKey(LINES_READ_COUNT)); + + LineReader reader = getReader(); + + Object record = ""; + while (reader.getPosition() < lineCount && record != null) { + record = readLine(); + } +} +``` + +In this case, after the above code runs, the current line is 40,322, allowing the `Step`to start again from where it left off. The `ExecutionContext` can also be used for +statistics that need to be persisted about the run itself. For example, if a flat file +contains orders for processing that exist across multiple lines, it may be necessary to +store how many orders have been processed (which is much different from the number of +lines read), so that an email can be sent at the end of the `Step` with the total number +of orders processed in the body. The framework handles storing this for the developer, in +order to correctly scope it with an individual `JobInstance`. It can be very difficult to +know whether an existing `ExecutionContext` should be used or not. For example, using the +'EndOfDay' example from above, when the 01-01 run starts again for the second time, the +framework recognizes that it is the same `JobInstance` and on an individual `Step` basis, +pulls the `ExecutionContext` out of the database, and hands it (as part of the`StepExecution`) to the `Step` itself. Conversely, for the 01-02 run, the framework +recognizes that it is a different instance, so an empty context must be handed to the`Step`. There are many of these types of determinations that the framework makes for the +developer, to ensure the state is given to them at the correct time. It is also important +to note that exactly one `ExecutionContext` exists per `StepExecution` at any given time. +Clients of the `ExecutionContext` should be careful, because this creates a shared +keyspace. As a result, care should be taken when putting values in to ensure no data is +overwritten. 
However, the `Step` stores absolutely no data in the context, so there is no +way to adversely affect the framework. + +It is also important to note that there is at least one `ExecutionContext` per`JobExecution` and one for every `StepExecution`. For example, consider the following +code snippet: + +``` +ExecutionContext ecStep = stepExecution.getExecutionContext(); +ExecutionContext ecJob = jobExecution.getExecutionContext(); +//ecStep does not equal ecJob +``` + +As noted in the comment, `ecStep` does not equal `ecJob`. They are two different`ExecutionContexts`. The one scoped to the `Step` is saved at every commit point in the`Step`, whereas the one scoped to the Job is saved in between every `Step` execution. + +### JobRepository + +`JobRepository` is the persistence mechanism for all of the Stereotypes mentioned above. +It provides CRUD operations for `JobLauncher`, `Job`, and `Step` implementations. When a`Job` is first launched, a `JobExecution` is obtained from the repository, and, during +the course of execution, `StepExecution` and `JobExecution` implementations are persisted +by passing them to the repository. + +The Spring Batch XML namespace provides support for configuring a `JobRepository` instance +with the `` tag, as shown in the following example: + +``` + +``` + +When using Java configuration, the `@EnableBatchProcessing` annotation provides a`JobRepository` as one of the components automatically configured out of the box. + +### JobLauncher + +`JobLauncher` represents a simple interface for launching a `Job` with a given set of`JobParameters`, as shown in the following example: + +``` +public interface JobLauncher { + +public JobExecution run(Job job, JobParameters jobParameters) + throws JobExecutionAlreadyRunningException, JobRestartException, + JobInstanceAlreadyCompleteException, JobParametersInvalidException; +} +``` + +It is expected that implementations obtain a valid `JobExecution` from the`JobRepository` and execute the `Job`. 
+ +### Item Reader + +`ItemReader` is an abstraction that represents the retrieval of input for a `Step`, one +item at a time. When the `ItemReader` has exhausted the items it can provide, it +indicates this by returning `null`. More details about the `ItemReader` interface and its +various implementations can be found in[Readers And Writers](readersAndWriters.html#readersAndWriters). + +### Item Writer + +`ItemWriter` is an abstraction that represents the output of a `Step`, one batch or chunk +of items at a time. Generally, an `ItemWriter` has no knowledge of the input it should +receive next and knows only the item that was passed in its current invocation. More +details about the `ItemWriter` interface and its various implementations can be found in[Readers And Writers](readersAndWriters.html#readersAndWriters). + +### Item Processor + +`ItemProcessor` is an abstraction that represents the business processing of an item. +While the `ItemReader` reads one item, and the `ItemWriter` writes them, the`ItemProcessor` provides an access point to transform or apply other business processing. +If, while processing the item, it is determined that the item is not valid, returning`null` indicates that the item should not be written out. More details about the`ItemProcessor` interface can be found in[Readers And Writers](readersAndWriters.html#readersAndWriters). + +### Batch Namespace + +Many of the domain concepts listed previously need to be configured in a Spring`ApplicationContext`. While there are implementations of the interfaces above that can be +used in a standard bean definition, a namespace has been provided for ease of +configuration, as shown in the following example: + +``` + + + + + + + + + + + +``` + +As long as the batch namespace has been declared, any of its elements can be used. More +information on configuring a Job can be found in [Configuring and +Running a Job](job.html#configureJob). 
More information on configuring a `Step` can be found in[Configuring a Step](step.html#configureStep). \ No newline at end of file diff --git a/docs/en/spring-batch/glossary.md b/docs/en/spring-batch/glossary.md new file mode 100644 index 0000000000000000000000000000000000000000..6f43da1bc4055539a564f5309dd9fc1d8749f638 --- /dev/null +++ b/docs/en/spring-batch/glossary.md @@ -0,0 +1,122 @@ +# Glossary + +## Appendix A: Glossary + +### Spring Batch Glossary + +Batch + +An accumulation of business transactions over time. + +Batch Application Style + +Term used to designate batch as an application style in its own right, similar to +online, Web, or SOA. It has standard elements of input, validation, transformation of +information to business model, business processing, and output. In addition, it +requires monitoring at a macro level. + +Batch Processing + +The handling of a batch of many business transactions that have accumulated over a +period of time (such as an hour, a day, a week, a month, or a year). It is the +application of a process or set of processes to many data entities or objects in a +repetitive and predictable fashion with either no manual element or a separate manual +element for error processing. + +Batch Window + +The time frame within which a batch job must complete. This can be constrained by other +systems coming online, other dependent jobs needing to execute, or other factors +specific to the batch environment. + +Step + +The main batch task or unit of work. It initializes the business logic and controls the +transaction environment, based on commit interval setting and other factors. + +Tasklet + +A component created by an application developer to process the business logic for a +Step. + +Batch Job Type + +Job types describe application of jobs for particular types of processing. Common areas +are interface processing (typically flat files), forms processing (either for online +PDF generation or print formats), and report processing. 
+ +Driving Query + +A driving query identifies the set of work for a job to do. The job then breaks that +work into individual units of work. For instance, a driving query might be to identify +all financial transactions that have a status of "pending transmission" and send them +to a partner system. The driving query returns a set of record IDs to process. Each +record ID then becomes a unit of work. A driving query may involve a join (if the +criteria for selection falls across two or more tables) or it may work with a single +table. + +Item + +An item represents the smallest amount of complete data for processing. In the simplest +terms, this might be a line in a file, a row in a database table, or a particular +element in an XML file. + +Logical Unit of Work (LUW) + +A batch job iterates through a driving query (or other input source, such as a file) to +perform the set of work that the job must accomplish. Each iteration of work performed +is a unit of work. + +Commit Interval + +A set of LUWs processed within a single transaction. + +Partitioning + +Splitting a job into multiple threads where each thread is responsible for a subset of +the overall data to be processed. The threads of execution may be within the same JVM +or they may span JVMs in a clustered environment that supports workload balancing. + +Staging Table + +A table that holds temporary data while it is being processed. + +Restartable + +A job that can be executed again and assumes the same identity as when run initially. +In other words, it is has the same job instance ID. + +Rerunnable + +A job that is restartable and manages its own state in terms of the previous run’s +record processing. An example of a rerunnable step is one based on a driving query. If +the driving query can be formed so that it limits the processed rows when the job is +restarted, then it is re-runnable. This is managed by the application logic. 
Often, a +condition is added to the `where` statement to limit the rows returned by the driving +query with logic resembling "and processedFlag!= true". + +Repeat + +One of the most basic units of batch processing, it defines by repeatability calling a +portion of code until it is finished and while there is no error. Typically, a batch +process would be repeatable as long as there is input. + +Retry + +Simplifies the execution of operations with retry semantics most frequently associated +with handling transactional output exceptions. Retry is slightly different from repeat, +rather than continually calling a block of code, retry is stateful and continually +calls the same block of code with the same input, until it either succeeds or some type +of retry limit has been exceeded. It is only generally useful when a subsequent +invocation of the operation might succeed because something in the environment has +improved. + +Recover + +Recover operations handle an exception in such a way that a repeat process is able to +continue. + +Skip + +Skip is a recovery strategy often used on file input sources as the strategy for +ignoring bad input records that failed validation. \ No newline at end of file diff --git a/docs/en/spring-batch/job.md b/docs/en/spring-batch/job.md new file mode 100644 index 0000000000000000000000000000000000000000..ebc75c5b814d7c61153d09f216a370c387888796 --- /dev/null +++ b/docs/en/spring-batch/job.md @@ -0,0 +1,1357 @@ +# Configuring and Running a Job + +## Configuring and Running a Job + +XMLJavaBoth + +In the [domain section](domain.html#domainLanguageOfBatch) , the overall +architecture design was discussed, using the following diagram as a +guide: + +![Figure 2.1: Batch Stereotypes](https://docs.spring.io/spring-batch/docs/current/reference/html/images/spring-batch-reference-model.png) + +Figure 1. 
Batch Stereotypes + +While the `Job` object may seem like a simple +container for steps, there are many configuration options of which a +developer must be aware. Furthermore, there are many considerations for +how a `Job` will be run and how its meta-data will be +stored during that run. This chapter will explain the various configuration +options and runtime concerns of a `Job`. + +### Configuring a Job + +There are multiple implementations of the [`Job`](#configureJob) interface. However, +builders abstract away the difference in configuration. + +``` +@Bean +public Job footballJob() { + return this.jobBuilderFactory.get("footballJob") + .start(playerLoad()) + .next(gameLoad()) + .next(playerSummarization()) + .build(); +} +``` + +A `Job` (and typically any `Step` within it) requires a `JobRepository`. The +configuration of the `JobRepository` is handled via the [`BatchConfigurer`](#javaConfig). + +The above example illustrates a `Job` that consists of three `Step` instances. The job related +builders can also contain other elements that help with parallelisation (`Split`), +declarative flow control (`Decision`) and externalization of flow definitions (`Flow`). + +Whether you use Java or XML, there are multiple implementations of the [`Job`](#configureJob)interface. However, the namespace abstracts away the differences in configuration. It has +only three required dependencies: a name, `JobRepository` , and a list of `Step` instances. + +``` + + + + + +``` + +The examples here use a parent bean definition to create the steps. +See the section on [step configuration](step.html#configureStep)for more options declaring specific step details inline. The XML namespace +defaults to referencing a repository with an id of 'jobRepository', which +is a sensible default. 
However, this can be overridden explicitly: + +``` + + + + + +``` + +In addition to steps a job configuration can contain other elements that help with +parallelization (``), declarative flow control (``) and externalization +of flow definitions (``). + +#### Restartability + +One key issue when executing a batch job concerns the behavior of a `Job` when it is +restarted. The launching of a `Job` is considered to be a 'restart' if a `JobExecution`already exists for the particular `JobInstance`. Ideally, all jobs should be able to start +up where they left off, but there are scenarios where this is not possible. *It is +entirely up to the developer to ensure that a new `JobInstance` is created in this +scenario.* However, Spring Batch does provide some help. If a `Job` should never be +restarted, but should always be run as part of a new `JobInstance`, then the +restartable property may be set to 'false'. + +The following example shows how to set the `restartable` field to `false` in XML: + +XML Configuration + +``` + + ... + +``` + +The following example shows how to set the `restartable` field to `false` in Java: + +Java Configuration + +``` +@Bean +public Job footballJob() { + return this.jobBuilderFactory.get("footballJob") + .preventRestart() + ... + .build(); +} +``` + +To phrase it another way, setting restartable to false means “this`Job` does not support being started again”. Restarting a `Job` that is not +restartable causes a `JobRestartException` to +be thrown. 
+ +``` +Job job = new SimpleJob(); +job.setRestartable(false); + +JobParameters jobParameters = new JobParameters(); + +JobExecution firstExecution = jobRepository.createJobExecution(job, jobParameters); +jobRepository.saveOrUpdate(firstExecution); + +try { + jobRepository.createJobExecution(job, jobParameters); + fail(); +} +catch (JobRestartException e) { + // expected +} +``` + +This snippet of JUnit code shows how attempting to create a`JobExecution` the first time for a non restartable +job will cause no issues. However, the second +attempt will throw a `JobRestartException`. + +#### Intercepting Job Execution + +During the course of the execution of a +Job, it may be useful to be notified of various +events in its lifecycle so that custom code may be executed. The`SimpleJob` allows for this by calling a`JobListener` at the appropriate time: + +``` +public interface JobExecutionListener { + + void beforeJob(JobExecution jobExecution); + + void afterJob(JobExecution jobExecution); + +} +``` + +`JobListeners` can be added to a `SimpleJob` by setting listeners on the job. + +The following example shows how to add a listener element to an XML job definition: + +XML Configuration + +``` + + + + + + + + +``` + +The following example shows how to add a listener method to a Java job definition: + +Java Configuration + +``` +@Bean +public Job footballJob() { + return this.jobBuilderFactory.get("footballJob") + .listener(sampleListener()) + ... + .build(); +} +``` + +It should be noted that the `afterJob` method is called regardless of the success or +failure of the `Job`. 
If success or failure needs to be determined, it can be obtained +from the `JobExecution`, as follows: + +``` +public void afterJob(JobExecution jobExecution){ + if (jobExecution.getStatus() == BatchStatus.COMPLETED ) { + //job success + } + else if (jobExecution.getStatus() == BatchStatus.FAILED) { + //job failure + } +} +``` + +The annotations corresponding to this interface are: + +* `@BeforeJob` + +* `@AfterJob` + +#### Inheriting from a Parent Job + +If a group of Jobs share similar, but not +identical, configurations, then it may be helpful to define a "parent"`Job` from which the concrete +Jobs may inherit properties. Similar to class +inheritance in Java, the "child" `Job` will combine +its elements and attributes with the parent’s. + +In the following example, "baseJob" is an abstract`Job` definition that defines only a list of +listeners. The `Job` "job1" is a concrete +definition that inherits the list of listeners from "baseJob" and merges +it with its own list of listeners to produce a`Job` with two listeners and one`Step`, "step1". + +``` + + + + + + + + + + + + + +``` + +Please see the section on [Inheriting from a Parent Step](step.html#inheritingFromParentStep)for more detailed information. + +#### JobParametersValidator + +A job declared in the XML namespace or using any subclass of`AbstractJob` can optionally declare a validator for the job parameters at +runtime. This is useful when for instance you need to assert that a job +is started with all its mandatory parameters. There is a`DefaultJobParametersValidator` that can be used to constrain combinations +of simple mandatory and optional parameters, and for more complex +constraints you can implement the interface yourself. 
+ +The configuration of a validator is supported through the XML namespace through a child +element of the job, as shown in the following example: + +``` + + + + +``` + +The validator can be specified as a reference (as shown earlier) or as a nested bean +definition in the beans namespace. + +The configuration of a validator is supported through the java builders, as shown in the +following example: + +``` +@Bean +public Job job1() { + return this.jobBuilderFactory.get("job1") + .validator(parametersValidator()) + ... + .build(); +} +``` + +### Java Config + +Spring 3 brought the ability to configure applications via java instead of XML. As of +Spring Batch 2.2.0, batch jobs can be configured using the same java config. +There are two components for the java based configuration: the `@EnableBatchProcessing`annotation and two builders. + +The `@EnableBatchProcessing` works similarly to the other @Enable\* annotations in the +Spring family. In this case, `@EnableBatchProcessing` provides a base configuration for +building batch jobs. Within this base configuration, an instance of `StepScope` is +created in addition to a number of beans made available to be autowired: + +* `JobRepository`: bean name "jobRepository" + +* `JobLauncher`: bean name "jobLauncher" + +* `JobRegistry`: bean name "jobRegistry" + +* `PlatformTransactionManager`: bean name "transactionManager" + +* `JobBuilderFactory`: bean name "jobBuilders" + +* `StepBuilderFactory`: bean name "stepBuilders" + +The core interface for this configuration is the `BatchConfigurer`. The default +implementation provides the beans mentioned above and requires a `DataSource` as a bean +within the context to be provided. This data source is used by the JobRepository. +You can customize any of these beans +by creating a custom implementation of the `BatchConfigurer` interface. 
+Typically, extending the `DefaultBatchConfigurer` (which is provided if a`BatchConfigurer` is not found) and overriding the required getter is sufficient. +However, implementing your own from scratch may be required. The following +example shows how to provide a custom transaction manager: + +``` +@Bean +public BatchConfigurer batchConfigurer(DataSource dataSource) { + return new DefaultBatchConfigurer(dataSource) { + @Override + public PlatformTransactionManager getTransactionManager() { + return new MyTransactionManager(); + } + }; +} +``` + +| |Only one configuration class needs to have the `@EnableBatchProcessing` annotation. Once
you have a class annotated with it, you will have all of the above available.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +With the base configuration in place, a user can use the provided builder factories to +configure a job. The following example shows a two step job configured with the`JobBuilderFactory` and the `StepBuilderFactory`: + +``` +@Configuration +@EnableBatchProcessing +@Import(DataSourceConfiguration.class) +public class AppConfig { + + @Autowired + private JobBuilderFactory jobs; + + @Autowired + private StepBuilderFactory steps; + + @Bean + public Job job(@Qualifier("step1") Step step1, @Qualifier("step2") Step step2) { + return jobs.get("myJob").start(step1).next(step2).build(); + } + + @Bean + protected Step step1(ItemReader reader, + ItemProcessor processor, + ItemWriter writer) { + return steps.get("step1") + . chunk(10) + .reader(reader) + .processor(processor) + .writer(writer) + .build(); + } + + @Bean + protected Step step2(Tasklet tasklet) { + return steps.get("step2") + .tasklet(tasklet) + .build(); + } +} +``` + +### Configuring a JobRepository + +When using `@EnableBatchProcessing`, a `JobRepository` is provided out of the box for you. +This section addresses configuring your own. + +As described in earlier, the [`JobRepository`](#configureJob) is used for basic CRUD operations of the various persisted +domain objects within Spring Batch, such as`JobExecution` and`StepExecution`. It is required by many of the major +framework features, such as the `JobLauncher`,`Job`, and `Step`. + +The batch namespace abstracts away many of the implementation details of the`JobRepository` implementations and their collaborators. 
However, there are still a few +configuration options available, as shown in the following example: + +XML Configuration + +``` + +``` + +None of the configuration options listed above are required except the `id`. If they are +not set, the defaults shown above will be used. They are shown above for awareness +purposes. The `max-varchar-length` defaults to 2500, which is the length of the long`VARCHAR` columns in the [sample schema +scripts](schema-appendix.html#metaDataSchemaOverview). + +When using java configuration, a `JobRepository` is provided for you. A JDBC based one is +provided out of the box if a `DataSource` is provided, the `Map` based one if not. However, +you can customize the configuration of the `JobRepository` through an implementation of the`BatchConfigurer` interface. + +Java Configuration + +``` +... +// This would reside in your BatchConfigurer implementation +@Override +protected JobRepository createJobRepository() throws Exception { + JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean(); + factory.setDataSource(dataSource); + factory.setTransactionManager(transactionManager); + factory.setIsolationLevelForCreate("ISOLATION_SERIALIZABLE"); + factory.setTablePrefix("BATCH_"); + factory.setMaxVarCharLength(1000); + return factory.getObject(); +} +... +``` + +None of the configuration options listed above are required except +the dataSource and transactionManager. If they are not set, the defaults shown above +will be used. They are shown above for awareness purposes. The +max varchar length defaults to 2500, which is the +length of the long `VARCHAR` columns in the[sample schema scripts](schema-appendix.html#metaDataSchemaOverview) + +#### Transaction Configuration for the JobRepository + +If the namespace or the provided `FactoryBean` is used, transactional advice is +automatically created around the repository. 
This is to ensure that the batch meta-data, +including state that is necessary for restarts after a failure, is persisted correctly. +The behavior of the framework is not well defined if the repository methods are not +transactional. The isolation level in the `create*` method attributes is specified +separately to ensure that, when jobs are launched, if two processes try to launch +the same job at the same time, only one succeeds. The default isolation level for that +method is `SERIALIZABLE`, which is quite aggressive. `READ_COMMITTED` would work just as +well. `READ_UNCOMMITTED` would be fine if two processes are not likely to collide in this +way. However, since a call to the `create*` method is quite short, it is unlikely that`SERIALIZED` causes problems, as long as the database platform supports it. However, this +can be overridden. + +The following example shows how to override the isolation level in XML: + +XML Configuration + +``` + +``` + +The following example shows how to override the isolation level in Java: + +Java Configuration + +``` +// This would reside in your BatchConfigurer implementation +@Override +protected JobRepository createJobRepository() throws Exception { + JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean(); + factory.setDataSource(dataSource); + factory.setTransactionManager(transactionManager); + factory.setIsolationLevelForCreate("ISOLATION_REPEATABLE_READ"); + return factory.getObject(); +} +``` + +If the namespace or factory beans are not used, then it is also essential to configure the +transactional behavior of the repository using AOP. + +The following example shows how to configure the transactional behavior of the repository +in XML: + +XML Configuration + +``` + + + + + + + + + + +``` + +The preceding fragment can be used nearly as is, with almost no changes. 
Remember also to +include the appropriate namespace declarations and to make sure spring-tx and spring-aop +(or the whole of Spring) are on the classpath. + +The following example shows how to configure the transactional behavior of the repository +in Java: + +Java Configuration + +``` +@Bean +public TransactionProxyFactoryBean baseProxy() { + TransactionProxyFactoryBean transactionProxyFactoryBean = new TransactionProxyFactoryBean(); + Properties transactionAttributes = new Properties(); + transactionAttributes.setProperty("*", "PROPAGATION_REQUIRED"); + transactionProxyFactoryBean.setTransactionAttributes(transactionAttributes); + transactionProxyFactoryBean.setTarget(jobRepository()); + transactionProxyFactoryBean.setTransactionManager(transactionManager()); + return transactionProxyFactoryBean; +} +``` + +#### Changing the Table Prefix + +Another modifiable property of the `JobRepository` is the table prefix of the meta-data +tables. By default they are all prefaced with `BATCH_`. `BATCH_JOB_EXECUTION` and`BATCH_STEP_EXECUTION` are two examples. However, there are potential reasons to modify this +prefix. If the schema names needs to be prepended to the table names, or if more than one +set of meta data tables is needed within the same schema, then the table prefix needs to +be changed: + +The following example shows how to change the table prefix in XML: + +XML Configuration + +``` + +``` + +The following example shows how to change the table prefix in Java: + +Java Configuration + +``` +// This would reside in your BatchConfigurer implementation +@Override +protected JobRepository createJobRepository() throws Exception { + JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean(); + factory.setDataSource(dataSource); + factory.setTransactionManager(transactionManager); + factory.setTablePrefix("SYSTEM.TEST_"); + return factory.getObject(); +} +``` + +Given the preceding changes, every query to the meta-data tables is prefixed with`SYSTEM.TEST_`. 
`BATCH_JOB_EXECUTION` is referred to as `SYSTEM.TEST_JOB_EXECUTION`.
for removal in v5. If you want to use an in-memory job repository, you can use an embedded database
like H2, Apache Derby or HSQLDB. There are several ways to create an embedded database and use it in
your Spring Batch application. One way to do that is by using the APIs from [Spring JDBC](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#jdbc-embedded-database-support):

```
@Bean
public DataSource dataSource() {
return new EmbeddedDatabaseBuilder()
.setType(EmbeddedDatabaseType.H2)
.addScript("/org/springframework/batch/core/schema-drop-h2.sql")
.addScript("/org/springframework/batch/core/schema-h2.sql")
.build();
}
```

Once you have defined your embedded datasource as a bean in your application context, it should be picked
up automatically if you use `@EnableBatchProcessing`. Otherwise you can configure it manually using the
JDBC based `JobRepositoryFactoryBean` as shown in the [Configuring a JobRepository section](#configuringJobRepository).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Non-standard Database Types in a Repository + +If you are using a database platform that is not in the list of supported platforms, you +may be able to use one of the supported types, if the SQL variant is close enough. To do +this, you can use the raw `JobRepositoryFactoryBean` instead of the namespace shortcut and +use it to set the database type to the closest match. 
+ +The following example shows how to use `JobRepositoryFactoryBean` to set the database type +to the closest match in XML: + +XML Configuration + +``` + + + + +``` + +The following example shows how to use `JobRepositoryFactoryBean` to set the database type +to the closest match in Java: + +Java Configuration + +``` +// This would reside in your BatchConfigurer implementation +@Override +protected JobRepository createJobRepository() throws Exception { + JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean(); + factory.setDataSource(dataSource); + factory.setDatabaseType("db2"); + factory.setTransactionManager(transactionManager); + return factory.getObject(); +} +``` + +(The `JobRepositoryFactoryBean` tries to +auto-detect the database type from the `DataSource`if it is not specified.) The major differences between platforms are +mainly accounted for by the strategy for incrementing primary keys, so +often it might be necessary to override the`incrementerFactory` as well (using one of the standard +implementations from the Spring Framework). + +If even that doesn’t work, or you are not using an RDBMS, then the +only option may be to implement the various `Dao`interfaces that the `SimpleJobRepository` depends +on and wire one up manually in the normal Spring way. + +### Configuring a JobLauncher + +When using `@EnableBatchProcessing`, a `JobRegistry` is provided out of the box for you. +This section addresses configuring your own. + +The most basic implementation of the `JobLauncher` interface is the `SimpleJobLauncher`. +Its only required dependency is a `JobRepository`, in order to obtain an execution. + +The following example shows a `SimpleJobLauncher` in XML: + +XML Configuration + +``` + + + +``` + +The following example shows a `SimpleJobLauncher` in Java: + +Java Configuration + +``` +... 
+// This would reside in your BatchConfigurer implementation +@Override +protected JobLauncher createJobLauncher() throws Exception { + SimpleJobLauncher jobLauncher = new SimpleJobLauncher(); + jobLauncher.setJobRepository(jobRepository); + jobLauncher.afterPropertiesSet(); + return jobLauncher; +} +... +``` + +Once a [JobExecution](domain.html#domainLanguageOfBatch) is obtained, it is passed to the +execute method of `Job`, ultimately returning the `JobExecution` to the caller, as shown +in the following image: + +![Job Launcher Sequence](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-launcher-sequence-sync.png) + +Figure 2. Job Launcher Sequence + +The sequence is straightforward and works well when launched from a scheduler. However, +issues arise when trying to launch from an HTTP request. In this scenario, the launching +needs to be done asynchronously so that the `SimpleJobLauncher` returns immediately to its +caller. This is because it is not good practice to keep an HTTP request open for the +amount of time needed by long running processes such as batch. The following image shows +an example sequence: + +![Async Job Launcher Sequence](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-launcher-sequence-async.png) + +Figure 3. Asynchronous Job Launcher Sequence + +The `SimpleJobLauncher` can be configured to allow for this scenario by configuring a`TaskExecutor`. 
+ +The following XML example shows a `SimpleJobLauncher` configured to return immediately: + +XML Configuration + +``` + + + + + + +``` + +The following Java example shows a `SimpleJobLauncher` configured to return immediately: + +Java Configuration + +``` +@Bean +public JobLauncher jobLauncher() { + SimpleJobLauncher jobLauncher = new SimpleJobLauncher(); + jobLauncher.setJobRepository(jobRepository()); + jobLauncher.setTaskExecutor(new SimpleAsyncTaskExecutor()); + jobLauncher.afterPropertiesSet(); + return jobLauncher; +} +``` + +Any implementation of the spring `TaskExecutor`interface can be used to control how jobs are asynchronously +executed. + +### Running a Job + +At a minimum, launching a batch job requires two things: the`Job` to be launched and a`JobLauncher`. Both can be contained within the same +context or different contexts. For example, if launching a job from the +command line, a new JVM will be instantiated for each Job, and thus every +job will have its own `JobLauncher`. However, if +running from within a web container within the scope of an`HttpRequest`, there will usually be one`JobLauncher`, configured for asynchronous job +launching, that multiple requests will invoke to launch their jobs. + +#### Running Jobs from the Command Line + +For users that want to run their jobs from an enterprise +scheduler, the command line is the primary interface. This is because +most schedulers (with the exception of Quartz unless using the +NativeJob) work directly with operating system +processes, primarily kicked off with shell scripts. There are many ways +to launch a Java process besides a shell script, such as Perl, Ruby, or +even 'build tools' such as ant or maven. However, because most people +are familiar with shell scripts, this example will focus on them. + +##### The CommandLineJobRunner + +Because the script launching the job must kick off a Java +Virtual Machine, there needs to be a class with a main method to act +as the primary entry point. 
Spring Batch provides an implementation +that serves just this purpose:`CommandLineJobRunner`. It’s important to note +that this is just one way to bootstrap your application, but there are +many ways to launch a Java process, and this class should in no way be +viewed as definitive. The `CommandLineJobRunner`performs four tasks: + +* Load the appropriate`ApplicationContext` + +* Parse command line arguments into`JobParameters` + +* Locate the appropriate job based on arguments + +* Use the `JobLauncher` provided in the + application context to launch the job. + +All of these tasks are accomplished using only the arguments +passed in. The following are required arguments: + +|jobPath|The location of the XML file that will be used to
create an `ApplicationContext`. This file
should contain everything needed to run the complete
Job| +|-------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +|jobName| The name of the job to be run. | + +These arguments must be passed in with the path first and the name second. All arguments +after these are considered to be job parameters, are turned into a JobParameters object, +and must be in the format of 'name=value'. + +The following example shows a date passed as a job parameter to a job defined in XML: + +``` +key/value pairs to identifying job parameters. However, it is possible to explicitly specify
which job parameters are identifying and which are not by prefixing them with `+` or `-` respectively.

In the following example, `schedule.date` is an identifying job parameter while `vendor.id` is not:

```
+schedule.date(date)=2007/05/05 -vendor.id=123
```

```
+schedule.date(date)=2007/05/05 -vendor.id=123
```

This behaviour can be overridden by using a custom `JobParametersConverter`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In most cases, you would want to use a manifest to declare your main class in a jar, but, +for simplicity, the class was used directly. This example is using the same 'EndOfDay' +example from the [domainLanguageOfBatch](domain.html#domainLanguageOfBatch). The first +argument is 'endOfDayJob.xml', which is the Spring ApplicationContext containing the`Job`. The second argument, 'endOfDay' represents the job name. The final argument, +'schedule.date(date)=2007/05/05', is converted into a JobParameters object. + +The following example shows a sample configuration for `endOfDay` in XML: + +``` + + + + + + +``` + +In most cases you would want to use a manifest to declare your main class in a jar, but, +for simplicity, the class was used directly. This example is using the same 'EndOfDay' +example from the [domainLanguageOfBatch](domain.html#domainLanguageOfBatch). The first +argument is 'io.spring.EndOfDayJobConfiguration', which is the fully qualified class name +to the configuration class containing the Job. 
The second argument, 'endOfDay' represents +the job name. The final argument, 'schedule.date(date)=2007/05/05' is converted into a`JobParameters` object. An example of the java configuration follows: + +The following example shows a sample configuration for `endOfDay` in Java: + +``` +@Configuration +@EnableBatchProcessing +public class EndOfDayJobConfiguration { + + @Autowired + private JobBuilderFactory jobBuilderFactory; + + @Autowired + private StepBuilderFactory stepBuilderFactory; + + @Bean + public Job endOfDay() { + return this.jobBuilderFactory.get("endOfDay") + .start(step1()) + .build(); + } + + @Bean + public Step step1() { + return this.stepBuilderFactory.get("step1") + .tasklet((contribution, chunkContext) -> null) + .build(); + } +} +``` + +The preceding example is overly simplistic, since there are many more requirements to +run a batch job in Spring Batch in general, but it serves to show the two main +requirements of the `CommandLineJobRunner`: `Job` and `JobLauncher`. + +##### ExitCodes + +When launching a batch job from the command-line, an enterprise +scheduler is often used. Most schedulers are fairly dumb and work only +at the process level. This means that they only know about some +operating system process such as a shell script that they’re invoking. +In this scenario, the only way to communicate back to the scheduler +about the success or failure of a job is through return codes. A +return code is a number that is returned to a scheduler by the process +that indicates the result of the run. In the simplest case: 0 is +success and 1 is failure. However, there may be more complex +scenarios: If job A returns 4 kick off job B, and if it returns 5 kick +off job C. This type of behavior is configured at the scheduler level, +but it is important that a processing framework such as Spring Batch +provide a way to return a numeric representation of the 'Exit Code' +for a particular batch job. 
In Spring Batch this is encapsulated +within an `ExitStatus`, which is covered in more +detail in Chapter 5. For the purposes of discussing exit codes, the +only important thing to know is that an`ExitStatus` has an exit code property that is +set by the framework (or the developer) and is returned as part of the`JobExecution` returned from the`JobLauncher`. The`CommandLineJobRunner` converts this string value +to a number using the `ExitCodeMapper`interface: + +``` +public interface ExitCodeMapper { + + public int intValue(String exitCode); + +} +``` + +The essential contract of an`ExitCodeMapper` is that, given a string exit +code, a number representation will be returned. The default implementation used by the job runner is the `SimpleJvmExitCodeMapper`that returns 0 for completion, 1 for generic errors, and 2 for any job +runner errors such as not being able to find a`Job` in the provided context. If anything more +complex than the 3 values above is needed, then a custom +implementation of the `ExitCodeMapper` interface +must be supplied. Because the`CommandLineJobRunner` is the class that creates +an `ApplicationContext`, and thus cannot be +'wired together', any values that need to be overwritten must be +autowired. This means that if an implementation of`ExitCodeMapper` is found within the `BeanFactory`, +it will be injected into the runner after the context is created. All +that needs to be done to provide your own`ExitCodeMapper` is to declare the implementation +as a root level bean and ensure that it is part of the`ApplicationContext` that is loaded by the +runner. + +#### Running Jobs from within a Web Container + +Historically, offline processing such as batch jobs have been +launched from the command-line, as described above. However, there are +many cases where launching from an `HttpRequest` is +a better option. Many such use cases include reporting, ad-hoc job +running, and web application support. 
Because a batch job by definition +is long running, the most important concern is ensuring to launch the +job asynchronously: + +![Async Job Launcher Sequence from web container](https://docs.spring.io/spring-batch/docs/current/reference/html/images/launch-from-request.png) + +Figure 4. Asynchronous Job Launcher Sequence From Web Container + +The controller in this case is a Spring MVC controller. More +information on Spring MVC can be found here: . +The controller launches a `Job` using a`JobLauncher` that has been configured to launch[asynchronously](#runningJobsFromWebContainer), which +immediately returns a `JobExecution`. The`Job` will likely still be running, however, this +nonblocking behaviour allows the controller to return immediately, which +is required when handling an `HttpRequest`. An +example is below: + +``` +@Controller +public class JobLauncherController { + + @Autowired + JobLauncher jobLauncher; + + @Autowired + Job job; + + @RequestMapping("/jobLauncher.html") + public void handle() throws Exception{ + jobLauncher.run(job, new JobParameters()); + } +} +``` + +### Advanced Meta-Data Usage + +So far, both the `JobLauncher` and `JobRepository` interfaces have been +discussed. Together, they represent simple launching of a job, and basic +CRUD operations of batch domain objects: + +![Job Repository](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-repository.png) + +Figure 5. Job Repository + +A `JobLauncher` uses the`JobRepository` to create new`JobExecution` objects and run them.`Job` and `Step` implementations +later use the same `JobRepository` for basic updates +of the same executions during the running of a Job. 
+The basic operations suffice for simple scenarios, but in a large batch +environment with hundreds of batch jobs and complex scheduling +requirements, more advanced access of the meta data is required: + +![Job Repository Advanced](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-repository-advanced.png) + +Figure 6. Advanced Job Repository Access + +The `JobExplorer` and`JobOperator` interfaces, which will be discussed +below, add additional functionality for querying and controlling the meta +data. + +#### Querying the Repository + +The most basic need before any advanced features is the ability to +query the repository for existing executions. This functionality is +provided by the `JobExplorer` interface: + +``` +public interface JobExplorer { + + List getJobInstances(String jobName, int start, int count); + + JobExecution getJobExecution(Long executionId); + + StepExecution getStepExecution(Long jobExecutionId, Long stepExecutionId); + + JobInstance getJobInstance(Long instanceId); + + List getJobExecutions(JobInstance jobInstance); + + Set findRunningJobExecutions(String jobName); +} +``` + +As is evident from the method signatures above, `JobExplorer` is a read-only version of +the `JobRepository`, and, like the `JobRepository`, it can be easily configured by using a +factory bean: + +The following example shows how to configure a `JobExplorer` in XML: + +XML Configuration + +``` + +``` + +The following example shows how to configure a `JobExplorer` in Java: + +Java Configuration + +``` +... +// This would reside in your BatchConfigurer implementation +@Override +public JobExplorer getJobExplorer() throws Exception { + JobExplorerFactoryBean factoryBean = new JobExplorerFactoryBean(); + factoryBean.setDataSource(this.dataSource); + return factoryBean.getObject(); +} +... 
+``` + +[Earlier in this chapter](#repositoryTablePrefix), we noted that the table prefix +of the `JobRepository` can be modified to allow for different versions or schemas. Because +the `JobExplorer` works with the same tables, it too needs the ability to set a prefix. + +The following example shows how to set the table prefix for a `JobExplorer` in XML: + +XML Configuration + +``` + +``` + +The following example shows how to set the table prefix for a `JobExplorer` in Java: + +Java Configuration + +``` +... +// This would reside in your BatchConfigurer implementation +@Override +public JobExplorer getJobExplorer() throws Exception { + JobExplorerFactoryBean factoryBean = new JobExplorerFactoryBean(); + factoryBean.setDataSource(this.dataSource); + factoryBean.setTablePrefix("SYSTEM."); + return factoryBean.getObject(); +} +... +``` + +#### JobRegistry + +A `JobRegistry` (and its parent interface `JobLocator`) is not mandatory, but it can be +useful if you want to keep track of which jobs are available in the context. It is also +useful for collecting jobs centrally in an application context when they have been created +elsewhere (for example, in child contexts). Custom `JobRegistry` implementations can also +be used to manipulate the names and other properties of the jobs that are registered. +There is only one implementation provided by the framework and this is based on a simple +map from job name to job instance. + +The following example shows how to include a `JobRegistry` for a job defined in XML: + +``` + +``` + +The following example shows how to include a `JobRegistry` for a job defined in Java: + +When using `@EnableBatchProcessing`, a `JobRegistry` is provided out of the box for you. +If you want to configure your own: + +``` +... 
+// This is already provided via the @EnableBatchProcessing but can be customized via +// overriding the getter in the SimpleBatchConfiguration +@Override +@Bean +public JobRegistry jobRegistry() throws Exception { + return new MapJobRegistry(); +} +... +``` + +There are two ways to populate a `JobRegistry` automatically: using +a bean post processor and using a registrar lifecycle component. These +two mechanisms are described in the following sections. + +##### JobRegistryBeanPostProcessor + +This is a bean post-processor that can register all jobs as they are created. + +The following example shows how to include the `JobRegistryBeanPostProcessor` for a job +defined in XML: + +XML Configuration + +``` + + + +``` + +The following example shows how to include the `JobRegistryBeanPostProcessor` for a job +defined in Java: + +Java Configuration + +``` +@Bean +public JobRegistryBeanPostProcessor jobRegistryBeanPostProcessor() { + JobRegistryBeanPostProcessor postProcessor = new JobRegistryBeanPostProcessor(); + postProcessor.setJobRegistry(jobRegistry()); + return postProcessor; +} +``` + +Although it is not strictly necessary, the post-processor in the +example has been given an id so that it can be included in child +contexts (e.g. as a parent bean definition) and cause all jobs created +there to also be registered automatically. + +##### `AutomaticJobRegistrar` + +This is a lifecycle component that creates child contexts and registers jobs from those +contexts as they are created. One advantage of doing this is that, while the job names in +the child contexts still have to be globally unique in the registry, their dependencies +can have "natural" names. So for example, you can create a set of XML configuration files +each having only one Job, but all having different definitions of an `ItemReader` with the +same bean name, such as "reader". 
If all those files were imported into the same context, +the reader definitions would clash and override one another, but with the automatic +registrar this is avoided. This makes it easier to integrate jobs contributed from +separate modules of an application. + +The following example shows how to include the `AutomaticJobRegistrar` for a job defined +in XML: + +XML Configuration + +``` + + + + + + + + + + + + +``` + +The following example shows how to include the `AutomaticJobRegistrar` for a job defined +in Java: + +Java Configuration + +``` +@Bean +public AutomaticJobRegistrar registrar() { + + AutomaticJobRegistrar registrar = new AutomaticJobRegistrar(); + registrar.setJobLoader(jobLoader()); + registrar.setApplicationContextFactories(applicationContextFactories()); + registrar.afterPropertiesSet(); + return registrar; + +} +``` + +The registrar has two mandatory properties, one is an array of`ApplicationContextFactory` (here created from a +convenient factory bean), and the other is a`JobLoader`. The `JobLoader`is responsible for managing the lifecycle of the child contexts and +registering jobs in the `JobRegistry`. + +The `ApplicationContextFactory` is +responsible for creating the child context and the most common usage +would be as above using a`ClassPathXmlApplicationContextFactory`. One of +the features of this factory is that by default it copies some of the +configuration down from the parent context to the child. So for +instance you don’t have to re-define the`PropertyPlaceholderConfigurer` or AOP +configuration in the child, if it should be the same as the +parent. + +The `AutomaticJobRegistrar` can be used in +conjunction with a `JobRegistryBeanPostProcessor`if desired (as long as the `DefaultJobLoader` is +used as well). For instance this might be desirable if there are jobs +defined in the main parent context as well as in the child +locations. 
+ +#### JobOperator + +As previously discussed, the `JobRepository`provides CRUD operations on the meta-data, and the`JobExplorer` provides read-only operations on the +meta-data. However, those operations are most useful when used together +to perform common monitoring tasks such as stopping, restarting, or +summarizing a Job, as is commonly done by batch operators. Spring Batch +provides these types of operations via the`JobOperator` interface: + +``` +public interface JobOperator { + + List getExecutions(long instanceId) throws NoSuchJobInstanceException; + + List getJobInstances(String jobName, int start, int count) + throws NoSuchJobException; + + Set getRunningExecutions(String jobName) throws NoSuchJobException; + + String getParameters(long executionId) throws NoSuchJobExecutionException; + + Long start(String jobName, String parameters) + throws NoSuchJobException, JobInstanceAlreadyExistsException; + + Long restart(long executionId) + throws JobInstanceAlreadyCompleteException, NoSuchJobExecutionException, + NoSuchJobException, JobRestartException; + + Long startNextInstance(String jobName) + throws NoSuchJobException, JobParametersNotFoundException, JobRestartException, + JobExecutionAlreadyRunningException, JobInstanceAlreadyCompleteException; + + boolean stop(long executionId) + throws NoSuchJobExecutionException, JobExecutionNotRunningException; + + String getSummary(long executionId) throws NoSuchJobExecutionException; + + Map getStepExecutionSummaries(long executionId) + throws NoSuchJobExecutionException; + + Set getJobNames(); + +} +``` + +The above operations represent methods from many different interfaces, such as`JobLauncher`, `JobRepository`, `JobExplorer`, and `JobRegistry`. For this reason, the +provided implementation of `JobOperator`, `SimpleJobOperator`, has many dependencies. 
+ +The following example shows a typical bean definition for `SimpleJobOperator` in XML: + +``` + + + + + + + + + + +``` + +The following example shows a typical bean definition for `SimpleJobOperator` in Java: + +``` + /** + * All injected dependencies for this bean are provided by the @EnableBatchProcessing + * infrastructure out of the box. + */ + @Bean + public SimpleJobOperator jobOperator(JobExplorer jobExplorer, + JobRepository jobRepository, + JobRegistry jobRegistry) { + + SimpleJobOperator jobOperator = new SimpleJobOperator(); + + jobOperator.setJobExplorer(jobExplorer); + jobOperator.setJobRepository(jobRepository); + jobOperator.setJobRegistry(jobRegistry); + jobOperator.setJobLauncher(jobLauncher); + + return jobOperator; + } +``` + +| |If you set the table prefix on the job repository, don’t forget to set it on the job explorer as well.| +|---|------------------------------------------------------------------------------------------------------| + +#### JobParametersIncrementer + +Most of the methods on `JobOperator` are +self-explanatory, and more detailed explanations can be found on the[javadoc of the interface](https://docs.spring.io/spring-batch/docs/current/api/org/springframework/batch/core/launch/JobOperator.html). However, the`startNextInstance` method is worth noting. This +method will always start a new instance of a Job. +This can be extremely useful if there are serious issues in a`JobExecution` and the Job +needs to be started over again from the beginning. 
Unlike`JobLauncher` though, which requires a new`JobParameters` object that will trigger a new`JobInstance` if the parameters are different from +any previous set of parameters, the`startNextInstance` method will use the`JobParametersIncrementer` tied to the`Job` to force the `Job` to a +new instance: + +``` +public interface JobParametersIncrementer { + + JobParameters getNext(JobParameters parameters); + +} +``` + +The contract of `JobParametersIncrementer` is +that, given a [JobParameters](#jobParameters)object, it will return the 'next' JobParameters +object by incrementing any necessary values it may contain. This +strategy is useful because the framework has no way of knowing what +changes to the `JobParameters` make it the 'next' +instance. For example, if the only value in`JobParameters` is a date, and the next instance +should be created, should that value be incremented by one day? Or one +week (if the job is weekly for instance)? The same can be said for any +numerical values that help to identify the Job, +as shown below: + +``` +public class SampleIncrementer implements JobParametersIncrementer { + + public JobParameters getNext(JobParameters parameters) { + if (parameters==null || parameters.isEmpty()) { + return new JobParametersBuilder().addLong("run.id", 1L).toJobParameters(); + } + long id = parameters.getLong("run.id",1L) + 1; + return new JobParametersBuilder().addLong("run.id", id).toJobParameters(); + } +} +``` + +In this example, the value with a key of 'run.id' is used to +discriminate between `JobInstances`. If the`JobParameters` passed in is null, it can be +assumed that the `Job` has never been run before +and thus its initial state can be returned. However, if not, the old +value is obtained, incremented by one, and returned. + +For jobs defined in XML, an incrementer can be associated with `Job` through the +'incrementer' attribute in the namespace, as follows: + +``` + + ... 
+ +``` + +For jobs defined in Java, an incrementer can be associated with a 'Job' through the`incrementer` method provided in the builders, as follows: + +``` +@Bean +public Job footballJob() { + return this.jobBuilderFactory.get("footballJob") + .incrementer(sampleIncrementer()) + ... + .build(); +} +``` + +#### Stopping a Job + +One of the most common use cases of`JobOperator` is gracefully stopping a +Job: + +``` +Set executions = jobOperator.getRunningExecutions("sampleJob"); +jobOperator.stop(executions.iterator().next()); +``` + +The shutdown is not immediate, since there is no way to force +immediate shutdown, especially if the execution is currently in +developer code that the framework has no control over, such as a +business service. However, as soon as control is returned back to the +framework, it will set the status of the current`StepExecution` to`BatchStatus.STOPPED`, save it, then do the same +for the `JobExecution` before finishing. + +#### Aborting a Job + +A job execution which is `FAILED` can be +restarted (if the `Job` is restartable). A job execution whose status is`ABANDONED` will not be restarted by the framework. +The `ABANDONED` status is also used in step +executions to mark them as skippable in a restarted job execution: if a +job is executing and encounters a step that has been marked`ABANDONED` in the previous failed job execution, it +will move on to the next step (as determined by the job flow definition +and the step execution exit status). + +If the process died (`"kill -9"` or server +failure) the job is, of course, not running, but the `JobRepository` has +no way of knowing because no-one told it before the process died. You +have to tell it manually that you know that the execution either failed +or should be considered aborted (change its status to`FAILED` or `ABANDONED`) - it’s +a business decision and there is no way to automate it. 
Only change the +status to `FAILED` if it is not restartable, or if +you know the restart data is valid. There is a utility in Spring Batch +Admin `JobService` to abort a job execution. \ No newline at end of file diff --git a/docs/en/spring-batch/jsr-352.md b/docs/en/spring-batch/jsr-352.md new file mode 100644 index 0000000000000000000000000000000000000000..6ccea008257b7304321dab4dc28e5e5192dd9ecc --- /dev/null +++ b/docs/en/spring-batch/jsr-352.md @@ -0,0 +1,415 @@ +# JSR-352 Support + +## JSR-352 Support + +XMLJavaBoth + +As of Spring Batch 3.0 support for JSR-352 has been fully implemented. This section is not a replacement for +the spec itself and instead, intends to explain how the JSR-352 specific concepts apply to Spring Batch. +Additional information on JSR-352 can be found via the +JCP here: + +### General Notes about Spring Batch and JSR-352 + +Spring Batch and JSR-352 are structurally the same. They both have jobs that are made up of steps. They +both have readers, processors, writers, and listeners. However, their interactions are subtly different. +For example, the `org.springframework.batch.core.SkipListener#onSkipInWrite(S item, Throwable t)`within Spring Batch receives two parameters: the item that was skipped and the Exception that caused the +skip. The JSR-352 version of the same method +(`javax.batch.api.chunk.listener.SkipWriteListener#onSkipWriteItem(List items, Exception ex)`) +also receives two parameters. However the first one is a `List` of all the items +within the current chunk with the second being the `Exception` that caused the skip. +Because of these differences, it is important to note that there are two paths to execute a job within +Spring Batch: either a traditional Spring Batch job or a JSR-352 based job. While the use of Spring Batch +artifacts (readers, writers, etc) will work within a job configured with JSR-352’s JSL and executed with the`JsrJobOperator`, they will behave according to the rules of JSR-352. 
It is also +important to note that batch artifacts that have been developed against the JSR-352 interfaces will not work +within a traditional Spring Batch job. + +### Setup + +#### Application Contexts + +All JSR-352 based jobs within Spring Batch consist of two application contexts. A parent context, that +contains beans related to the infrastructure of Spring Batch such as the `JobRepository`,`PlatformTransactionManager`, etc and a child context that consists of the configuration +of the job to be run. The parent context is defined via the `jsrBaseContext.xml` provided +by the framework. This context may be overridden by setting the `JSR-352-BASE-CONTEXT` system +property. + +| |The base context is not processed by the JSR-352 processors for things like property injection so
no components requiring that additional processing should be configured there.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Launching a JSR-352 based job + +JSR-352 requires a very simple path to executing a batch job. The following code is all that is needed to +execute your first batch job: + +``` +JobOperator operator = BatchRuntime.getJobOperator(); +jobOperator.start("myJob", new Properties()); +``` + +While that is convenient for developers, the devil is in the details. Spring Batch bootstraps a bit of +infrastructure behind the scenes that a developer may want to override. The following is bootstrapped the +first time `BatchRuntime.getJobOperator()` is called: + +| *Bean Name* | *Default Configuration* | *Notes* | +|------------------------|-----------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| dataSource | Apache DBCP BasicDataSource with configured values. | By default, HSQLDB is bootstrapped. | +| `transactionManager` | `org.springframework.jdbc.datasource.DataSourceTransactionManager` | References the dataSource bean defined above. | +|A Datasource initializer| | This is configured to execute the scripts configured via the`batch.drop.script` and `batch.schema.script` properties. By
default, the schema scripts for HSQLDB are executed. This behavior can be disabled by setting the`batch.data.source.init` property. | +| jobRepository | A JDBC based `SimpleJobRepository`. | This `JobRepository` uses the previously mentioned data source and transaction
manager. The schema’s table prefix is configurable (defaults to BATCH\_) via the`batch.table.prefix` property. | +| jobLauncher | `org.springframework.batch.core.launch.support.SimpleJobLauncher` | Used to launch jobs. | +| batchJobOperator | `org.springframework.batch.core.launch.support.SimpleJobOperator` | The `JsrJobOperator` wraps this to provide most of its functionality. | +| jobExplorer |`org.springframework.batch.core.explore.support.JobExplorerFactoryBean`| Used to address lookup functionality provided by the `JsrJobOperator`. | +| jobParametersConverter | `org.springframework.batch.core.jsr.JsrJobParametersConverter` | JSR-352 specific implementation of the `JobParametersConverter`. | +| jobRegistry | `org.springframework.batch.core.configuration.support.MapJobRegistry` | Used by the `SimpleJobOperator`. | +| placeholderProperties |`org.springframework.beans.factory.config.PropertyPlaceholderConfigurer`|Loads the properties file `batch-${ENVIRONMENT:hsql}.properties` to configure
the properties mentioned above. ENVIRONMENT is a System property (defaults to `hsql`)
that can be used to specify any of the databases that Spring Batch currently
supports.| + +| |None of the above beans are optional for executing JSR-352 based jobs. All may be overridden to
provide customized functionality as needed.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------| + +### Dependency Injection + +JSR-352 is based heavily on the Spring Batch programming model. As such, while not explicitly requiring a +formal dependency injection implementation, DI of some kind is implied. Spring Batch supports all three +methods for loading batch artifacts defined by JSR-352: + +* Implementation Specific Loader: Spring Batch is built upon Spring and so supports + Spring dependency injection within JSR-352 batch jobs. + +* Archive Loader: JSR-352 defines the existence of a `batch.xml` file that provides mappings + between a logical name and a class name. This file must be found within the `/META-INF/` directory if it is used. + +* Thread Context Class Loader: JSR-352 allows configurations to specify batch artifact + implementations in their JSL by providing the fully qualified class name inline. Spring + Batch supports this as well in JSR-352 configured jobs. + +Using Spring dependency injection within a JSR-352 based batch job consists of +configuring batch artifacts using a Spring application context as beans. Once the beans +have been defined, a job can refer to them as it would any bean defined within the`batch.xml` file. 
+ +The following example shows how to use Spring dependency injection within a JSR-352 based +batch job in XML: + +XML Configuration + +``` + + + + + + + + + + + + + + + +``` + +The following example shows how to use Spring dependency injection within a JSR-352 based +batch job in Java: + +Java Configuration + +``` +@Configuration +public class BatchConfiguration { + + @Bean + public Batchlet fooBatchlet() { + FooBatchlet batchlet = new FooBatchlet(); + batchlet.setProp("bar"); + return batchlet; + } +} + + + + + + + +``` + +The assembly of Spring contexts (imports, etc) works with JSR-352 jobs just as it would with any other +Spring based application. The only difference with a JSR-352 based job is that the entry point for the +context definition will be the job definition found in /META-INF/batch-jobs/. + +To use the thread context class loader approach, all you need to do is provide the fully qualified class +name as the ref. It is important to note that when using this approach or the `batch.xml` approach, the class +referenced requires a no argument constructor which will be used to create the bean. + +``` + + + + + + +``` + +### Batch Properties + +#### Property Support + +JSR-352 allows for properties to be defined at the Job, Step and batch artifact level by way of +configuration in the JSL. Batch properties are configured at each level in the following way: + +``` + + + + +``` + +`Properties` may be configured on any batch artifact. + +#### @BatchProperty annotation + +`Properties` are referenced in batch artifacts by annotating class fields with the`@BatchProperty` and `@Inject` annotations (both annotations +are required by the spec). As defined by JSR-352, fields for properties must be String typed. Any type +conversion is up to the implementing developer to perform. 
+ +An `javax.batch.api.chunk.ItemReader` artifact could be configured with a +properties block such as the one described above and accessed as such: + +``` +public class MyItemReader extends AbstractItemReader { + @Inject + @BatchProperty + private String propertyName1; + + ... +} +``` + +The value of the field "propertyName1" will be "propertyValue1" + +#### Property Substitution + +Property substitution is provided by way of operators and simple conditional expressions. The general +usage is `#{operator['key']}`. + +Supported operators: + +* `jobParameters`: access job parameter values that the job was started/restarted with. + +* `jobProperties`: access properties configured at the job level of the JSL. + +* `systemProperties`: access named system properties. + +* `partitionPlan`: access named property from the partition plan of a partitioned step. + +``` +#{jobParameters['unresolving.prop']}?:#{systemProperties['file.separator']} +``` + +The left hand side of the assignment is the expected value, the right hand side is the +default value. In the preceding +example, the result will resolve to a value of the system property file.separator as +#{jobParameters['unresolving.prop']} is assumed to not be resolvable. If neither +expressions can be resolved, an empty String will be returned. Multiple conditions can be +used, which are separated by a ';'. + +### Processing Models + +JSR-352 provides the same two basic processing models that Spring Batch does: + +* Item based processing - Using an `javax.batch.api.chunk.ItemReader`, an optional`javax.batch.api.chunk.ItemProcessor`, and an `javax.batch.api.chunk.ItemWriter`. + +* Task based processing - Using a `javax.batch.api.Batchlet`implementation. This processing model is the same as the`org.springframework.batch.core.step.tasklet.Tasklet` based processing + currently available. 
+ +#### Item based processing + +Item based processing in this context is a chunk size being set by the number of items read by an`ItemReader`. To configure a step this way, specify the`item-count` (which defaults to 10) and optionally configure the`checkpoint-policy` as item (this is the default). + +``` +... + + + + + + + +... +``` + +If item-based checkpointing is chosen, an additional attribute `time-limit` is supported. +This sets a time limit for how long the number of items specified has to be processed. If +the timeout is reached, the chunk will complete with however many items have been read by +then regardless of what the `item-count` is configured to be. + +#### Custom checkpointing + +JSR-352 calls the process around the commit interval within a step "checkpointing". +Item-based checkpointing is one approach as mentioned above. However, this is not robust +enough in many cases. Because of this, the spec allows for the implementation of a custom +checkpointing algorithm by implementing the `javax.batch.api.chunk.CheckpointAlgorithm`interface. This functionality is functionally the same as Spring Batch’s custom completion +policy. To use an implementation of `CheckpointAlgorithm`, configure your step with the +custom `checkpoint-policy` as shown below where `fooCheckpointer` refers to an +implementation of `CheckpointAlgorithm`. + +``` +... + + + + + + + + +... +``` + +### Running a job + +The entrance to executing a JSR-352 based job is through the`javax.batch.operations.JobOperator`. Spring Batch provides its own implementation of +this interface (`org.springframework.batch.core.jsr.launch.JsrJobOperator`). This +implementation is loaded via the `javax.batch.runtime.BatchRuntime`. 
Launching a +JSR-352 based batch job is implemented as follows: + +``` +JobOperator jobOperator = BatchRuntime.getJobOperator(); +long jobExecutionId = jobOperator.start("fooJob", new Properties()); +``` + +The above code does the following: + +* Bootstraps a base `ApplicationContext`: In order to provide batch functionality, the + framework needs some infrastructure bootstrapped. This occurs once per JVM. The + components that are bootstrapped are similar to those provided by`@EnableBatchProcessing`. Specific details can be found in the javadoc for the`JsrJobOperator`. + +* Loads an `ApplicationContext` for the job requested: In the example + above, the framework looks in /META-INF/batch-jobs for a file named fooJob.xml and load a + context that is a child of the shared context mentioned previously. + +* Launch the job: The job defined within the context will be executed asynchronously. + The `JobExecution’s` ID will be returned. + +| |All JSR-352 based batch jobs are executed asynchronously.| +|---|---------------------------------------------------------| + +When `JobOperator#start` is called using `SimpleJobOperator`, Spring Batch determines if +the call is an initial run or a retry of a previously executed run. Using the JSR-352 +based `JobOperator#start(String jobXMLName, Properties jobParameters)`, the framework +will always create a new JobInstance (JSR-352 job parameters are non-identifying). In order to +restart a job, a call to`JobOperator#restart(long executionId, Properties restartParameters)` is required. + +### Contexts + +JSR-352 defines two context objects that are used to interact with the meta-data of a job or step from +within a batch artifact: `javax.batch.runtime.context.JobContext` and`javax.batch.runtime.context.StepContext`. Both of these are available in any step +level artifact (`Batchlet`, `ItemReader`, etc) with the`JobContext` being available to job level artifacts as well +(`JobListener` for example). 
+ +To obtain a reference to the `JobContext` or `StepContext`within the current scope, simply use the `@Inject` annotation: + +``` +@Inject +JobContext jobContext; +``` + +| |@Autowire for JSR-352 contexts

Using Spring’s @Autowire is not supported for the injection of these contexts.| +|---|----------------------------------------------------------------------------------------------------------------------| + +In Spring Batch, the `JobContext` and `StepContext` wrap their +corresponding execution objects (`JobExecution` and`StepExecution` respectively). Data stored through`StepContext#setPersistentUserData(Serializable data)` is stored in the +Spring Batch `StepExecution#executionContext`. + +### Step Flow + +Within a JSR-352 based job, the flow of steps works similarly as it does within Spring Batch. +However, there are a few subtle differences: + +* Decision’s are steps - In a regular Spring Batch job, a decision is a state that does not + have an independent `StepExecution` or any of the rights and + responsibilities that go along with being a full step.. However, with JSR-352, a decision + is a step just like any other and will behave just as any other steps (transactionality, + it gets a `StepExecution`, etc). This means that they are treated the + same as any other step on restarts as well. + +* `next` attribute and step transitions - In a regular job, these are + allowed to appear together in the same step. JSR-352 allows them to both be used in the + same step with the next attribute taking precedence in evaluation. + +* Transition element ordering - In a standard Spring Batch job, transition elements are + sorted from most specific to least specific and evaluated in that order. JSR-352 jobs + evaluate transition elements in the order they are specified in the XML. + +### Scaling a JSR-352 batch job + +Traditional Spring Batch jobs have four ways of scaling (the last two capable of being executed across +multiple JVMs): + +* Split - Running multiple steps in parallel. + +* Multiple threads - Executing a single step via multiple threads. + +* Partitioning - Dividing the data up for parallel processing (manager/worker). 
+ +* Remote Chunking - Executing the processor piece of logic remotely. + +JSR-352 provides two options for scaling batch jobs. Both options support only a single JVM: + +* Split - Same as Spring Batch + +* Partitioning - Conceptually the same as Spring Batch however implemented slightly different. + +#### Partitioning + +Conceptually, partitioning in JSR-352 is the same as it is in Spring Batch. Meta-data is provided +to each worker to identify the input to be processed, with the workers reporting back to the manager the +results upon completion. However, there are some important differences: + +* Partitioned `Batchlet` - This will run multiple instances of the + configured `Batchlet` on multiple threads. Each instance will have + it’s own set of properties as provided by the JSL or the`PartitionPlan` + +* `PartitionPlan` - With Spring Batch’s partitioning, an`ExecutionContext` is provided for each partition. With JSR-352, a + single `javax.batch.api.partition.PartitionPlan` is provided with an + array of `Properties` providing the meta-data for each partition. + +* `PartitionMapper` - JSR-352 provides two ways to generate partition + meta-data. One is via the JSL (partition properties). The second is via an implementation + of the `javax.batch.api.partition.PartitionMapper` interface. + Functionally, this interface is similar to the`org.springframework.batch.core.partition.support.Partitioner`interface provided by Spring Batch in that it provides a way to programmatically generate + meta-data for partitioning. + +* `StepExecutions` - In Spring Batch, partitioned steps are run as + manager/worker. Within JSR-352, the same configuration occurs. However, the worker steps do + not get official `StepExecutions`. Because of that, calls to`JsrJobOperator#getStepExecutions(long jobExecutionId)` will only + return the `StepExecution` for the manager. + +| |The child `StepExecutions` still exist in the job repository and are available
through the `JobExplorer`.| +|---|-------------------------------------------------------------------------------------------------------------| + +* Compensating logic - Since Spring Batch implements the manager/worker logic of + partitioning using steps, `StepExecutionListeners` can be used to + handle compensating logic if something goes wrong. However, since the workers JSR-352 + provides a collection of other components for the ability to provide compensating logic when + errors occur and to dynamically set the exit status. These components include the following: + +| *Artifact Interface* | *Description* | +|----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| +|`javax.batch.api.partition.PartitionCollector`| Provides a way for worker steps to send information back to the
manager. There is one instance per worker thread. | +|`javax.batch.api.partition.PartitionAnalyzer` |End point that receives the information collected by the`PartitionCollector` as well as the resulting
statuses from a completed partition.| +| `javax.batch.api.partition.PartitionReducer` | Provides the ability to supply compensating logic for a partitioned
step. | + +### Testing + +Since all JSR-352 based jobs are executed asynchronously, it can be difficult to determine when a job has +completed. To help with testing, Spring Batch provides the`org.springframework.batch.test.JsrTestUtils`. This utility class provides the +ability to start a job and restart a job and wait for it to complete. Once the job completes, the +associated `JobExecution` is returned. \ No newline at end of file diff --git a/docs/en/spring-batch/monitoring-and-metrics.md b/docs/en/spring-batch/monitoring-and-metrics.md new file mode 100644 index 0000000000000000000000000000000000000000..86c682c6565b23981d300c690415b2dbb04a03a4 --- /dev/null +++ b/docs/en/spring-batch/monitoring-and-metrics.md @@ -0,0 +1,75 @@ +# Monitoring and metrics + +## Monitoring and metrics + +Since version 4.2, Spring Batch provides support for batch monitoring and metrics +based on [Micrometer](https://micrometer.io/). This section describes +which metrics are provided out-of-the-box and how to contribute custom metrics. + +### Built-in metrics + +Metrics collection does not require any specific configuration. All metrics provided +by the framework are registered in[Micrometer’s global registry](https://micrometer.io/docs/concepts#_global_registry)under the `spring.batch` prefix. 
The following table explains all the metrics in details: + +| *Metric Name* | *Type* | *Description* | *Tags* | +|---------------------------|-----------------|---------------------------|---------------------------------| +| `spring.batch.job` | `TIMER` | Duration of job execution | `name`, `status` | +| `spring.batch.job.active` |`LONG_TASK_TIMER`| Currently active jobs | `name` | +| `spring.batch.step` | `TIMER` |Duration of step execution | `name`, `job.name`, `status` | +| `spring.batch.item.read` | `TIMER` | Duration of item reading |`job.name`, `step.name`, `status`| +|`spring.batch.item.process`| `TIMER` |Duration of item processing|`job.name`, `step.name`, `status`| +|`spring.batch.chunk.write` | `TIMER` | Duration of chunk writing |`job.name`, `step.name`, `status`| + +| |The `status` tag can be either `SUCCESS` or `FAILURE`.| +|---|------------------------------------------------------| + +### Custom metrics + +If you want to use your own metrics in your custom components, we recommend using +Micrometer APIs directly. 
The following is an example of how to time a `Tasklet`: + +``` +import io.micrometer.core.instrument.Metrics; +import io.micrometer.core.instrument.Timer; + +import org.springframework.batch.core.StepContribution; +import org.springframework.batch.core.scope.context.ChunkContext; +import org.springframework.batch.core.step.tasklet.Tasklet; +import org.springframework.batch.repeat.RepeatStatus; + +public class MyTimedTasklet implements Tasklet { + + @Override + public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) { + Timer.Sample sample = Timer.start(Metrics.globalRegistry); + String status = "success"; + try { + // do some work + } catch (Exception e) { + // handle exception + status = "failure"; + } finally { + sample.stop(Timer.builder("my.tasklet.timer") + .description("Duration of MyTimedTasklet") + .tag("status", status) + .register(Metrics.globalRegistry)); + } + return RepeatStatus.FINISHED; + } +} +``` + +### Disabling metrics + +Metrics collection is a concern similar to logging. Disabling logs is typically +done by configuring the logging library and this is no different for metrics. +There is no feature in Spring Batch to disable micrometer’s metrics, this should +be done on micrometer’s side. Since Spring Batch stores metrics in the global +registry of micrometer with the `spring.batch` prefix, it is possible to configure +micrometer to ignore/deny batch metrics with the following snippet: + +``` +Metrics.globalRegistry.config().meterFilter(MeterFilter.denyNameStartsWith("spring.batch")) +``` + +Please refer to micrometer’s [reference documentation](http://micrometer.io/docs/concepts#_meter_filters)for more details. 
\ No newline at end of file diff --git a/docs/en/spring-batch/processor.md b/docs/en/spring-batch/processor.md new file mode 100644 index 0000000000000000000000000000000000000000..a25e49301d882f1261e37ee8964b1b4a7f8168ac --- /dev/null +++ b/docs/en/spring-batch/processor.md @@ -0,0 +1,347 @@ +# Item processing + +## Item processing + +XMLJavaBoth + +The [ItemReader and ItemWriter interfaces](readersAndWriters.html#readersAndWriters) are both very useful for their specific +tasks, but what if you want to insert business logic before writing? One option for both +reading and writing is to use the composite pattern: Create an `ItemWriter` that contains +another `ItemWriter` or an `ItemReader` that contains another `ItemReader`. The following +code shows an example: + +``` +public class CompositeItemWriter implements ItemWriter { + + ItemWriter itemWriter; + + public CompositeItemWriter(ItemWriter itemWriter) { + this.itemWriter = itemWriter; + } + + public void write(List items) throws Exception { + //Add business logic here + itemWriter.write(items); + } + + public void setDelegate(ItemWriter itemWriter){ + this.itemWriter = itemWriter; + } +} +``` + +The preceding class contains another `ItemWriter` to which it delegates after having +provided some business logic. This pattern could easily be used for an `ItemReader` as +well, perhaps to obtain more reference data based upon the input that was provided by the +main `ItemReader`. It is also useful if you need to control the call to `write` yourself. +However, if you only want to 'transform' the item passed in for writing before it is +actually written, you need not `write` yourself. You can just modify the item. For this +scenario, Spring Batch provides the `ItemProcessor` interface, as shown in the following +interface definition: + +``` +public interface ItemProcessor { + + O process(I item) throws Exception; +} +``` + +An `ItemProcessor` is simple. Given one object, transform it and return another. 
The +provided object may or may not be of the same type. The point is that business logic may +be applied within the process, and it is completely up to the developer to create that +logic. An `ItemProcessor` can be wired directly into a step. For example, assume an`ItemReader` provides a class of type `Foo` and that it needs to be converted to type `Bar`before being written out. The following example shows an `ItemProcessor` that performs +the conversion: + +``` +public class Foo {} + +public class Bar { + public Bar(Foo foo) {} +} + +public class FooProcessor implements ItemProcessor { + public Bar process(Foo foo) throws Exception { + //Perform simple transformation, convert a Foo to a Bar + return new Bar(foo); + } +} + +public class BarWriter implements ItemWriter { + public void write(List bars) throws Exception { + //write bars + } +} +``` + +In the preceding example, there is a class `Foo`, a class `Bar`, and a class`FooProcessor` that adheres to the `ItemProcessor` interface. The transformation is +simple, but any type of transformation could be done here. The `BarWriter` writes `Bar`objects, throwing an exception if any other type is provided. Similarly, the`FooProcessor` throws an exception if anything but a `Foo` is provided. The`FooProcessor` can then be injected into a `Step`, as shown in the following example: + +XML Configuration + +``` + + + + + + + +``` + +Java Configuration + +``` +@Bean +public Job ioSampleJob() { + return this.jobBuilderFactory.get("ioSampleJob") + .start(step1()) + .build(); +} + +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(2) + .reader(fooReader()) + .processor(fooProcessor()) + .writer(barWriter()) + .build(); +} +``` + +A difference between `ItemProcessor` and `ItemReader` or `ItemWriter` is that an `ItemProcessor`is optional for a `Step`. 
+ +### Chaining ItemProcessors + +Performing a single transformation is useful in many scenarios, but what if you want to +'chain' together multiple `ItemProcessor` implementations? This can be accomplished using +the composite pattern mentioned previously. To update the previous, single +transformation, example, `Foo` is transformed to `Bar`, which is transformed to `Foobar`and written out, as shown in the following example: + +``` +public class Foo {} + +public class Bar { + public Bar(Foo foo) {} +} + +public class Foobar { + public Foobar(Bar bar) {} +} + +public class FooProcessor implements ItemProcessor { + public Bar process(Foo foo) throws Exception { + //Perform simple transformation, convert a Foo to a Bar + return new Bar(foo); + } +} + +public class BarProcessor implements ItemProcessor { + public Foobar process(Bar bar) throws Exception { + return new Foobar(bar); + } +} + +public class FoobarWriter implements ItemWriter{ + public void write(List items) throws Exception { + //write items + } +} +``` + +A `FooProcessor` and a `BarProcessor` can be 'chained' together to give the resultant`Foobar`, as shown in the following example: + +``` +CompositeItemProcessor compositeProcessor = + new CompositeItemProcessor(); +List itemProcessors = new ArrayList(); +itemProcessors.add(new FooProcessor()); +itemProcessors.add(new BarProcessor()); +compositeProcessor.setDelegates(itemProcessors); +``` + +Just as with the previous example, the composite processor can be configured into the`Step`: + +XML Configuration + +``` + + + + + + + + + + + + + + + + +``` + +Java Configuration + +``` +@Bean +public Job ioSampleJob() { + return this.jobBuilderFactory.get("ioSampleJob") + .start(step1()) + .build(); +} + +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(2) + .reader(fooReader()) + .processor(compositeProcessor()) + .writer(foobarWriter()) + .build(); +} + +@Bean +public CompositeItemProcessor compositeProcessor() { + List 
delegates = new ArrayList<>(2); + delegates.add(new FooProcessor()); + delegates.add(new BarProcessor()); + + CompositeItemProcessor processor = new CompositeItemProcessor(); + + processor.setDelegates(delegates); + + return processor; +} +``` + +### Filtering Records + +One typical use for an item processor is to filter out records before they are passed to +the `ItemWriter`. Filtering is an action distinct from skipping. Skipping indicates that +a record is invalid, while filtering simply indicates that a record should not be +written. + +For example, consider a batch job that reads a file containing three different types of +records: records to insert, records to update, and records to delete. If record deletion +is not supported by the system, then we would not want to send any "delete" records to +the `ItemWriter`. But, since these records are not actually bad records, we would want to +filter them out rather than skip them. As a result, the `ItemWriter` would receive only +"insert" and "update" records. + +To filter a record, you can return `null` from the `ItemProcessor`. The framework detects +that the result is `null` and avoids adding that item to the list of records delivered to +the `ItemWriter`. As usual, an exception thrown from the `ItemProcessor` results in a +skip. + +### Validating Input + +In the [ItemReaders and ItemWriters](readersAndWriters.html#readersAndWriters) chapter, multiple approaches to parsing input have been +discussed. Each major implementation throws an exception if it is not 'well-formed'. The`FixedLengthTokenizer` throws an exception if a range of data is missing. Similarly, +attempting to access an index in a `RowMapper` or `FieldSetMapper` that does not exist or +is in a different format than the one expected causes an exception to be thrown. All of +these types of exceptions are thrown before `read` returns. However, they do not address +the issue of whether or not the returned item is valid. 
For example, if one of the fields +is an age, it obviously cannot be negative. It may parse correctly, because it exists and +is a number, but it does not cause an exception. Since there are already a plethora of +validation frameworks, Spring Batch does not attempt to provide yet another. Rather, it +provides a simple interface, called `Validator`, that can be implemented by any number of +frameworks, as shown in the following interface definition: + +``` +public interface Validator { + + void validate(T value) throws ValidationException; + +} +``` + +The contract is that the `validate` method throws an exception if the object is invalid +and returns normally if it is valid. Spring Batch provides an out of the box`ValidatingItemProcessor`, as shown in the following bean definition: + +XML Configuration + +``` + + + + + + + + + +``` + +Java Configuration + +``` +@Bean +public ValidatingItemProcessor itemProcessor() { + ValidatingItemProcessor processor = new ValidatingItemProcessor(); + + processor.setValidator(validator()); + + return processor; +} + +@Bean +public SpringValidator validator() { + SpringValidator validator = new SpringValidator(); + + validator.setValidator(new TradeValidator()); + + return validator; +} +``` + +You can also use the `BeanValidatingItemProcessor` to validate items annotated with +the Bean Validation API (JSR-303) annotations. 
For example, given the following type `Person`: + +``` +class Person { + + @NotEmpty + private String name; + + public Person(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + +} +``` + +you can validate items by declaring a `BeanValidatingItemProcessor` bean in your +application context and register it as a processor in your chunk-oriented step: + +``` +@Bean +public BeanValidatingItemProcessor beanValidatingItemProcessor() throws Exception { + BeanValidatingItemProcessor beanValidatingItemProcessor = new BeanValidatingItemProcessor<>(); + beanValidatingItemProcessor.setFilter(true); + + return beanValidatingItemProcessor; +} +``` + +### Fault Tolerance + +When a chunk is rolled back, items that have been cached during reading may be +reprocessed. If a step is configured to be fault tolerant (typically by using skip or +retry processing), any `ItemProcessor` used should be implemented in a way that is +idempotent. Typically that would consist of performing no changes on the input item for +the `ItemProcessor` and only updating the +instance that is the result. \ No newline at end of file diff --git a/docs/en/spring-batch/readersAndWriters.md b/docs/en/spring-batch/readersAndWriters.md new file mode 100644 index 0000000000000000000000000000000000000000..9d7cea907527e6ba3b545ee92acd63df1e2088aa --- /dev/null +++ b/docs/en/spring-batch/readersAndWriters.md @@ -0,0 +1,2760 @@ +# ItemReaders and ItemWriters + +## ItemReaders and ItemWriters + +XMLJavaBoth + +All batch processing can be described in its most simple form as reading in large amounts +of data, performing some type of calculation or transformation, and writing the result +out. Spring Batch provides three key interfaces to help perform bulk reading and writing:`ItemReader`, `ItemProcessor`, and `ItemWriter`. 
+ +### `ItemReader` + +Although a simple concept, an `ItemReader` is the means for providing data from many +different types of input. The most general examples include: + +* Flat File: Flat-file item readers read lines of data from a flat file that typically + describes records with fields of data defined by fixed positions in the file or delimited + by some special character (such as a comma). + +* XML: XML `ItemReaders` process XML independently of technologies used for parsing, + mapping and validating objects. Input data allows for the validation of an XML file + against an XSD schema. + +* Database: A database resource is accessed to return resultsets which can be mapped to + objects for processing. The default SQL `ItemReader` implementations invoke a `RowMapper`to return objects, keep track of the current row if restart is required, store basic + statistics, and provide some transaction enhancements that are explained later. + +There are many more possibilities, but we focus on the basic ones for this chapter. A +complete list of all available `ItemReader` implementations can be found in[Appendix A](appendix.html#listOfReadersAndWriters). + +`ItemReader` is a basic interface for generic +input operations, as shown in the following interface definition: + +``` +public interface ItemReader { + + T read() throws Exception, UnexpectedInputException, ParseException, NonTransientResourceException; + +} +``` + +The `read` method defines the most essential contract of the `ItemReader`. Calling it +returns one item or `null` if no more items are left. An item might represent a line in a +file, a row in a database, or an element in an XML file. It is generally expected that +these are mapped to a usable domain object (such as `Trade`, `Foo`, or others), but there +is no requirement in the contract to do so. + +It is expected that implementations of the `ItemReader` interface are forward only. 
+However, if the underlying resource is transactional (such as a JMS queue) then calling`read` may return the same logical item on subsequent calls in a rollback scenario. It is +also worth noting that a lack of items to process by an `ItemReader` does not cause an +exception to be thrown. For example, a database `ItemReader` that is configured with a +query that returns 0 results returns `null` on the first invocation of `read`. + +### `ItemWriter` + +`ItemWriter` is similar in functionality to an `ItemReader` but with inverse operations. +Resources still need to be located, opened, and closed but they differ in that an`ItemWriter` writes out, rather than reading in. In the case of databases or queues, +these operations may be inserts, updates, or sends. The format of the serialization of +the output is specific to each batch job. + +As with `ItemReader`,`ItemWriter` is a fairly generic interface, as shown in the following interface definition: + +``` +public interface ItemWriter { + + void write(List items) throws Exception; + +} +``` + +As with `read` on `ItemReader`, `write` provides the basic contract of `ItemWriter`. It +attempts to write out the list of items passed in as long as it is open. Because it is +generally expected that items are 'batched' together into a chunk and then output, the +interface accepts a list of items, rather than an item by itself. After writing out the +list, any flushing that may be necessary can be performed before returning from the write +method. For example, if writing to a Hibernate DAO, multiple calls to write can be made, +one for each item. The writer can then call `flush` on the hibernate session before +returning. + +### `ItemStream` + +Both `ItemReaders` and `ItemWriters` serve their individual purposes well, but there is a +common concern among both of them that necessitates another interface. 
In general, as +part of the scope of a batch job, readers and writers need to be opened, closed, and +require a mechanism for persisting state. The `ItemStream` interface serves that purpose, +as shown in the following example: + +``` +public interface ItemStream { + + void open(ExecutionContext executionContext) throws ItemStreamException; + + void update(ExecutionContext executionContext) throws ItemStreamException; + + void close() throws ItemStreamException; +} +``` + +Before describing each method, we should mention the `ExecutionContext`. Clients of an`ItemReader` that also implement `ItemStream` should call `open` before any calls to`read`, in order to open any resources such as files or to obtain connections. A similar +restriction applies to an `ItemWriter` that implements `ItemStream`. As mentioned in +Chapter 2, if expected data is found in the `ExecutionContext`, it may be used to start +the `ItemReader` or `ItemWriter` at a location other than its initial state. Conversely,`close` is called to ensure that any resources allocated during open are released safely.`update` is called primarily to ensure that any state currently being held is loaded into +the provided `ExecutionContext`. This method is called before committing, to ensure that +the current state is persisted in the database before commit. + +In the special case where the client of an `ItemStream` is a `Step` (from the Spring +Batch Core), an `ExecutionContext` is created for each StepExecution to allow users to +store the state of a particular execution, with the expectation that it is returned if +the same `JobInstance` is started again. For those familiar with Quartz, the semantics +are very similar to a Quartz `JobDataMap`. + +### The Delegate Pattern and Registering with the Step + +Note that the `CompositeItemWriter` is an example of the delegation pattern, which is +common in Spring Batch. The delegates themselves might implement callback interfaces, +such as `StepListener`. 
If they do and if they are being used in conjunction with Spring +Batch Core as part of a `Step` in a `Job`, then they almost certainly need to be +registered manually with the `Step`. A reader, writer, or processor that is directly +wired into the `Step` gets registered automatically if it implements `ItemStream` or a`StepListener` interface. However, because the delegates are not known to the `Step`, +they need to be injected as listeners or streams (or both if appropriate). + +The following example shows how to inject a delegate as a stream in XML: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + +``` + +The following example shows how to inject a delegate as a stream in XML: + +Java Configuration + +``` +@Bean +public Job ioSampleJob() { + return this.jobBuilderFactory.get("ioSampleJob") + .start(step1()) + .build(); +} + +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(2) + .reader(fooReader()) + .processor(fooProcessor()) + .writer(compositeItemWriter()) + .stream(barWriter()) + .build(); +} + +@Bean +public CustomCompositeItemWriter compositeItemWriter() { + + CustomCompositeItemWriter writer = new CustomCompositeItemWriter(); + + writer.setDelegate(barWriter()); + + return writer; +} + +@Bean +public BarWriter barWriter() { + return new BarWriter(); +} +``` + +### Flat Files + +One of the most common mechanisms for interchanging bulk data has always been the flat +file. Unlike XML, which has an agreed upon standard for defining how it is structured +(XSD), anyone reading a flat file must understand ahead of time exactly how the file is +structured. In general, all flat files fall into two types: delimited and fixed length. +Delimited files are those in which fields are separated by a delimiter, such as a comma. +Fixed Length files have fields that are a set length. 
+ +#### The `FieldSet` + +When working with flat files in Spring Batch, regardless of whether it is for input or +output, one of the most important classes is the `FieldSet`. Many architectures and +libraries contain abstractions for helping you read in from a file, but they usually +return a `String` or an array of `String` objects. This really only gets you halfway +there. A `FieldSet` is Spring Batch’s abstraction for enabling the binding of fields from +a file resource. It allows developers to work with file input in much the same way as +they would work with database input. A `FieldSet` is conceptually similar to a JDBC`ResultSet`. A `FieldSet` requires only one argument: a `String` array of tokens. +Optionally, you can also configure the names of the fields so that the fields may be +accessed either by index or name as patterned after `ResultSet`, as shown in the following +example: + +``` +String[] tokens = new String[]{"foo", "1", "true"}; +FieldSet fs = new DefaultFieldSet(tokens); +String name = fs.readString(0); +int value = fs.readInt(1); +boolean booleanValue = fs.readBoolean(2); +``` + +There are many more options on the `FieldSet` interface, such as `Date`, long,`BigDecimal`, and so on. The biggest advantage of the `FieldSet` is that it provides +consistent parsing of flat file input. Rather than each batch job parsing differently in +potentially unexpected ways, it can be consistent, both when handling errors caused by a +format exception, or when doing simple data conversions. + +#### `FlatFileItemReader` + +A flat file is any type of file that contains at most two-dimensional (tabular) data. +Reading flat files in the Spring Batch framework is facilitated by the class called`FlatFileItemReader`, which provides basic functionality for reading and parsing flat +files. The two most important required dependencies of `FlatFileItemReader` are`Resource` and `LineMapper`. The `LineMapper` interface is explored more in the next +sections. 
The resource property represents a Spring Core `Resource`. Documentation +explaining how to create beans of this type can be found in[Spring +Framework, Chapter 5. Resources](https://docs.spring.io/spring/docs/current/spring-framework-reference/core.html#resources). Therefore, this guide does not go into the details of +creating `Resource` objects beyond showing the following simple example: + +``` +Resource resource = new FileSystemResource("resources/trades.csv"); +``` + +In complex batch environments, the directory structures are often managed by the Enterprise Application Integration (EAI) +infrastructure, where drop zones for external interfaces are established for moving files +from FTP locations to batch processing locations and vice versa. File moving utilities +are beyond the scope of the Spring Batch architecture, but it is not unusual for batch +job streams to include file moving utilities as steps in the job stream. The batch +architecture only needs to know how to locate the files to be processed. Spring Batch +begins the process of feeding the data into the pipe from this starting point. However,[Spring Integration](https://projects.spring.io/spring-integration/) provides many +of these types of services. + +The other properties in `FlatFileItemReader` let you further specify how your data is +interpreted, as described in the following table: + +| Property | Type | Description | +|---------------------|---------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| +| comments | String[] | Specifies line prefixes that indicate comment rows. | +| encoding | String | Specifies what text encoding to use. The default is the value of `Charset.defaultCharset()`. | +| lineMapper | `LineMapper` | Converts a `String` to an `Object` representing the item. | +| linesToSkip | int | Number of lines to ignore at the top of the file. 
| 
+|recordSeparatorPolicy|RecordSeparatorPolicy| Used to determine where the line endings are and do things like continue over a line ending if inside a quoted string. |
+| resource | `Resource` | The resource from which to read. |
+|skippedLinesCallback | LineCallbackHandler | Interface that passes the raw line content of the lines in the file to be skipped. If `linesToSkip` is set to 2, then this interface is called twice. |
+| strict | boolean | In strict mode, the reader throws an exception on `ExecutionContext` if
the input resource does not exist. Otherwise, it logs the problem and continues. | + +##### `LineMapper` + +As with `RowMapper`, which takes a low-level construct such as `ResultSet` and returns +an `Object`, flat file processing requires the same construct to convert a `String` line +into an `Object`, as shown in the following interface definition: + +``` +public interface LineMapper { + + T mapLine(String line, int lineNumber) throws Exception; + +} +``` + +The basic contract is that, given the current line and the line number with which it is +associated, the mapper should return a resulting domain object. This is similar to`RowMapper`, in that each line is associated with its line number, just as each row in a`ResultSet` is tied to its row number. This allows the line number to be tied to the +resulting domain object for identity comparison or for more informative logging. However, +unlike `RowMapper`, the `LineMapper` is given a raw line which, as discussed above, only +gets you halfway there. The line must be tokenized into a `FieldSet`, which can then be +mapped to an object, as described later in this document. + +##### `LineTokenizer` + +An abstraction for turning a line of input into a `FieldSet` is necessary because there +can be many formats of flat file data that need to be converted to a `FieldSet`. In +Spring Batch, this interface is the `LineTokenizer`: + +``` +public interface LineTokenizer { + + FieldSet tokenize(String line); + +} +``` + +The contract of a `LineTokenizer` is such that, given a line of input (in theory the`String` could encompass more than one line), a `FieldSet` representing the line is +returned. This `FieldSet` can then be passed to a `FieldSetMapper`. Spring Batch contains +the following `LineTokenizer` implementations: + +* `DelimitedLineTokenizer`: Used for files where fields in a record are separated by a + delimiter. The most common delimiter is a comma, but pipes or semicolons are often used + as well. 
+ +* `FixedLengthTokenizer`: Used for files where fields in a record are each a "fixed + width". The width of each field must be defined for each record type. + +* `PatternMatchingCompositeLineTokenizer`: Determines which `LineTokenizer` among a list of + tokenizers should be used on a particular line by checking against a pattern. + +##### `FieldSetMapper` + +The `FieldSetMapper` interface defines a single method, `mapFieldSet`, which takes a`FieldSet` object and maps its contents to an object. This object may be a custom DTO, a +domain object, or an array, depending on the needs of the job. The `FieldSetMapper` is +used in conjunction with the `LineTokenizer` to translate a line of data from a resource +into an object of the desired type, as shown in the following interface definition: + +``` +public interface FieldSetMapper { + + T mapFieldSet(FieldSet fieldSet) throws BindException; + +} +``` + +The pattern used is the same as the `RowMapper` used by `JdbcTemplate`. + +##### `DefaultLineMapper` + +Now that the basic interfaces for reading in flat files have been defined, it becomes +clear that three basic steps are required: + +1. Read one line from the file. + +2. Pass the `String` line into the `LineTokenizer#tokenize()` method to retrieve a`FieldSet`. + +3. Pass the `FieldSet` returned from tokenizing to a `FieldSetMapper`, returning the + result from the `ItemReader#read()` method. + +The two interfaces described above represent two separate tasks: converting a line into a`FieldSet` and mapping a `FieldSet` to a domain object. Because the input of a`LineTokenizer` matches the input of the `LineMapper` (a line), and the output of a`FieldSetMapper` matches the output of the `LineMapper`, a default implementation that +uses both a `LineTokenizer` and a `FieldSetMapper` is provided. 
The `DefaultLineMapper`, +shown in the following class definition, represents the behavior most users need: + +``` +public class DefaultLineMapper implements LineMapper<>, InitializingBean { + + private LineTokenizer tokenizer; + + private FieldSetMapper fieldSetMapper; + + public T mapLine(String line, int lineNumber) throws Exception { + return fieldSetMapper.mapFieldSet(tokenizer.tokenize(line)); + } + + public void setLineTokenizer(LineTokenizer tokenizer) { + this.tokenizer = tokenizer; + } + + public void setFieldSetMapper(FieldSetMapper fieldSetMapper) { + this.fieldSetMapper = fieldSetMapper; + } +} +``` + +The above functionality is provided in a default implementation, rather than being built +into the reader itself (as was done in previous versions of the framework) to allow users +greater flexibility in controlling the parsing process, especially if access to the raw +line is needed. + +##### Simple Delimited File Reading Example + +The following example illustrates how to read a flat file with an actual domain scenario. +This particular batch job reads in football players from the following file: + +``` +ID,lastName,firstName,position,birthYear,debutYear +"AbduKa00,Abdul-Jabbar,Karim,rb,1974,1996", +"AbduRa00,Abdullah,Rabih,rb,1975,1999", +"AberWa00,Abercrombie,Walter,rb,1959,1982", +"AbraDa00,Abramowicz,Danny,wr,1945,1967", +"AdamBo00,Adams,Bob,te,1946,1969", +"AdamCh00,Adams,Charlie,wr,1979,2003" +``` + +The contents of this file are mapped to the following`Player` domain object: + +``` +public class Player implements Serializable { + + private String ID; + private String lastName; + private String firstName; + private String position; + private int birthYear; + private int debutYear; + + public String toString() { + return "PLAYER:ID=" + ID + ",Last Name=" + lastName + + ",First Name=" + firstName + ",Position=" + position + + ",Birth Year=" + birthYear + ",DebutYear=" + + debutYear; + } + + // setters and getters... 
+} +``` + +To map a `FieldSet` into a `Player` object, a `FieldSetMapper` that returns players needs +to be defined, as shown in the following example: + +``` +protected static class PlayerFieldSetMapper implements FieldSetMapper { + public Player mapFieldSet(FieldSet fieldSet) { + Player player = new Player(); + + player.setID(fieldSet.readString(0)); + player.setLastName(fieldSet.readString(1)); + player.setFirstName(fieldSet.readString(2)); + player.setPosition(fieldSet.readString(3)); + player.setBirthYear(fieldSet.readInt(4)); + player.setDebutYear(fieldSet.readInt(5)); + + return player; + } +} +``` + +The file can then be read by correctly constructing a `FlatFileItemReader` and calling`read`, as shown in the following example: + +``` +FlatFileItemReader itemReader = new FlatFileItemReader<>(); +itemReader.setResource(new FileSystemResource("resources/players.csv")); +DefaultLineMapper lineMapper = new DefaultLineMapper<>(); +//DelimitedLineTokenizer defaults to comma as its delimiter +lineMapper.setLineTokenizer(new DelimitedLineTokenizer()); +lineMapper.setFieldSetMapper(new PlayerFieldSetMapper()); +itemReader.setLineMapper(lineMapper); +itemReader.open(new ExecutionContext()); +Player player = itemReader.read(); +``` + +Each call to `read` returns a new`Player` object from each line in the file. When the end of the file is +reached, `null` is returned. + +##### Mapping Fields by Name + +There is one additional piece of functionality that is allowed by both`DelimitedLineTokenizer` and `FixedLengthTokenizer` and that is similar in function to a +JDBC `ResultSet`. The names of the fields can be injected into either of these`LineTokenizer` implementations to increase the readability of the mapping function. 
+First, the column names of all fields in the flat file are injected into the tokenizer, +as shown in the following example: + +``` +tokenizer.setNames(new String[] {"ID", "lastName", "firstName", "position", "birthYear", "debutYear"}); +``` + +A `FieldSetMapper` can use this information as follows: + +``` +public class PlayerMapper implements FieldSetMapper { + public Player mapFieldSet(FieldSet fs) { + + if (fs == null) { + return null; + } + + Player player = new Player(); + player.setID(fs.readString("ID")); + player.setLastName(fs.readString("lastName")); + player.setFirstName(fs.readString("firstName")); + player.setPosition(fs.readString("position")); + player.setDebutYear(fs.readInt("debutYear")); + player.setBirthYear(fs.readInt("birthYear")); + + return player; + } +} +``` + +##### Automapping FieldSets to Domain Objects + +For many, having to write a specific `FieldSetMapper` is equally as cumbersome as writing +a specific `RowMapper` for a `JdbcTemplate`. Spring Batch makes this easier by providing +a `FieldSetMapper` that automatically maps fields by matching a field name with a setter +on the object using the JavaBean specification. 
+ +Again using the football example, the `BeanWrapperFieldSetMapper` configuration looks like +the following snippet in XML: + +XML Configuration + +``` + + + + + +``` + +Again using the football example, the `BeanWrapperFieldSetMapper` configuration looks like +the following snippet in Java: + +Java Configuration + +``` +@Bean +public FieldSetMapper fieldSetMapper() { + BeanWrapperFieldSetMapper fieldSetMapper = new BeanWrapperFieldSetMapper(); + + fieldSetMapper.setPrototypeBeanName("player"); + + return fieldSetMapper; +} + +@Bean +@Scope("prototype") +public Player player() { + return new Player(); +} +``` + +For each entry in the `FieldSet`, the mapper looks for a corresponding setter on a new +instance of the `Player` object (for this reason, prototype scope is required) in the +same way the Spring container looks for setters matching a property name. Each available +field in the `FieldSet` is mapped, and the resultant `Player` object is returned, with no +code required. + +##### Fixed Length File Formats + +So far, only delimited files have been discussed in much detail. However, they represent +only half of the file reading picture. Many organizations that use flat files use fixed +length formats. An example fixed length file follows: + +``` +UK21341EAH4121131.11customer1 +UK21341EAH4221232.11customer2 +UK21341EAH4321333.11customer3 +UK21341EAH4421434.11customer4 +UK21341EAH4521535.11customer5 +``` + +While this looks like one large field, it actually represent 4 distinct fields: + +1. ISIN: Unique identifier for the item being ordered - 12 characters long. + +2. Quantity: Number of the item being ordered - 3 characters long. + +3. Price: Price of the item - 5 characters long. + +4. Customer: ID of the customer ordering the item - 9 characters long. + +When configuring the `FixedLengthLineTokenizer`, each of these lengths must be provided +in the form of ranges. 
+ +The following example shows how to define ranges for the `FixedLengthLineTokenizer` in +XML: + +XML Configuration + +``` + + + + +``` + +Because the `FixedLengthLineTokenizer` uses the same `LineTokenizer` interface as +discussed earlier, it returns the same `FieldSet` as if a delimiter had been used. This +allows the same approaches to be used in handling its output, such as using the`BeanWrapperFieldSetMapper`. + +| |Supporting the preceding syntax for ranges requires that a specialized property editor,`RangeArrayPropertyEditor`, be configured in the `ApplicationContext`. However, this bean
is automatically declared in an `ApplicationContext` where the batch namespace is used.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to define ranges for the `FixedLengthLineTokenizer` in +Java: + +Java Configuration + +``` +@Bean +public FixedLengthTokenizer fixedLengthTokenizer() { + FixedLengthTokenizer tokenizer = new FixedLengthTokenizer(); + + tokenizer.setNames("ISIN", "Quantity", "Price", "Customer"); + tokenizer.setColumns(new Range(1, 12), + new Range(13, 15), + new Range(16, 20), + new Range(21, 29)); + + return tokenizer; +} +``` + +Because the `FixedLengthLineTokenizer` uses the same `LineTokenizer` interface as +discussed above, it returns the same `FieldSet` as if a delimiter had been used. This +lets the same approaches be used in handling its output, such as using the`BeanWrapperFieldSetMapper`. + +##### Multiple Record Types within a Single File + +All of the file reading examples up to this point have all made a key assumption for +simplicity’s sake: all of the records in a file have the same format. However, this may +not always be the case. It is very common that a file might have records with different +formats that need to be tokenized differently and mapped to different objects. The +following excerpt from a file illustrates this: + +``` +USER;Smith;Peter;;T;20014539;F +LINEA;1044391041ABC037.49G201XX1383.12H +LINEB;2134776319DEF422.99M005LI +``` + +In this file we have three types of records, "USER", "LINEA", and "LINEB". A "USER" line +corresponds to a `User` object. "LINEA" and "LINEB" both correspond to `Line` objects, +though a "LINEA" has more information than a "LINEB". 
+ +The `ItemReader` reads each line individually, but we must specify different`LineTokenizer` and `FieldSetMapper` objects so that the `ItemWriter` receives the +correct items. The `PatternMatchingCompositeLineMapper` makes this easy by allowing maps +of patterns to `LineTokenizers` and patterns to `FieldSetMappers` to be configured. + +The following example shows how to define ranges for the `FixedLengthLineTokenizer` in +XML: + +XML Configuration + +``` + + + + + + + + + + + + + + + +``` + +Java Configuration + +``` +@Bean +public PatternMatchingCompositeLineMapper orderFileLineMapper() { + PatternMatchingCompositeLineMapper lineMapper = + new PatternMatchingCompositeLineMapper(); + + Map tokenizers = new HashMap<>(3); + tokenizers.put("USER*", userTokenizer()); + tokenizers.put("LINEA*", lineATokenizer()); + tokenizers.put("LINEB*", lineBTokenizer()); + + lineMapper.setTokenizers(tokenizers); + + Map mappers = new HashMap<>(2); + mappers.put("USER*", userFieldSetMapper()); + mappers.put("LINE*", lineFieldSetMapper()); + + lineMapper.setFieldSetMappers(mappers); + + return lineMapper; +} +``` + +In this example, "LINEA" and "LINEB" have separate `LineTokenizer` instances, but they both use +the same `FieldSetMapper`. + +The `PatternMatchingCompositeLineMapper` uses the `PatternMatcher#match` method +in order to select the correct delegate for each line. The `PatternMatcher` allows for +two wildcard characters with special meaning: the question mark ("?") matches exactly one +character, while the asterisk ("\*") matches zero or more characters. Note that, in the +preceding configuration, all patterns end with an asterisk, making them effectively +prefixes to lines. The `PatternMatcher` always matches the most specific pattern +possible, regardless of the order in the configuration. So if "LINE\*" and "LINEA\*" were +both listed as patterns, "LINEA" would match pattern "LINEA\*", while "LINEB" would match +pattern "LINE\*". 
Additionally, a single asterisk ("\*") can serve as a default by matching +any line not matched by any other pattern. + +The following example shows how to match a line not matched by any other pattern in XML: + +XML Configuration + +``` + +``` + +The following example shows how to match a line not matched by any other pattern in Java: + +Java Configuration + +``` +... +tokenizers.put("*", defaultLineTokenizer()); +... +``` + +There is also a `PatternMatchingCompositeLineTokenizer` that can be used for tokenization +alone. + +It is also common for a flat file to contain records that each span multiple lines. To +handle this situation, a more complex strategy is required. A demonstration of this +common pattern can be found in the `multiLineRecords` sample. + +##### Exception Handling in Flat Files + +There are many scenarios when tokenizing a line may cause exceptions to be thrown. Many +flat files are imperfect and contain incorrectly formatted records. Many users choose to +skip these erroneous lines while logging the issue, the original line, and the line +number. These logs can later be inspected manually or by another batch job. For this +reason, Spring Batch provides a hierarchy of exceptions for handling parse exceptions:`FlatFileParseException` and `FlatFileFormatException`. `FlatFileParseException` is +thrown by the `FlatFileItemReader` when any errors are encountered while trying to read a +file. `FlatFileFormatException` is thrown by implementations of the `LineTokenizer`interface and indicates a more specific error encountered while tokenizing. + +###### `IncorrectTokenCountException` + +Both `DelimitedLineTokenizer` and `FixedLengthLineTokenizer` have the ability to specify +column names that can be used for creating a `FieldSet`. 
However, if the number of column +names does not match the number of columns found while tokenizing a line, the `FieldSet`cannot be created, and an `IncorrectTokenCountException` is thrown, which contains the +number of tokens encountered, and the number expected, as shown in the following example: + +``` +tokenizer.setNames(new String[] {"A", "B", "C", "D"}); + +try { + tokenizer.tokenize("a,b,c"); +} +catch (IncorrectTokenCountException e) { + assertEquals(4, e.getExpectedCount()); + assertEquals(3, e.getActualCount()); +} +``` + +Because the tokenizer was configured with 4 column names but only 3 tokens were found in +the file, an `IncorrectTokenCountException` was thrown. + +###### `IncorrectLineLengthException` + +Files formatted in a fixed-length format have additional requirements when parsing +because, unlike a delimited format, each column must strictly adhere to its predefined +width. If the total line length does not equal the widest value of this column, an +exception is thrown, as shown in the following example: + +``` +tokenizer.setColumns(new Range[] { new Range(1, 5), + new Range(6, 10), + new Range(11, 15) }); +try { + tokenizer.tokenize("12345"); + fail("Expected IncorrectLineLengthException"); +} +catch (IncorrectLineLengthException ex) { + assertEquals(15, ex.getExpectedLength()); + assertEquals(5, ex.getActualLength()); +} +``` + +The configured ranges for the tokenizer above are: 1-5, 6-10, and 11-15. Consequently, +the total length of the line is 15. However, in the preceding example, a line of length 5 +was passed in, causing an `IncorrectLineLengthException` to be thrown. Throwing an +exception here rather than only mapping the first column allows the processing of the +line to fail earlier and with more information than it would contain if it failed while +trying to read in column 2 in a `FieldSetMapper`. However, there are scenarios where the +length of the line is not always constant. 
For this reason, validation of line length can +be turned off via the 'strict' property, as shown in the following example: + +``` +tokenizer.setColumns(new Range[] { new Range(1, 5), new Range(6, 10) }); +tokenizer.setStrict(false); +FieldSet tokens = tokenizer.tokenize("12345"); +assertEquals("12345", tokens.readString(0)); +assertEquals("", tokens.readString(1)); +``` + +The preceding example is almost identical to the one before it, except that`tokenizer.setStrict(false)` was called. This setting tells the tokenizer to not enforce +line lengths when tokenizing the line. A `FieldSet` is now correctly created and +returned. However, it contains only empty tokens for the remaining values. + +#### `FlatFileItemWriter` + +Writing out to flat files has the same problems and issues that reading in from a file +must overcome. A step must be able to write either delimited or fixed length formats in a +transactional manner. + +##### `LineAggregator` + +Just as the `LineTokenizer` interface is necessary to take an item and turn it into a`String`, file writing must have a way to aggregate multiple fields into a single string +for writing to a file. In Spring Batch, this is the `LineAggregator`, shown in the +following interface definition: + +``` +public interface LineAggregator { + + public String aggregate(T item); + +} +``` + +The `LineAggregator` is the logical opposite of `LineTokenizer`. `LineTokenizer` takes a`String` and returns a `FieldSet`, whereas `LineAggregator` takes an `item` and returns a`String`. 
+ +###### `PassThroughLineAggregator` + +The most basic implementation of the `LineAggregator` interface is the`PassThroughLineAggregator`, which assumes that the object is already a string or that +its string representation is acceptable for writing, as shown in the following code: + +``` +public class PassThroughLineAggregator implements LineAggregator { + + public String aggregate(T item) { + return item.toString(); + } +} +``` + +The preceding implementation is useful if direct control of creating the string is +required but the advantages of a `FlatFileItemWriter`, such as transaction and restart +support, are necessary. + +##### Simplified File Writing Example + +Now that the `LineAggregator` interface and its most basic implementation,`PassThroughLineAggregator`, have been defined, the basic flow of writing can be +explained: + +1. The object to be written is passed to the `LineAggregator` in order to obtain a`String`. + +2. The returned `String` is written to the configured file. + +The following excerpt from the `FlatFileItemWriter` expresses this in code: + +``` +public void write(T item) throws Exception { + write(lineAggregator.aggregate(item) + LINE_SEPARATOR); +} +``` + +In XML, a simple example of configuration might look like the following: + +XML Configuration + +``` + + + + + + +``` + +In Java, a simple example of configuration might look like the following: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter() { + return new FlatFileItemWriterBuilder() + .name("itemWriter") + .resource(new FileSystemResource("target/test-outputs/output.txt")) + .lineAggregator(new PassThroughLineAggregator<>()) + .build(); +} +``` + +##### `FieldExtractor` + +The preceding example may be useful for the most basic uses of a writing to a file. +However, most users of the `FlatFileItemWriter` have a domain object that needs to be +written out and, thus, must be converted into a line. In file reading, the following was +required: + +1. 
Read one line from the file. + +2. Pass the line into the `LineTokenizer#tokenize()` method, in order to retrieve a`FieldSet`. + +3. Pass the `FieldSet` returned from tokenizing to a `FieldSetMapper`, returning the + result from the `ItemReader#read()` method. + +File writing has similar but inverse steps: + +1. Pass the item to be written to the writer. + +2. Convert the fields on the item into an array. + +3. Aggregate the resulting array into a line. + +Because there is no way for the framework to know which fields from the object need to +be written out, a `FieldExtractor` must be written to accomplish the task of turning the +item into an array, as shown in the following interface definition: + +``` +public interface FieldExtractor { + + Object[] extract(T item); + +} +``` + +Implementations of the `FieldExtractor` interface should create an array from the fields +of the provided object, which can then be written out with a delimiter between the +elements or as part of a fixed-width line. + +###### `PassThroughFieldExtractor` + +There are many cases where a collection, such as an array, `Collection`, or `FieldSet`, +needs to be written out. "Extracting" an array from one of these collection types is very +straightforward. To do so, convert the collection to an array. Therefore, the`PassThroughFieldExtractor` should be used in this scenario. It should be noted that, if +the object passed in is not a type of collection, then the `PassThroughFieldExtractor`returns an array containing solely the item to be extracted. + +###### `BeanWrapperFieldExtractor` + +As with the `BeanWrapperFieldSetMapper` described in the file reading section, it is +often preferable to configure how to convert a domain object to an object array, rather +than writing the conversion yourself. 
The `BeanWrapperFieldExtractor` provides this +functionality, as shown in the following example: + +``` +BeanWrapperFieldExtractor extractor = new BeanWrapperFieldExtractor<>(); +extractor.setNames(new String[] { "first", "last", "born" }); + +String first = "Alan"; +String last = "Turing"; +int born = 1912; + +Name n = new Name(first, last, born); +Object[] values = extractor.extract(n); + +assertEquals(first, values[0]); +assertEquals(last, values[1]); +assertEquals(born, values[2]); +``` + +This extractor implementation has only one required property: the names of the fields to +map. Just as the `BeanWrapperFieldSetMapper` needs field names to map fields on the`FieldSet` to setters on the provided object, the `BeanWrapperFieldExtractor` needs names +to map to getters for creating an object array. It is worth noting that the order of the +names determines the order of the fields within the array. + +##### Delimited File Writing Example + +The most basic flat file format is one in which all fields are separated by a delimiter. +This can be accomplished using a `DelimitedLineAggregator`. The following example writes +out a simple domain object that represents a credit to a customer account: + +``` +public class CustomerCredit { + + private int id; + private String name; + private BigDecimal credit; + + //getters and setters removed for clarity +} +``` + +Because a domain object is being used, an implementation of the `FieldExtractor`interface must be provided, along with the delimiter to use. 
+ +The following example shows how to use the `FieldExtractor` with a delimiter in XML: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +The following example shows how to use the `FieldExtractor` with a delimiter in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) throws Exception { + BeanWrapperFieldExtractor fieldExtractor = new BeanWrapperFieldExtractor<>(); + fieldExtractor.setNames(new String[] {"name", "credit"}); + fieldExtractor.afterPropertiesSet(); + + DelimitedLineAggregator lineAggregator = new DelimitedLineAggregator<>(); + lineAggregator.setDelimiter(","); + lineAggregator.setFieldExtractor(fieldExtractor); + + return new FlatFileItemWriterBuilder() + .name("customerCreditWriter") + .resource(outputResource) + .lineAggregator(lineAggregator) + .build(); +} +``` + +In the previous example, the `BeanWrapperFieldExtractor` described earlier in this +chapter is used to turn the name and credit fields within `CustomerCredit` into an object +array, which is then written out with commas between each field. + +It is also possible to use the `FlatFileItemWriterBuilder.DelimitedBuilder` to +automatically create the `BeanWrapperFieldExtractor` and `DelimitedLineAggregator`as shown in the following example: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) throws Exception { + return new FlatFileItemWriterBuilder() + .name("customerCreditWriter") + .resource(outputResource) + .delimited() + .delimiter("|") + .names(new String[] {"name", "credit"}) + .build(); +} +``` + +##### Fixed Width File Writing Example + +Delimited is not the only type of flat file format. Many prefer to use a set width for +each column to delineate between fields, which is usually referred to as 'fixed width'. +Spring Batch supports this in file writing with the `FormatterLineAggregator`. 
+ +Using the same `CustomerCredit` domain object described above, it can be configured as +follows in XML: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +Using the same `CustomerCredit` domain object described above, it can be configured as +follows in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) throws Exception { + BeanWrapperFieldExtractor fieldExtractor = new BeanWrapperFieldExtractor<>(); + fieldExtractor.setNames(new String[] {"name", "credit"}); + fieldExtractor.afterPropertiesSet(); + + FormatterLineAggregator lineAggregator = new FormatterLineAggregator<>(); + lineAggregator.setFormat("%-9s%-2.0f"); + lineAggregator.setFieldExtractor(fieldExtractor); + + return new FlatFileItemWriterBuilder() + .name("customerCreditWriter") + .resource(outputResource) + .lineAggregator(lineAggregator) + .build(); +} +``` + +Most of the preceding example should look familiar. However, the value of the format +property is new. + +The following example shows the format property in XML: + +``` + +``` + +The following example shows the format property in Java: + +``` +... +FormatterLineAggregator lineAggregator = new FormatterLineAggregator<>(); +lineAggregator.setFormat("%-9s%-2.0f"); +... +``` + +The underlying implementation is built using the same`Formatter` added as part of Java 5. The Java`Formatter` is based on the`printf` functionality of the C programming +language. Most details on how to configure a formatter can be found in +the Javadoc of [Formatter](https://docs.oracle.com/javase/8/docs/api/java/util/Formatter.html). 
+ +It is also possible to use the `FlatFileItemWriterBuilder.FormattedBuilder` to +automatically create the `BeanWrapperFieldExtractor` and `FormatterLineAggregator`as shown in following example: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) throws Exception { + return new FlatFileItemWriterBuilder() + .name("customerCreditWriter") + .resource(outputResource) + .formatted() + .format("%-9s%-2.0f") + .names(new String[] {"name", "credit"}) + .build(); +} +``` + +##### Handling File Creation + +`FlatFileItemReader` has a very simple relationship with file resources. When the reader +is initialized, it opens the file (if it exists), and throws an exception if it does not. +File writing isn’t quite so simple. At first glance, it seems like a similar +straightforward contract should exist for `FlatFileItemWriter`: If the file already +exists, throw an exception, and, if it does not, create it and start writing. However, +potentially restarting a `Job` can cause issues. In normal restart scenarios, the +contract is reversed: If the file exists, start writing to it from the last known good +position, and, if it does not, throw an exception. However, what happens if the file name +for this job is always the same? In this case, you would want to delete the file if it +exists, unless it’s a restart. Because of this possibility, the `FlatFileItemWriter`contains the property, `shouldDeleteIfExists`. Setting this property to true causes an +existing file with the same name to be deleted when the writer is opened. + +### XML Item Readers and Writers + +Spring Batch provides transactional infrastructure for both reading XML records and +mapping them to Java objects as well as writing Java objects as XML records. + +| |Constraints on streaming XML

The StAX API is used for I/O, as other standard XML parsing APIs do not fit batch
processing requirements (DOM loads the whole input into memory at once and SAX controls
the parsing process by allowing the user to provide only callbacks).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +We need to consider how XML input and output works in Spring Batch. First, there are a +few concepts that vary from file reading and writing but are common across Spring Batch +XML processing. With XML processing, instead of lines of records (`FieldSet` instances) that need +to be tokenized, it is assumed an XML resource is a collection of 'fragments' +corresponding to individual records, as shown in the following image: + +![XML Input](https://docs.spring.io/spring-batch/docs/current/reference/html/images/xmlinput.png) + +Figure 1. XML Input + +The 'trade' tag is defined as the 'root element' in the scenario above. Everything +between '\' and '\' is considered one 'fragment'. Spring Batch +uses Object/XML Mapping (OXM) to bind fragments to objects. However, Spring Batch is not +tied to any particular XML binding technology. Typical use is to delegate to[Spring OXM](https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#oxm), which +provides uniform abstraction for the most popular OXM technologies. The dependency on +Spring OXM is optional and you can choose to implement Spring Batch specific interfaces +if desired. The relationship to the technologies that OXM supports is shown in the +following image: + +![OXM Binding](https://docs.spring.io/spring-batch/docs/current/reference/html/images/oxm-fragments.png) + +Figure 2. OXM Binding + +With an introduction to OXM and how one can use XML fragments to represent records, we +can now more closely examine readers and writers. 
+ +#### `StaxEventItemReader` + +The `StaxEventItemReader` configuration provides a typical setup for the processing of +records from an XML input stream. First, consider the following set of XML records that +the `StaxEventItemReader` can process: + +``` + + + + XYZ0001 + 5 + 11.39 + Customer1 + + + XYZ0002 + 2 + 72.99 + Customer2c + + + XYZ0003 + 9 + 99.99 + Customer3 + + +``` + +To be able to process the XML records, the following is needed: + +* Root Element Name: The name of the root element of the fragment that constitutes the + object to be mapped. The example configuration demonstrates this with the value of trade. + +* Resource: A Spring Resource that represents the file to read. + +* `Unmarshaller`: An unmarshalling facility provided by Spring OXM for mapping the XML + fragment to an object. + +The following example shows how to define a `StaxEventItemReader` that works with a root +element named `trade`, a resource of `data/iosample/input/input.xml`, and an unmarshaller +called `tradeMarshaller` in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to define a `StaxEventItemReader` that works with a root +element named `trade`, a resource of `data/iosample/input/input.xml`, and an unmarshaller +called `tradeMarshaller` in Java: + +Java Configuration + +``` +@Bean +public StaxEventItemReader itemReader() { + return new StaxEventItemReaderBuilder() + .name("itemReader") + .resource(new FileSystemResource("org/springframework/batch/item/xml/domain/trades.xml")) + .addFragmentRootElements("trade") + .unmarshaller(tradeMarshaller()) + .build(); + +} +``` + +Note that, in this example, we have chosen to use an `XStreamMarshaller`, which accepts +an alias passed in as a map with the first key and value being the name of the fragment +(that is, a root element) and the object type to bind. 
Then, similar to a `FieldSet`, the +names of the other elements that map to fields within the object type are described as +key/value pairs in the map. In the configuration file, we can use a Spring configuration +utility to describe the required alias. + +The following example shows how to describe the alias in XML: + +XML Configuration + +``` + + + + + + + + + + + +``` + +The following example shows how to describe the alias in Java: + +Java Configuration + +``` +@Bean +public XStreamMarshaller tradeMarshaller() { + Map aliases = new HashMap<>(); + aliases.put("trade", Trade.class); + aliases.put("price", BigDecimal.class); + aliases.put("isin", String.class); + aliases.put("customer", String.class); + aliases.put("quantity", Long.class); + + XStreamMarshaller marshaller = new XStreamMarshaller(); + + marshaller.setAliases(aliases); + + return marshaller; +} +``` + +On input, the reader reads the XML resource until it recognizes that a new fragment is +about to start. By default, the reader matches the element name to recognize that a new +fragment is about to start. The reader creates a standalone XML document from the +fragment and passes the document to a deserializer (typically a wrapper around a Spring +OXM `Unmarshaller`) to map the XML to a Java object. 
+ +In summary, this procedure is analogous to the following Java code, which uses the +injection provided by the Spring configuration: + +``` +StaxEventItemReader xmlStaxEventItemReader = new StaxEventItemReader<>(); +Resource resource = new ByteArrayResource(xmlResource.getBytes()); + +Map aliases = new HashMap(); +aliases.put("trade","org.springframework.batch.sample.domain.trade.Trade"); +aliases.put("price","java.math.BigDecimal"); +aliases.put("customer","java.lang.String"); +aliases.put("isin","java.lang.String"); +aliases.put("quantity","java.lang.Long"); +XStreamMarshaller unmarshaller = new XStreamMarshaller(); +unmarshaller.setAliases(aliases); +xmlStaxEventItemReader.setUnmarshaller(unmarshaller); +xmlStaxEventItemReader.setResource(resource); +xmlStaxEventItemReader.setFragmentRootElementName("trade"); +xmlStaxEventItemReader.open(new ExecutionContext()); + +boolean hasNext = true; + +Trade trade = null; + +while (hasNext) { + trade = xmlStaxEventItemReader.read(); + if (trade == null) { + hasNext = false; + } + else { + System.out.println(trade); + } +} +``` + +#### `StaxEventItemWriter` + +Output works symmetrically to input. The `StaxEventItemWriter` needs a `Resource`, a +marshaller, and a `rootTagName`. A Java object is passed to a marshaller (typically a +standard Spring OXM Marshaller) which writes to a `Resource` by using a custom event +writer that filters the `StartDocument` and `EndDocument` events produced for each +fragment by the OXM tools. 
 + +The following XML example uses the `MarshallingEventWriterSerializer`: + +XML Configuration + +``` + + + + + + +``` + +The following Java example uses the `MarshallingEventWriterSerializer`: + +Java Configuration + +``` +@Bean +public StaxEventItemWriter itemWriter(Resource outputResource) { + return new StaxEventItemWriterBuilder() + .name("tradesWriter") + .marshaller(tradeMarshaller()) + .resource(outputResource) + .rootTagName("trade") + .overwriteOutput(true) + .build(); + +} +``` + +The preceding configuration sets up the three required properties and sets the optional `overwriteOutput=true` attribute, mentioned earlier in this chapter for specifying whether +an existing file can be overwritten. + +The following XML example uses the same marshaller as the one used in the reading example +shown earlier in the chapter: + +XML Configuration + +``` + + + + + + + + + + + +``` + +The following Java example uses the same marshaller as the one used in the reading example +shown earlier in the chapter: + +Java Configuration + +``` +@Bean +public XStreamMarshaller customerCreditMarshaller() { + XStreamMarshaller marshaller = new XStreamMarshaller(); + + Map aliases = new HashMap<>(); + aliases.put("trade", Trade.class); + aliases.put("price", BigDecimal.class); + aliases.put("isin", String.class); + aliases.put("customer", String.class); + aliases.put("quantity", Long.class); + + marshaller.setAliases(aliases); + + return marshaller; +} +``` + +To summarize with a Java example, the following code illustrates all of the points +discussed, demonstrating the programmatic setup of the required properties: + +``` +FileSystemResource resource = new FileSystemResource("data/outputFile.xml") + +Map aliases = new HashMap(); +aliases.put("trade","org.springframework.batch.sample.domain.trade.Trade"); +aliases.put("price","java.math.BigDecimal"); +aliases.put("customer","java.lang.String"); +aliases.put("isin","java.lang.String"); +aliases.put("quantity","java.lang.Long"); 
+Marshaller marshaller = new XStreamMarshaller(); +marshaller.setAliases(aliases); + +StaxEventItemWriter staxItemWriter = + new StaxEventItemWriterBuilder() + .name("tradesWriter") + .marshaller(marshaller) + .resource(resource) + .rootTagName("trade") + .overwriteOutput(true) + .build(); + +staxItemWriter.afterPropertiesSet(); + +ExecutionContext executionContext = new ExecutionContext(); +staxItemWriter.open(executionContext); +Trade trade = new Trade(); +trade.setPrice(11.39); +trade.setIsin("XYZ0001"); +trade.setQuantity(5L); +trade.setCustomer("Customer1"); +staxItemWriter.write(trade); +``` + +### JSON Item Readers And Writers + +Spring Batch provides support for reading and Writing JSON resources in the following format: + +``` +[ + { + "isin": "123", + "quantity": 1, + "price": 1.2, + "customer": "foo" + }, + { + "isin": "456", + "quantity": 2, + "price": 1.4, + "customer": "bar" + } +] +``` + +It is assumed that the JSON resource is an array of JSON objects corresponding to +individual items. Spring Batch is not tied to any particular JSON library. + +#### `JsonItemReader` + +The `JsonItemReader` delegates JSON parsing and binding to implementations of the`org.springframework.batch.item.json.JsonObjectReader` interface. This interface +is intended to be implemented by using a streaming API to read JSON objects +in chunks. Two implementations are currently provided: + +* [Jackson](https://github.com/FasterXML/jackson) through the `org.springframework.batch.item.json.JacksonJsonObjectReader` + +* [Gson](https://github.com/google/gson) through the `org.springframework.batch.item.json.GsonJsonObjectReader` + +To be able to process JSON records, the following is needed: + +* `Resource`: A Spring Resource that represents the JSON file to read. 
+ +* `JsonObjectReader`: A JSON object reader to parse and bind JSON objects to items + +The following example shows how to define a `JsonItemReader` that works with the +previous JSON resource `org/springframework/batch/item/json/trades.json` and a`JsonObjectReader` based on Jackson: + +``` +@Bean +public JsonItemReader jsonItemReader() { + return new JsonItemReaderBuilder() + .jsonObjectReader(new JacksonJsonObjectReader<>(Trade.class)) + .resource(new ClassPathResource("trades.json")) + .name("tradeJsonItemReader") + .build(); +} +``` + +#### `JsonFileItemWriter` + +The `JsonFileItemWriter` delegates the marshalling of items to the`org.springframework.batch.item.json.JsonObjectMarshaller` interface. The contract +of this interface is to take an object and marshall it to a JSON `String`. +Two implementations are currently provided: + +* [Jackson](https://github.com/FasterXML/jackson) through the `org.springframework.batch.item.json.JacksonJsonObjectMarshaller` + +* [Gson](https://github.com/google/gson) through the `org.springframework.batch.item.json.GsonJsonObjectMarshaller` + +To be able to write JSON records, the following is needed: + +* `Resource`: A Spring `Resource` that represents the JSON file to write + +* `JsonObjectMarshaller`: A JSON object marshaller to marshall objects to JSON format + +The following example shows how to define a `JsonFileItemWriter`: + +``` +@Bean +public JsonFileItemWriter jsonFileItemWriter() { + return new JsonFileItemWriterBuilder() + .jsonObjectMarshaller(new JacksonJsonObjectMarshaller<>()) + .resource(new ClassPathResource("trades.json")) + .name("tradeJsonFileItemWriter") + .build(); +} +``` + +### Multi-File Input + +It is a common requirement to process multiple files within a single `Step`. Assuming the +files all have the same formatting, the `MultiResourceItemReader` supports this type of +input for both XML and flat file processing. 
Consider the following files in a directory: + +``` +file-1.txt file-2.txt ignored.txt +``` + +file-1.txt and file-2.txt are formatted the same and, for business reasons, should be +processed together. The `MultiResourceItemReader` can be used to read in both files by +using wildcards. + +The following example shows how to read files with wildcards in XML: + +XML Configuration + +``` + + + + +``` + +The following example shows how to read files with wildcards in Java: + +Java Configuration + +``` +@Bean +public MultiResourceItemReader multiResourceReader() { + return new MultiResourceItemReaderBuilder() + .delegate(flatFileItemReader()) + .resources(resources()) + .build(); +} +``` + +The referenced delegate is a simple `FlatFileItemReader`. The above configuration reads +input from both files, handling rollback and restart scenarios. It should be noted that, +as with any `ItemReader`, adding extra input (in this case a file) could cause potential +issues when restarting. It is recommended that batch jobs work with their own individual +directories until completed successfully. + +| |Input resources are ordered by using `MultiResourceItemReader#setComparator(Comparator)`to make sure resource ordering is preserved between job runs in restart scenario.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### Database + +Like most enterprise application styles, a database is the central storage mechanism for +batch. However, batch differs from other application styles due to the sheer size of the +datasets with which the system must work. If a SQL statement returns 1 million rows, the +result set probably holds all returned results in memory until all rows have been read. 
+Spring Batch provides two types of solutions for this problem: + +* [Cursor-based `ItemReader` Implementations](#cursorBasedItemReaders) + +* [Paging `ItemReader` Implementations](#pagingItemReaders) + +#### Cursor-based `ItemReader` Implementations + +Using a database cursor is generally the default approach of most batch developers, +because it is the database’s solution to the problem of 'streaming' relational data. The +Java `ResultSet` class is essentially an object oriented mechanism for manipulating a +cursor. A `ResultSet` maintains a cursor to the current row of data. Calling `next` on a`ResultSet` moves this cursor to the next row. The Spring Batch cursor-based `ItemReader`implementation opens a cursor on initialization and moves the cursor forward one row for +every call to `read`, returning a mapped object that can be used for processing. The`close` method is then called to ensure all resources are freed up. The Spring core`JdbcTemplate` gets around this problem by using the callback pattern to completely map +all rows in a `ResultSet` and close before returning control back to the method caller. +However, in batch, this must wait until the step is complete. The following image shows a +generic diagram of how a cursor-based `ItemReader` works. Note that, while the example +uses SQL (because SQL is so widely known), any technology could implement the basic +approach. + +![Cursor Example](https://docs.spring.io/spring-batch/docs/current/reference/html/images/cursorExample.png) + +Figure 3. Cursor Example + +This example illustrates the basic pattern. Given a 'FOO' table, which has three columns:`ID`, `NAME`, and `BAR`, select all rows with an ID greater than 1 but less than 7. This +puts the beginning of the cursor (row 1) on ID 2. The result of this row should be a +completely mapped `Foo` object. Calling `read()` again moves the cursor to the next row, +which is the `Foo` with an ID of 3. 
The results of these reads are written out after each`read`, allowing the objects to be garbage collected (assuming no instance variables are +maintaining references to them). + +##### `JdbcCursorItemReader` + +`JdbcCursorItemReader` is the JDBC implementation of the cursor-based technique. It works +directly with a `ResultSet` and requires an SQL statement to run against a connection +obtained from a `DataSource`. The following database schema is used as an example: + +``` +CREATE TABLE CUSTOMER ( + ID BIGINT IDENTITY PRIMARY KEY, + NAME VARCHAR(45), + CREDIT FLOAT +); +``` + +Many people prefer to use a domain object for each row, so the following example uses an +implementation of the `RowMapper` interface to map a `CustomerCredit` object: + +``` +public class CustomerCreditRowMapper implements RowMapper { + + public static final String ID_COLUMN = "id"; + public static final String NAME_COLUMN = "name"; + public static final String CREDIT_COLUMN = "credit"; + + public CustomerCredit mapRow(ResultSet rs, int rowNum) throws SQLException { + CustomerCredit customerCredit = new CustomerCredit(); + + customerCredit.setId(rs.getInt(ID_COLUMN)); + customerCredit.setName(rs.getString(NAME_COLUMN)); + customerCredit.setCredit(rs.getBigDecimal(CREDIT_COLUMN)); + + return customerCredit; + } +} +``` + +Because `JdbcCursorItemReader` shares key interfaces with `JdbcTemplate`, it is useful to +see an example of how to read in this data with `JdbcTemplate`, in order to contrast it +with the `ItemReader`. For the purposes of this example, assume there are 1,000 rows in +the `CUSTOMER` database. 
The first example uses `JdbcTemplate`: + +``` +//For simplicity sake, assume a dataSource has already been obtained +JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); +List customerCredits = jdbcTemplate.query("SELECT ID, NAME, CREDIT from CUSTOMER", + new CustomerCreditRowMapper()); +``` + +After running the preceding code snippet, the `customerCredits` list contains 1,000`CustomerCredit` objects. In the query method, a connection is obtained from the`DataSource`, the provided SQL is run against it, and the `mapRow` method is called for +each row in the `ResultSet`. Contrast this with the approach of the`JdbcCursorItemReader`, shown in the following example: + +``` +JdbcCursorItemReader itemReader = new JdbcCursorItemReader(); +itemReader.setDataSource(dataSource); +itemReader.setSql("SELECT ID, NAME, CREDIT from CUSTOMER"); +itemReader.setRowMapper(new CustomerCreditRowMapper()); +int counter = 0; +ExecutionContext executionContext = new ExecutionContext(); +itemReader.open(executionContext); +Object customerCredit = new Object(); +while(customerCredit != null){ + customerCredit = itemReader.read(); + counter++; +} +itemReader.close(); +``` + +After running the preceding code snippet, the counter equals 1,000. If the code above had +put the returned `customerCredit` into a list, the result would have been exactly the +same as with the `JdbcTemplate` example. However, the big advantage of the `ItemReader`is that it allows items to be 'streamed'. The `read` method can be called once, the item +can be written out by an `ItemWriter`, and then the next item can be obtained with`read`. This allows item reading and writing to be done in 'chunks' and committed +periodically, which is the essence of high performance batch processing. Furthermore, it +is easily configured for injection into a Spring Batch `Step`. 
+ +The following example shows how to inject an `ItemReader` into a `Step` in XML: + +XML Configuration + +``` + + + + + + + +``` + +The following example shows how to inject an `ItemReader` into a `Step` in Java: + +Java Configuration + +``` +@Bean +public JdbcCursorItemReader itemReader() { + return new JdbcCursorItemReaderBuilder() + .dataSource(this.dataSource) + .name("creditReader") + .sql("select ID, NAME, CREDIT from CUSTOMER") + .rowMapper(new CustomerCreditRowMapper()) + .build(); + +} +``` + +###### Additional Properties + +Because there are so many varying options for opening a cursor in Java, there are many +properties on the `JdbcCursorItemReader` that can be set, as described in the following +table: + +| ignoreWarnings | Determines whether or not SQLWarnings are logged or cause an exception.
The default is `true` (meaning that warnings are logged). | +|------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| fetchSize | Gives the JDBC driver a hint as to the number of rows that should be fetched
from the database when more rows are needed by the `ResultSet` object used by the`ItemReader`. By default, no hint is given. | +| maxRows | Sets the limit for the maximum number of rows the underlying `ResultSet` can
hold at any one time. | +| queryTimeout | Sets the number of seconds the driver waits for a `Statement` object to
run. If the limit is exceeded, a `DataAccessException` is thrown. (Consult your driver
vendor documentation for details). | +| verifyCursorPosition | Because the same `ResultSet` held by the `ItemReader` is passed to
the `RowMapper`, it is possible for users to call `ResultSet.next()` themselves, which
could cause issues with the reader’s internal count. Setting this value to `true` causes
an exception to be thrown if the cursor position is not the same after the `RowMapper` call as it was before. | +| saveState | Indicates whether or not the reader’s state should be saved in the `ExecutionContext` provided by `ItemStream#update(ExecutionContext)`. The default is `true`. | +| driverSupportsAbsolute | Indicates whether the JDBC driver supports
setting the absolute row on a `ResultSet`. It is recommended that this is set to `true` for JDBC drivers that support `ResultSet.absolute()`, as it may improve performance,
especially if a step fails while working with a large data set. Defaults to `false`. | +|setUseSharedExtendedConnection|Indicates whether the connection
used for the cursor should be used by all other processing, thus sharing the same
transaction. If this is set to `false`, then the cursor is opened with its own connection
and does not participate in any transactions started for the rest of the step processing.
If you set this flag to `true`, then you must wrap the DataSource in an `ExtendedConnectionDataSourceProxy` to prevent the connection from being closed and
released after each commit. When you set this option to `true`, the statement used to
open the cursor is created with both 'READ\_ONLY' and 'HOLD\_CURSORS\_OVER\_COMMIT' options.
This allows holding the cursor open over transaction start and commits performed in the
step processing. To use this feature, you need a database that supports this and a JDBC
driver supporting JDBC 3.0 or later. Defaults to `false`.| + +##### `HibernateCursorItemReader` + +Just as normal Spring users make important decisions about whether or not to use ORM +solutions, which affect whether or not they use a `JdbcTemplate` or a`HibernateTemplate`, Spring Batch users have the same options.`HibernateCursorItemReader` is the Hibernate implementation of the cursor technique. +Hibernate’s usage in batch has been fairly controversial. This has largely been because +Hibernate was originally developed to support online application styles. However, that +does not mean it cannot be used for batch processing. The easiest approach for solving +this problem is to use a `StatelessSession` rather than a standard session. This removes +all of the caching and dirty checking Hibernate employs and that can cause issues in a +batch scenario. For more information on the differences between stateless and normal +hibernate sessions, refer to the documentation of your specific hibernate release. The`HibernateCursorItemReader` lets you declare an HQL statement and pass in a`SessionFactory`, which will pass back one item per call to read in the same basic +fashion as the `JdbcCursorItemReader`. The following example configuration uses the same +'customer credit' example as the JDBC reader: + +``` +HibernateCursorItemReader itemReader = new HibernateCursorItemReader(); +itemReader.setQueryString("from CustomerCredit"); +//For simplicity sake, assume sessionFactory already obtained. 
+itemReader.setSessionFactory(sessionFactory); +itemReader.setUseStatelessSession(true); +int counter = 0; +ExecutionContext executionContext = new ExecutionContext(); +itemReader.open(executionContext); +Object customerCredit = new Object(); +while(customerCredit != null){ + customerCredit = itemReader.read(); + counter++; +} +itemReader.close(); +``` + +This configured `ItemReader` returns `CustomerCredit` objects in the exact same manner +as described by the `JdbcCursorItemReader`, assuming hibernate mapping files have been +created correctly for the `Customer` table. The 'useStatelessSession' property defaults +to true but has been added here to draw attention to the ability to switch it on or off. +It is also worth noting that the fetch size of the underlying cursor can be set with the`setFetchSize` property. As with `JdbcCursorItemReader`, configuration is +straightforward. + +The following example shows how to inject a Hibernate `ItemReader` in XML: + +XML Configuration + +``` + + + + +``` + +The following example shows how to inject a Hibernate `ItemReader` in Java: + +Java Configuration + +``` +@Bean +public HibernateCursorItemReader itemReader(SessionFactory sessionFactory) { + return new HibernateCursorItemReaderBuilder() + .name("creditReader") + .sessionFactory(sessionFactory) + .queryString("from CustomerCredit") + .build(); +} +``` + +##### `StoredProcedureItemReader` + +Sometimes it is necessary to obtain the cursor data by using a stored procedure. The`StoredProcedureItemReader` works like the `JdbcCursorItemReader`, except that, instead +of running a query to obtain a cursor, it runs a stored procedure that returns a cursor. +The stored procedure can return the cursor in three different ways: + +* As a returned `ResultSet` (used by SQL Server, Sybase, DB2, Derby, and MySQL). + +* As a ref-cursor returned as an out parameter (used by Oracle and PostgreSQL). + +* As the return value of a stored function call. 
+ +The following XML example configuration uses the same 'customer credit' example as earlier +examples: + +XML Configuration + +``` + + + + + + + +``` + +The following Java example configuration uses the same 'customer credit' example as +earlier examples: + +Java Configuration + +``` +@Bean +public StoredProcedureItemReader reader(DataSource dataSource) { + StoredProcedureItemReader reader = new StoredProcedureItemReader(); + + reader.setDataSource(dataSource); + reader.setProcedureName("sp_customer_credit"); + reader.setRowMapper(new CustomerCreditRowMapper()); + + return reader; +} +``` + +The preceding example relies on the stored procedure to provide a `ResultSet` as a +returned result (option 1 from earlier). + +If the stored procedure returned a `ref-cursor` (option 2), then we would need to provide +the position of the out parameter that is the returned `ref-cursor`. + +The following example shows how to work with the first parameter being a ref-cursor in +XML: + +XML Configuration + +``` + + + + + + + + +``` + +The following example shows how to work with the first parameter being a ref-cursor in +Java: + +Java Configuration + +``` +@Bean +public StoredProcedureItemReader reader(DataSource dataSource) { + StoredProcedureItemReader reader = new StoredProcedureItemReader(); + + reader.setDataSource(dataSource); + reader.setProcedureName("sp_customer_credit"); + reader.setRowMapper(new CustomerCreditRowMapper()); + reader.setRefCursorPosition(1); + + return reader; +} +``` + +If the cursor was returned from a stored function (option 3), we would need to set the +property "function" to `true`. It defaults to `false`. 
+ +The following example shows property to `true` in XML: + +XML Configuration + +``` + + + + + + + + +``` + +The following example shows property to `true` in Java: + +Java Configuration + +``` +@Bean +public StoredProcedureItemReader reader(DataSource dataSource) { + StoredProcedureItemReader reader = new StoredProcedureItemReader(); + + reader.setDataSource(dataSource); + reader.setProcedureName("sp_customer_credit"); + reader.setRowMapper(new CustomerCreditRowMapper()); + reader.setFunction(true); + + return reader; +} +``` + +In all of these cases, we need to define a `RowMapper` as well as a `DataSource` and the +actual procedure name. + +If the stored procedure or function takes in parameters, then they must be declared and +set by using the `parameters` property. The following example, for Oracle, declares three +parameters. The first one is the `out` parameter that returns the ref-cursor, and the +second and third are in parameters that takes a value of type `INTEGER`. + +The following example shows how to work with parameters in XML: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +The following example shows how to work with parameters in Java: + +Java Configuration + +``` +@Bean +public StoredProcedureItemReader reader(DataSource dataSource) { + List parameters = new ArrayList<>(); + parameters.add(new SqlOutParameter("newId", OracleTypes.CURSOR)); + parameters.add(new SqlParameter("amount", Types.INTEGER); + parameters.add(new SqlParameter("custId", Types.INTEGER); + + StoredProcedureItemReader reader = new StoredProcedureItemReader(); + + reader.setDataSource(dataSource); + reader.setProcedureName("spring.cursor_func"); + reader.setParameters(parameters); + reader.setRefCursorPosition(1); + reader.setRowMapper(rowMapper()); + reader.setPreparedStatementSetter(parameterSetter()); + + return reader; +} +``` + +In addition to the parameter declarations, we need to specify a 
`PreparedStatementSetter`implementation that sets the parameter values for the call. This works the same as for +the `JdbcCursorItemReader` above. All the additional properties listed in[Additional Properties](#JdbcCursorItemReaderProperties) apply to the `StoredProcedureItemReader` as well. + +#### Paging `ItemReader` Implementations + +An alternative to using a database cursor is running multiple queries where each query +fetches a portion of the results. We refer to this portion as a page. Each query must +specify the starting row number and the number of rows that we want returned in the page. + +##### `JdbcPagingItemReader` + +One implementation of a paging `ItemReader` is the `JdbcPagingItemReader`. The`JdbcPagingItemReader` needs a `PagingQueryProvider` responsible for providing the SQL +queries used to retrieve the rows making up a page. Since each database has its own +strategy for providing paging support, we need to use a different `PagingQueryProvider`for each supported database type. There is also the `SqlPagingQueryProviderFactoryBean`that auto-detects the database that is being used and determine the appropriate`PagingQueryProvider` implementation. This simplifies the configuration and is the +recommended best practice. + +The `SqlPagingQueryProviderFactoryBean` requires that you specify a `select` clause and a`from` clause. You can also provide an optional `where` clause. These clauses and the +required `sortKey` are used to build an SQL statement. + +| |It is important to have a unique key constraint on the `sortKey` to guarantee that
no data is lost between executions.| +|---|--------------------------------------------------------------------------------------------------------------------------| + +After the reader has been opened, it passes back one item per call to `read` in the same +basic fashion as any other `ItemReader`. The paging happens behind the scenes when +additional rows are needed. + +The following XML example configuration uses a similar 'customer credit' example as the +cursor-based `ItemReaders` shown previously: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + + +``` + +The following Java example configuration uses a similar 'customer credit' example as the +cursor-based `ItemReaders` shown previously: + +Java Configuration + +``` +@Bean +public JdbcPagingItemReader itemReader(DataSource dataSource, PagingQueryProvider queryProvider) { + Map parameterValues = new HashMap<>(); + parameterValues.put("status", "NEW"); + + return new JdbcPagingItemReaderBuilder() + .name("creditReader") + .dataSource(dataSource) + .queryProvider(queryProvider) + .parameterValues(parameterValues) + .rowMapper(customerCreditMapper()) + .pageSize(1000) + .build(); +} + +@Bean +public SqlPagingQueryProviderFactoryBean queryProvider() { + SqlPagingQueryProviderFactoryBean provider = new SqlPagingQueryProviderFactoryBean(); + + provider.setSelectClause("select id, name, credit"); + provider.setFromClause("from customer"); + provider.setWhereClause("where status=:status"); + provider.setSortKey("id"); + + return provider; +} +``` + +This configured `ItemReader` returns `CustomerCredit` objects using the `RowMapper`, +which must be specified. The 'pageSize' property determines the number of entities read +from the database for each query run. + +The 'parameterValues' property can be used to specify a `Map` of parameter values for the +query. If you use named parameters in the `where` clause, the key for each entry should +match the name of the named parameter. If you use a traditional '?' 
placeholder, then the +key for each entry should be the number of the placeholder, starting with 1. + +##### `JpaPagingItemReader` + +Another implementation of a paging `ItemReader` is the `JpaPagingItemReader`. JPA does +not have a concept similar to the Hibernate `StatelessSession`, so we have to use other +features provided by the JPA specification. Since JPA supports paging, this is a natural +choice when it comes to using JPA for batch processing. After each page is read, the +entities become detached and the persistence context is cleared, to allow the entities to +be garbage collected once the page is processed. + +The `JpaPagingItemReader` lets you declare a JPQL statement and pass in a`EntityManagerFactory`. It then passes back one item per call to read in the same basic +fashion as any other `ItemReader`. The paging happens behind the scenes when additional +entities are needed. + +The following XML example configuration uses the same 'customer credit' example as the +JDBC reader shown previously: + +XML Configuration + +``` + + + + + +``` + +The following Java example configuration uses the same 'customer credit' example as the +JDBC reader shown previously: + +Java Configuration + +``` +@Bean +public JpaPagingItemReader itemReader() { + return new JpaPagingItemReaderBuilder() + .name("creditReader") + .entityManagerFactory(entityManagerFactory()) + .queryString("select c from CustomerCredit c") + .pageSize(1000) + .build(); +} +``` + +This configured `ItemReader` returns `CustomerCredit` objects in the exact same manner as +described for the `JdbcPagingItemReader` above, assuming the `CustomerCredit` object has the +correct JPA annotations or ORM mapping file. The 'pageSize' property determines the +number of entities read from the database for each query execution. + +#### Database ItemWriters + +While both flat files and XML files have a specific `ItemWriter` instance, there is no exact equivalent +in the database world. 
This is because transactions provide all the needed functionality.`ItemWriter` implementations are necessary for files because they must act as if they’re transactional, +keeping track of written items and flushing or clearing at the appropriate times. +Databases have no need for this functionality, since the write is already contained in a +transaction. Users can create their own DAOs that implement the `ItemWriter` interface or +use one from a custom `ItemWriter` that’s written for generic processing concerns. Either +way, they should work without any issues. One thing to look out for is the performance +and error handling capabilities that are provided by batching the outputs. This is most +common when using hibernate as an `ItemWriter` but could have the same issues when using +JDBC batch mode. Batching database output does not have any inherent flaws, assuming we +are careful to flush and there are no errors in the data. However, any errors while +writing can cause confusion, because there is no way to know which individual item caused +an exception or even if any individual item was responsible, as illustrated in the +following image: + +![Error On Flush](https://docs.spring.io/spring-batch/docs/current/reference/html/images/errorOnFlush.png) + +Figure 4. Error On Flush + +If items are buffered before being written, any errors are not thrown until the buffer is +flushed just before a commit. For example, assume that 20 items are written per chunk, +and the 15th item throws a `DataIntegrityViolationException`. As far as the `Step`is concerned, all 20 item are written successfully, since there is no way to know that an +error occurs until they are actually written. Once `Session#flush()` is called, the +buffer is emptied and the exception is hit. At this point, there is nothing the `Step`can do. The transaction must be rolled back. 
Normally, this exception might cause the +item to be skipped (depending upon the skip/retry policies), and then it is not written +again. However, in the batched scenario, there is no way to know which item caused the +issue. The whole buffer was being written when the failure happened. The only way to +solve this issue is to flush after each item, as shown in the following image: + +![Error On Write](https://docs.spring.io/spring-batch/docs/current/reference/html/images/errorOnWrite.png) + +Figure 5. Error On Write + +This is a common use case, especially when using Hibernate, and the simple guideline for +implementations of `ItemWriter` is to flush on each call to `write()`. Doing so allows +for items to be skipped reliably, with Spring Batch internally taking care of the +granularity of the calls to `ItemWriter` after an error. + +### Reusing Existing Services + +Batch systems are often used in conjunction with other application styles. The most +common is an online system, but it may also support integration or even a thick client +application by moving necessary bulk data that each application style uses. For this +reason, it is common that many users want to reuse existing DAOs or other services within +their batch jobs. The Spring container itself makes this fairly easy by allowing any +necessary class to be injected. However, there may be cases where the existing service +needs to act as an `ItemReader` or `ItemWriter`, either to satisfy the dependency of +another Spring Batch class or because it truly is the main `ItemReader` for a step. It is +fairly trivial to write an adapter class for each service that needs wrapping, but +because it is such a common concern, Spring Batch provides implementations:`ItemReaderAdapter` and `ItemWriterAdapter`. Both classes implement the standard Spring +method by invoking the delegate pattern and are fairly simple to set up. 
+ +The following XML example uses the `ItemReaderAdapter`: + +XML Configuration + +``` + + + + + + +``` + +The following Java example uses the `ItemReaderAdapter`: + +Java Configuration + +``` +@Bean +public ItemReaderAdapter itemReader() { + ItemReaderAdapter reader = new ItemReaderAdapter(); + + reader.setTargetObject(fooService()); + reader.setTargetMethod("generateFoo"); + + return reader; +} + +@Bean +public FooService fooService() { + return new FooService(); +} +``` + +One important point to note is that the contract of the `targetMethod` must be the same +as the contract for `read`: When exhausted, it returns `null`. Otherwise, it returns an`Object`. Anything else prevents the framework from knowing when processing should end, +either causing an infinite loop or incorrect failure, depending upon the implementation +of the `ItemWriter`. + +The following XML example uses the `ItemWriterAdapter`: + +XML Configuration + +``` + + + + + + +``` + +The following Java example uses the `ItemWriterAdapter`: + +Java Configuration + +``` +@Bean +public ItemWriterAdapter itemWriter() { + ItemWriterAdapter writer = new ItemWriterAdapter(); + + writer.setTargetObject(fooService()); + writer.setTargetMethod("processFoo"); + + return writer; +} + +@Bean +public FooService fooService() { + return new FooService(); +} +``` + +### Preventing State Persistence + +By default, all of the `ItemReader` and `ItemWriter` implementations store their current +state in the `ExecutionContext` before it is committed. However, this may not always be +the desired behavior. For example, many developers choose to make their database readers +'rerunnable' by using a process indicator. An extra column is added to the input data to +indicate whether or not it has been processed. When a particular record is being read (or +written) the processed flag is flipped from `false` to `true`. 
The SQL statement can then +contain an extra statement in the `where` clause, such as `where PROCESSED_IND = false`, +thereby ensuring that only unprocessed records are returned in the case of a restart. In +this scenario, it is preferable to not store any state, such as the current row number, +since it is irrelevant upon restart. For this reason, all readers and writers include the +'saveState' property. + +The following bean definition shows how to prevent state persistence in XML: + +XML Configuration + +``` + + + + + + + + + SELECT games.player_id, games.year_no, SUM(COMPLETES), + SUM(ATTEMPTS), SUM(PASSING_YARDS), SUM(PASSING_TD), + SUM(INTERCEPTIONS), SUM(RUSHES), SUM(RUSH_YARDS), + SUM(RECEPTIONS), SUM(RECEPTIONS_YARDS), SUM(TOTAL_TD) + from games, players where players.player_id = + games.player_id group by games.player_id, games.year_no + + + +``` + +The following bean definition shows how to prevent state persistence in Java: + +Java Configuration + +``` +@Bean +public JdbcCursorItemReader playerSummarizationSource(DataSource dataSource) { + return new JdbcCursorItemReaderBuilder() + .dataSource(dataSource) + .rowMapper(new PlayerSummaryMapper()) + .saveState(false) + .sql("SELECT games.player_id, games.year_no, SUM(COMPLETES)," + + "SUM(ATTEMPTS), SUM(PASSING_YARDS), SUM(PASSING_TD)," + + "SUM(INTERCEPTIONS), SUM(RUSHES), SUM(RUSH_YARDS)," + + "SUM(RECEPTIONS), SUM(RECEPTIONS_YARDS), SUM(TOTAL_TD)" + + "from games, players where players.player_id =" + + "games.player_id group by games.player_id, games.year_no") + .build(); + +} +``` + +The `ItemReader` configured above does not make any entries in the `ExecutionContext` for +any executions in which it participates. + +### Creating Custom ItemReaders and ItemWriters + +So far, this chapter has discussed the basic contracts of reading and writing in Spring +Batch and some common implementations for doing so. 
However, these are all fairly +generic, and there are many potential scenarios that may not be covered by out-of-the-box +implementations. This section shows, by using a simple example, how to create a custom`ItemReader` and `ItemWriter` implementation and implement their contracts correctly. The`ItemReader` also implements `ItemStream`, in order to illustrate how to make a reader or +writer restartable. + +#### Custom `ItemReader` Example + +For the purpose of this example, we create a simple `ItemReader` implementation that +reads from a provided list. We start by implementing the most basic contract of`ItemReader`, the `read` method, as shown in the following code: + +``` +public class CustomItemReader implements ItemReader { + + List items; + + public CustomItemReader(List items) { + this.items = items; + } + + public T read() throws Exception, UnexpectedInputException, + NonTransientResourceException, ParseException { + + if (!items.isEmpty()) { + return items.remove(0); + } + return null; + } +} +``` + +The preceding class takes a list of items and returns them one at a time, removing each +from the list. When the list is empty, it returns `null`, thus satisfying the most basic +requirements of an `ItemReader`, as illustrated in the following test code: + +``` +List items = new ArrayList<>(); +items.add("1"); +items.add("2"); +items.add("3"); + +ItemReader itemReader = new CustomItemReader<>(items); +assertEquals("1", itemReader.read()); +assertEquals("2", itemReader.read()); +assertEquals("3", itemReader.read()); +assertNull(itemReader.read()); +``` + +##### Making the `ItemReader` Restartable + +The final challenge is to make the `ItemReader` restartable. Currently, if processing is +interrupted and begins again, the `ItemReader` must start at the beginning. This is +actually valid in many scenarios, but it is sometimes preferable that a batch job +restarts where it left off. The key discriminant is often whether the reader is stateful +or stateless. 
A stateless reader does not need to worry about restartability, but a +stateful one has to try to reconstitute its last known state on restart. For this reason, +we recommend that you keep custom readers stateless if possible, so you need not worry +about restartability. + +If you do need to store state, then the `ItemStream` interface should be used: + +``` +public class CustomItemReader implements ItemReader, ItemStream { + + List items; + int currentIndex = 0; + private static final String CURRENT_INDEX = "current.index"; + + public CustomItemReader(List items) { + this.items = items; + } + + public T read() throws Exception, UnexpectedInputException, + ParseException, NonTransientResourceException { + + if (currentIndex < items.size()) { + return items.get(currentIndex++); + } + + return null; + } + + public void open(ExecutionContext executionContext) throws ItemStreamException { + if (executionContext.containsKey(CURRENT_INDEX)) { + currentIndex = new Long(executionContext.getLong(CURRENT_INDEX)).intValue(); + } + else { + currentIndex = 0; + } + } + + public void update(ExecutionContext executionContext) throws ItemStreamException { + executionContext.putLong(CURRENT_INDEX, new Long(currentIndex).longValue()); + } + + public void close() throws ItemStreamException {} +} +``` + +On each call to the `ItemStream` `update` method, the current index of the `ItemReader`is stored in the provided `ExecutionContext` with a key of 'current.index'. When the`ItemStream` `open` method is called, the `ExecutionContext` is checked to see if it +contains an entry with that key. If the key is found, then the current index is moved to +that location. 
This is a fairly trivial example, but it still meets the general contract: + +``` +ExecutionContext executionContext = new ExecutionContext(); +((ItemStream)itemReader).open(executionContext); +assertEquals("1", itemReader.read()); +((ItemStream)itemReader).update(executionContext); + +List items = new ArrayList<>(); +items.add("1"); +items.add("2"); +items.add("3"); +itemReader = new CustomItemReader<>(items); + +((ItemStream)itemReader).open(executionContext); +assertEquals("2", itemReader.read()); +``` + +Most `ItemReaders` have much more sophisticated restart logic. The`JdbcCursorItemReader`, for example, stores the row ID of the last processed row in the +cursor. + +It is also worth noting that the key used within the `ExecutionContext` should not be +trivial. That is because the same `ExecutionContext` is used for all `ItemStreams` within +a `Step`. In most cases, simply prepending the key with the class name should be enough +to guarantee uniqueness. However, in the rare cases where two of the same type of`ItemStream` are used in the same step (which can happen if two files are needed for +output), a more unique name is needed. For this reason, many of the Spring Batch`ItemReader` and `ItemWriter` implementations have a `setName()` property that lets this +key name be overridden. + +#### Custom `ItemWriter` Example + +Implementing a Custom `ItemWriter` is similar in many ways to the `ItemReader` example +above but differs in enough ways as to warrant its own example. However, adding +restartability is essentially the same, so it is not covered in this example. 
As with the`ItemReader` example, a `List` is used in order to keep the example as simple as +possible: + +``` +public class CustomItemWriter implements ItemWriter { + + List output = TransactionAwareProxyFactory.createTransactionalList(); + + public void write(List items) throws Exception { + output.addAll(items); + } + + public List getOutput() { + return output; + } +} +``` + +##### Making the `ItemWriter` Restartable + +To make the `ItemWriter` restartable, we would follow the same process as for the`ItemReader`, adding and implementing the `ItemStream` interface to synchronize the +execution context. In the example, we might have to count the number of items processed +and add that as a footer record. If we needed to do that, we could implement`ItemStream` in our `ItemWriter` so that the counter was reconstituted from the execution +context if the stream was re-opened. + +In many realistic cases, custom `ItemWriters` also delegate to another writer that itself +is restartable (for example, when writing to a file), or else it writes to a +transactional resource and so does not need to be restartable, because it is stateless. +When you have a stateful writer you should probably be sure to implement `ItemStream` as +well as `ItemWriter`. Remember also that the client of the writer needs to be aware of +the `ItemStream`, so you may need to register it as a stream in the configuration. + +### Item Reader and Writer Implementations + +In this section, we will introduce you to readers and writers that have not already been +discussed in the previous sections. + +#### Decorators + +In some cases, a user needs specialized behavior to be appended to a pre-existing`ItemReader`. Spring Batch offers some out of the box decorators that can add +additional behavior to to your `ItemReader` and `ItemWriter` implementations. 
+ +Spring Batch includes the following decorators: + +* [`SynchronizedItemStreamReader`](#synchronizedItemStreamReader) + +* [`SingleItemPeekableItemReader`](#singleItemPeekableItemReader) + +* [`SynchronizedItemStreamWriter`](#synchronizedItemStreamWriter) + +* [`MultiResourceItemWriter`](#multiResourceItemWriter) + +* [`ClassifierCompositeItemWriter`](#classifierCompositeItemWriter) + +* [`ClassifierCompositeItemProcessor`](#classifierCompositeItemProcessor) + +##### `SynchronizedItemStreamReader` + +When using an `ItemReader` that is not thread safe, Spring Batch offers the`SynchronizedItemStreamReader` decorator, which can be used to make the `ItemReader`thread safe. Spring Batch provides a `SynchronizedItemStreamReaderBuilder` to construct +an instance of the `SynchronizedItemStreamReader`. + +##### `SingleItemPeekableItemReader` + +Spring Batch includes a decorator that adds a peek method to an `ItemReader`. This peek +method lets the user peek one item ahead. Repeated calls to the peek returns the same +item, and this is the next item returned from the `read` method. Spring Batch provides a`SingleItemPeekableItemReaderBuilder` to construct an instance of the`SingleItemPeekableItemReader`. + +| |SingleItemPeekableItemReader’s peek method is not thread-safe, because it would not
be possible to honor the peek in multiple threads. Only one of the threads that peeked
would get that item in the next call to read.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `SynchronizedItemStreamWriter` + +When using an `ItemWriter` that is not thread safe, Spring Batch offers the`SynchronizedItemStreamWriter` decorator, which can be used to make the `ItemWriter`thread safe. Spring Batch provides a `SynchronizedItemStreamWriterBuilder` to construct +an instance of the `SynchronizedItemStreamWriter`. + +##### `MultiResourceItemWriter` + +The `MultiResourceItemWriter` wraps a `ResourceAwareItemWriterItemStream` and creates a new +output resource when the count of items written in the current resource exceeds the`itemCountLimitPerResource`. Spring Batch provides a `MultiResourceItemWriterBuilder` to +construct an instance of the `MultiResourceItemWriter`. + +##### `ClassifierCompositeItemWriter` + +The `ClassifierCompositeItemWriter` calls one of a collection of `ItemWriter`implementations for each item, based on a router pattern implemented through the provided`Classifier`. The implementation is thread-safe if all delegates are thread-safe. Spring +Batch provides a `ClassifierCompositeItemWriterBuilder` to construct an instance of the`ClassifierCompositeItemWriter`. + +##### `ClassifierCompositeItemProcessor` + +The `ClassifierCompositeItemProcessor` is an `ItemProcessor` that calls one of a +collection of `ItemProcessor` implementations, based on a router pattern implemented +through the provided `Classifier`. Spring Batch provides a`ClassifierCompositeItemProcessorBuilder` to construct an instance of the`ClassifierCompositeItemProcessor`. 
+ +#### Messaging Readers And Writers + +Spring Batch offers the following readers and writers for commonly used messaging systems: + +* [`AmqpItemReader`](#amqpItemReader) + +* [`AmqpItemWriter`](#amqpItemWriter) + +* [`JmsItemReader`](#jmsItemReader) + +* [`JmsItemWriter`](#jmsItemWriter) + +* [`KafkaItemReader`](#kafkaItemReader) + +* [`KafkaItemWriter`](#kafkaItemWriter) + +##### `AmqpItemReader` + +The `AmqpItemReader` is an `ItemReader` that uses an `AmqpTemplate` to receive or convert +messages from an exchange. Spring Batch provides a `AmqpItemReaderBuilder` to construct +an instance of the `AmqpItemReader`. + +##### `AmqpItemWriter` + +The `AmqpItemWriter` is an `ItemWriter` that uses an `AmqpTemplate` to send messages to +an AMQP exchange. Messages are sent to the nameless exchange if the name not specified in +the provided `AmqpTemplate`. Spring Batch provides an `AmqpItemWriterBuilder` to +construct an instance of the `AmqpItemWriter`. + +##### `JmsItemReader` + +The `JmsItemReader` is an `ItemReader` for JMS that uses a `JmsTemplate`. The template +should have a default destination, which is used to provide items for the `read()`method. Spring Batch provides a `JmsItemReaderBuilder` to construct an instance of the`JmsItemReader`. + +##### `JmsItemWriter` + +The `JmsItemWriter` is an `ItemWriter` for JMS that uses a `JmsTemplate`. The template +should have a default destination, which is used to send items in `write(List)`. Spring +Batch provides a `JmsItemWriterBuilder` to construct an instance of the `JmsItemWriter`. + +##### `KafkaItemReader` + +The `KafkaItemReader` is an `ItemReader` for an Apache Kafka topic. It can be configured +to read messages from multiple partitions of the same topic. It stores message offsets +in the execution context to support restart capabilities. Spring Batch provides a`KafkaItemReaderBuilder` to construct an instance of the `KafkaItemReader`. 
+ +##### `KafkaItemWriter` + +The `KafkaItemWriter` is an `ItemWriter` for Apache Kafka that uses a `KafkaTemplate` to +send events to a default topic. Spring Batch provides a `KafkaItemWriterBuilder` to +construct an instance of the `KafkaItemWriter`. + +#### Database Readers + +Spring Batch offers the following database readers: + +* [`Neo4jItemReader`](#Neo4jItemReader) + +* [`MongoItemReader`](#mongoItemReader) + +* [`HibernateCursorItemReader`](#hibernateCursorItemReader) + +* [`HibernatePagingItemReader`](#hibernatePagingItemReader) + +* [`RepositoryItemReader`](#repositoryItemReader) + +##### `Neo4jItemReader` + +The `Neo4jItemReader` is an `ItemReader` that reads objects from the graph database Neo4j +by using a paging technique. Spring Batch provides a `Neo4jItemReaderBuilder` to +construct an instance of the `Neo4jItemReader`. + +##### `MongoItemReader` + +The `MongoItemReader` is an `ItemReader` that reads documents from MongoDB by using a +paging technique. Spring Batch provides a `MongoItemReaderBuilder` to construct an +instance of the `MongoItemReader`. + +##### `HibernateCursorItemReader` + +The `HibernateCursorItemReader` is an `ItemStreamReader` for reading database records +built on top of Hibernate. It executes the HQL query and then, when initialized, iterates +over the result set as the `read()` method is called, successively returning an object +corresponding to the current row. Spring Batch provides a`HibernateCursorItemReaderBuilder` to construct an instance of the`HibernateCursorItemReader`. + +##### `HibernatePagingItemReader` + +The `HibernatePagingItemReader` is an `ItemReader` for reading database records built on +top of Hibernate and reading only up to a fixed number of items at a time. Spring Batch +provides a `HibernatePagingItemReaderBuilder` to construct an instance of the`HibernatePagingItemReader`. 
+ +##### `RepositoryItemReader` + +The `RepositoryItemReader` is an `ItemReader` that reads records by using a`PagingAndSortingRepository`. Spring Batch provides a `RepositoryItemReaderBuilder` to +construct an instance of the `RepositoryItemReader`. + +#### Database Writers + +Spring Batch offers the following database writers: + +* [`Neo4jItemWriter`](#neo4jItemWriter) + +* [`MongoItemWriter`](#mongoItemWriter) + +* [`RepositoryItemWriter`](#repositoryItemWriter) + +* [`HibernateItemWriter`](#hibernateItemWriter) + +* [`JdbcBatchItemWriter`](#jdbcBatchItemWriter) + +* [`JpaItemWriter`](#jpaItemWriter) + +* [`GemfireItemWriter`](#gemfireItemWriter) + +##### `Neo4jItemWriter` + +The `Neo4jItemWriter` is an `ItemWriter` implementation that writes to a Neo4j database. +Spring Batch provides a `Neo4jItemWriterBuilder` to construct an instance of the`Neo4jItemWriter`. + +##### `MongoItemWriter` + +The `MongoItemWriter` is an `ItemWriter` implementation that writes to a MongoDB store +using an implementation of Spring Data’s `MongoOperations`. Spring Batch provides a`MongoItemWriterBuilder` to construct an instance of the `MongoItemWriter`. + +##### `RepositoryItemWriter` + +The `RepositoryItemWriter` is an `ItemWriter` wrapper for a `CrudRepository` from Spring +Data. Spring Batch provides a `RepositoryItemWriterBuilder` to construct an instance of +the `RepositoryItemWriter`. + +##### `HibernateItemWriter` + +The `HibernateItemWriter` is an `ItemWriter` that uses a Hibernate session to save or +update entities that are not part of the current Hibernate session. Spring Batch provides +a `HibernateItemWriterBuilder` to construct an instance of the `HibernateItemWriter`. + +##### `JdbcBatchItemWriter` + +The `JdbcBatchItemWriter` is an `ItemWriter` that uses the batching features from`NamedParameterJdbcTemplate` to execute a batch of statements for all items provided. 
+Spring Batch provides a `JdbcBatchItemWriterBuilder` to construct an instance of the`JdbcBatchItemWriter`. + +##### `JpaItemWriter` + +The `JpaItemWriter` is an `ItemWriter` that uses a JPA `EntityManagerFactory` to merge +any entities that are not part of the persistence context. Spring Batch provides a`JpaItemWriterBuilder` to construct an instance of the `JpaItemWriter`. + +##### `GemfireItemWriter` + +The `GemfireItemWriter` is an `ItemWriter` that uses a `GemfireTemplate` that stores +items in GemFire as key/value pairs. Spring Batch provides a `GemfireItemWriterBuilder`to construct an instance of the `GemfireItemWriter`. + +#### Specialized Readers + +Spring Batch offers the following specialized readers: + +* [`LdifReader`](#ldifReader) + +* [`MappingLdifReader`](#mappingLdifReader) + +* [`AvroItemReader`](#avroItemReader) + +##### `LdifReader` + +The `LdifReader` reads LDIF (LDAP Data Interchange Format) records from a `Resource`, +parses them, and returns a `LdapAttribute` object for each `read` executed. Spring Batch +provides a `LdifReaderBuilder` to construct an instance of the `LdifReader`. + +##### `MappingLdifReader` + +The `MappingLdifReader` reads LDIF (LDAP Data Interchange Format) records from a`Resource`, parses them then maps each LDIF record to a POJO (Plain Old Java Object). +Each read returns a POJO. Spring Batch provides a `MappingLdifReaderBuilder` to construct +an instance of the `MappingLdifReader`. + +##### `AvroItemReader` + +The `AvroItemReader` reads serialized Avro data from a Resource. +Each read returns an instance of the type specified by a Java class or Avro Schema. +The reader may be optionally configured for input that embeds an Avro schema or not. +Spring Batch provides an `AvroItemReaderBuilder` to construct an instance of the `AvroItemReader`. 
+ +#### Specialized Writers + +Spring Batch offers the following specialized writers: + +* [`SimpleMailMessageItemWriter`](#simpleMailMessageItemWriter) + +* [`AvroItemWriter`](#avroItemWriter) + +##### `SimpleMailMessageItemWriter` + +The `SimpleMailMessageItemWriter` is an `ItemWriter` that can send mail messages. It +delegates the actual sending of messages to an instance of `MailSender`. Spring Batch +provides a `SimpleMailMessageItemWriterBuilder` to construct an instance of the `SimpleMailMessageItemWriter`. + +##### `AvroItemWriter` + +The `AvroItemWriter` serializes Java objects to a WriteableResource according to the given type or Schema. +The writer may be optionally configured to embed an Avro schema in the output or not. +Spring Batch provides an `AvroItemWriterBuilder` to construct an instance of the `AvroItemWriter`. + +#### Specialized Processors + +Spring Batch offers the following specialized processors: + +* [`ScriptItemProcessor`](#scriptItemProcessor) + +##### `ScriptItemProcessor` + +The `ScriptItemProcessor` is an `ItemProcessor` that passes the current item to process +to the provided script and the result of the script is returned by the processor. Spring +Batch provides a `ScriptItemProcessorBuilder` to construct an instance of the `ScriptItemProcessor`. \ No newline at end of file diff --git a/docs/en/spring-batch/repeat.md b/docs/en/spring-batch/repeat.md new file mode 100644 index 0000000000000000000000000000000000000000..faf19b5c82840c13e072014d77186fac80eec54e --- /dev/null +++ b/docs/en/spring-batch/repeat.md @@ -0,0 +1,212 @@ +# Repeat + +## Repeat + +XMLJavaBoth + +### RepeatTemplate + +Batch processing is about repetitive actions, either as a simple optimization or as part +of a job. To strategize and generalize the repetition and to provide what amounts to an +iterator framework, Spring Batch has the `RepeatOperations` interface. 
The`RepeatOperations` interface has the following definition: + +``` +public interface RepeatOperations { + + RepeatStatus iterate(RepeatCallback callback) throws RepeatException; + +} +``` + +The callback is an interface, shown in the following definition, that lets you insert +some business logic to be repeated: + +``` +public interface RepeatCallback { + + RepeatStatus doInIteration(RepeatContext context) throws Exception; + +} +``` + +The callback is executed repeatedly until the implementation determines that the +iteration should end. The return value in these interfaces is an enumeration that can +either be `RepeatStatus.CONTINUABLE` or `RepeatStatus.FINISHED`. A `RepeatStatus`enumeration conveys information to the caller of the repeat operations about whether +there is any more work to do. Generally speaking, implementations of `RepeatOperations`should inspect the `RepeatStatus` and use it as part of the decision to end the +iteration. Any callback that wishes to signal to the caller that there is no more work to +do can return `RepeatStatus.FINISHED`. + +The simplest general purpose implementation of `RepeatOperations` is `RepeatTemplate`, as +shown in the following example: + +``` +RepeatTemplate template = new RepeatTemplate(); + +template.setCompletionPolicy(new SimpleCompletionPolicy(2)); + +template.iterate(new RepeatCallback() { + + public RepeatStatus doInIteration(RepeatContext context) { + // Do stuff in batch... + return RepeatStatus.CONTINUABLE; + } + +}); +``` + +In the preceding example, we return `RepeatStatus.CONTINUABLE`, to show that there is +more work to do. The callback can also return `RepeatStatus.FINISHED`, to signal to the +caller that there is no more work to do. Some iterations can be terminated by +considerations intrinsic to the work being done in the callback. 
Others are effectively +infinite loops as far as the callback is concerned and the completion decision is +delegated to an external policy, as in the case shown in the preceding example. + +#### RepeatContext + +The method parameter for the `RepeatCallback` is a `RepeatContext`. Many callbacks ignore +the context. However, if necessary, it can be used as an attribute bag to store transient +data for the duration of the iteration. After the `iterate` method returns, the context +no longer exists. + +If there is a nested iteration in progress, a `RepeatContext` has a parent context. The +parent context is occasionally useful for storing data that need to be shared between +calls to `iterate`. This is the case, for instance, if you want to count the number of +occurrences of an event in the iteration and remember it across subsequent calls. + +#### RepeatStatus + +`RepeatStatus` is an enumeration used by Spring Batch to indicate whether processing has +finished. It has two possible `RepeatStatus` values, described in the following table: + +| *Value* | *Description* | +|-----------|--------------------------------------| +|CONTINUABLE| There is more work to do. | +| FINISHED |No more repetitions should take place.| + +`RepeatStatus` values can also be combined with a logical AND operation by using the`and()` method in `RepeatStatus`. The effect of this is to do a logical AND on the +continuable flag. In other words, if either status is `FINISHED`, then the result is`FINISHED`. + +### Completion Policies + +Inside a `RepeatTemplate`, the termination of the loop in the `iterate` method is +determined by a `CompletionPolicy`, which is also a factory for the `RepeatContext`. The`RepeatTemplate` has the responsibility to use the current policy to create a`RepeatContext` and pass that in to the `RepeatCallback` at every stage in the iteration. 
+After a callback completes its `doInIteration`, the `RepeatTemplate` has to make a call +to the `CompletionPolicy` to ask it to update its state (which will be stored in the`RepeatContext`). Then it asks the policy if the iteration is complete. + +Spring Batch provides some simple general purpose implementations of `CompletionPolicy`.`SimpleCompletionPolicy` allows execution up to a fixed number of times (with`RepeatStatus.FINISHED` forcing early completion at any time). + +Users might need to implement their own completion policies for more complicated +decisions. For example, a batch processing window that prevents batch jobs from executing +once the online systems are in use would require a custom policy. + +### Exception Handling + +If there is an exception thrown inside a `RepeatCallback`, the `RepeatTemplate` consults +an `ExceptionHandler`, which can decide whether or not to re-throw the exception. + +The following listing shows the `ExceptionHandler` interface definition: + +``` +public interface ExceptionHandler { + + void handleException(RepeatContext context, Throwable throwable) + throws Throwable; + +} +``` + +A common use case is to count the number of exceptions of a given type and fail when a +limit is reached. For this purpose, Spring Batch provides the`SimpleLimitExceptionHandler` and a slightly more flexible`RethrowOnThresholdExceptionHandler`. The `SimpleLimitExceptionHandler` has a limit +property and an exception type that should be compared with the current exception. All +subclasses of the provided type are also counted. Exceptions of the given type are +ignored until the limit is reached, and then they are rethrown. Exceptions of other types +are always rethrown. + +An important optional property of the `SimpleLimitExceptionHandler` is the boolean flag +called `useParent`. It is `false` by default, so the limit is only accounted for in the +current `RepeatContext`. 
When set to `true`, the limit is kept across sibling contexts in +a nested iteration (such as a set of chunks inside a step). + +### Listeners + +Often, it is useful to be able to receive additional callbacks for cross-cutting concerns +across a number of different iterations. For this purpose, Spring Batch provides the`RepeatListener` interface. The `RepeatTemplate` lets users register `RepeatListener`implementations, and they are given callbacks with the `RepeatContext` and `RepeatStatus`where available during the iteration. + +The `RepeatListener` interface has the following definition: + +``` +public interface RepeatListener { + void before(RepeatContext context); + void after(RepeatContext context, RepeatStatus result); + void open(RepeatContext context); + void onError(RepeatContext context, Throwable e); + void close(RepeatContext context); +} +``` + +The `open` and `close` callbacks come before and after the entire iteration. `before`,`after`, and `onError` apply to the individual `RepeatCallback` calls. + +Note that, when there is more than one listener, they are in a list, so there is an +order. In this case, `open` and `before` are called in the same order while `after`,`onError`, and `close` are called in reverse order. + +### Parallel Processing + +Implementations of `RepeatOperations` are not restricted to executing the callback +sequentially. It is quite important that some implementations are able to execute their +callbacks in parallel. To this end, Spring Batch provides the`TaskExecutorRepeatTemplate`, which uses the Spring `TaskExecutor` strategy to run the`RepeatCallback`. The default is to use a `SynchronousTaskExecutor`, which has the effect +of executing the whole iteration in the same thread (the same as a normal`RepeatTemplate`). + +### Declarative Iteration + +Sometimes there is some business processing that you know you want to repeat every time +it happens. The classic example of this is the optimization of a message pipeline. 
It is +more efficient to process a batch of messages, if they are arriving frequently, than to +bear the cost of a separate transaction for every message. Spring Batch provides an AOP +interceptor that wraps a method call in a `RepeatOperations` object for just this +purpose. The `RepeatOperationsInterceptor` executes the intercepted method and repeats +according to the `CompletionPolicy` in the provided `RepeatTemplate`. + +The following example shows declarative iteration using the Spring AOP namespace to +repeat a service call to a method called `processMessage` (for more detail on how to +configure AOP interceptors, see the Spring User Guide): + +``` + + + + + + +``` + +The following example demonstrates using Java configuration to +repeat a service call to a method called `processMessage` (for more detail on how to +configure AOP interceptors, see the Spring User Guide): + +``` +@Bean +public MyService myService() { + ProxyFactory factory = new ProxyFactory(RepeatOperations.class.getClassLoader()); + factory.setInterfaces(MyService.class); + factory.setTarget(new MyService()); + + MyService service = (MyService) factory.getProxy(); + JdkRegexpMethodPointcut pointcut = new JdkRegexpMethodPointcut(); + pointcut.setPatterns(".*processMessage.*"); + + RepeatOperationsInterceptor interceptor = new RepeatOperationsInterceptor(); + + ((Advised) service).addAdvisor(new DefaultPointcutAdvisor(pointcut, interceptor)); + + return service; +} +``` + +The preceding example uses a default `RepeatTemplate` inside the interceptor. To change +the policies, listeners, and other details, you can inject an instance of`RepeatTemplate` into the interceptor. + +If the intercepted method returns `void`, then the interceptor always returns`RepeatStatus.CONTINUABLE` (so there is a danger of an infinite loop if the`CompletionPolicy` does not have a finite end point). 
Otherwise, it returns`RepeatStatus.CONTINUABLE` until the return value from the intercepted method is `null`, +at which point it returns `RepeatStatus.FINISHED`. Consequently, the business logic +inside the target method can signal that there is no more work to do by returning `null`or by throwing an exception that is re-thrown by the `ExceptionHandler` in the provided`RepeatTemplate`. diff --git a/docs/en/spring-batch/retry.md b/docs/en/spring-batch/retry.md new file mode 100644 index 0000000000000000000000000000000000000000..1c4eadd02c3cba8a90120ac0a6c10d5ecf7923e2 --- /dev/null +++ b/docs/en/spring-batch/retry.md @@ -0,0 +1,312 @@ +# Retry + +## Retry + +XMLJavaBoth + +To make processing more robust and less prone to failure, it sometimes helps to +automatically retry a failed operation in case it might succeed on a subsequent attempt. +Errors that are susceptible to intermittent failure are often transient in nature. +Examples include remote calls to a web service that fails because of a network glitch or a`DeadlockLoserDataAccessException` in a database update. + +### `RetryTemplate` + +| |The retry functionality was pulled out of Spring Batch as of 2.2.0.
It is now part of a new library, [Spring Retry](https://github.com/spring-projects/spring-retry).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To automate retry operations, Spring Batch has the `RetryOperations` strategy. The +following listing shows the interface definition for `RetryOperations`: + +``` +public interface RetryOperations { + + T execute(RetryCallback retryCallback) throws E; + + T execute(RetryCallback retryCallback, RecoveryCallback recoveryCallback) + throws E; + + T execute(RetryCallback retryCallback, RetryState retryState) + throws E, ExhaustedRetryException; + + T execute(RetryCallback retryCallback, RecoveryCallback recoveryCallback, + RetryState retryState) throws E; + +} +``` + +The basic callback is a simple interface that lets you insert some business logic to be +retried, as shown in the following interface definition: + +``` +public interface RetryCallback { + + T doWithRetry(RetryContext context) throws E; + +} +``` + +The callback runs and, if it fails (by throwing an `Exception`), it is retried until +either it is successful or the implementation aborts. There are a number of overloaded `execute` methods in the `RetryOperations` interface. Those methods deal with various use +cases for recovery when all retry attempts are exhausted and deal with retry state, which +lets clients and implementations store information between calls (we cover this in more +detail later in the chapter). + +The simplest general purpose implementation of `RetryOperations` is `RetryTemplate`. It +can be used as follows: + +``` +RetryTemplate template = new RetryTemplate(); + +TimeoutRetryPolicy policy = new TimeoutRetryPolicy(); +policy.setTimeout(30000L); + +template.setRetryPolicy(policy); + +Foo result = template.execute(new RetryCallback() { + + public Foo doWithRetry(RetryContext context) { + // Do stuff that might fail, e.g. 
webservice operation + return result; + } + +}); +``` + +In the preceding example, we make a web service call and return the result to the user. If +that call fails, then it is retried until a timeout is reached. + +#### `RetryContext` + +The method parameter for the `RetryCallback` is a `RetryContext`. Many callbacks ignore +the context, but, if necessary, it can be used as an attribute bag to store data for the +duration of the iteration. + +A `RetryContext` has a parent context if there is a nested retry in progress in the same +thread. The parent context is occasionally useful for storing data that need to be shared +between calls to `execute`. + +#### `RecoveryCallback` + +When a retry is exhausted, the `RetryOperations` can pass control to a different callback, +called the `RecoveryCallback`. To use this feature, clients pass in the callbacks together +to the same method, as shown in the following example: + +``` +Foo foo = template.execute(new RetryCallback() { + public Foo doWithRetry(RetryContext context) { + // business logic here + }, + new RecoveryCallback() { + Foo recover(RetryContext context) throws Exception { + // recover logic here + } +}); +``` + +If the business logic does not succeed before the template decides to abort, then the +client is given the chance to do some alternate processing through the recovery callback. + +#### Stateless Retry + +In the simplest case, a retry is just a while loop. The `RetryTemplate` can just keep +trying until it either succeeds or fails. The `RetryContext` contains some state to +determine whether to retry or abort, but this state is on the stack and there is no need +to store it anywhere globally, so we call this stateless retry. The distinction between +stateless and stateful retry is contained in the implementation of the `RetryPolicy` (the`RetryTemplate` can handle both). In a stateless retry, the retry callback is always +executed in the same thread it was on when it failed. 
+ +#### Stateful Retry + +Where the failure has caused a transactional resource to become invalid, there are some +special considerations. This does not apply to a simple remote call because there is no +transactional resource (usually), but it does sometimes apply to a database update, +especially when using Hibernate. In this case it only makes sense to re-throw the +exception that caused the failure immediately, so that the transaction can roll back and +we can start a new, valid transaction. + +In cases involving transactions, a stateless retry is not good enough, because the +re-throw and roll back necessarily involve leaving the `RetryOperations.execute()` method +and potentially losing the context that was on the stack. To avoid losing it we have to +introduce a storage strategy to lift it off the stack and put it (at a minimum) in heap +storage. For this purpose, Spring Batch provides a storage strategy called `RetryContextCache`, which can be injected into the `RetryTemplate`. The default +implementation of the `RetryContextCache` is in memory, using a simple `Map`. Advanced +usage with multiple processes in a clustered environment might also consider implementing +the `RetryContextCache` with a cluster cache of some sort (however, even in a clustered +environment, this might be overkill). + +Part of the responsibility of the `RetryOperations` is to recognize the failed operations +when they come back in a new execution (and usually wrapped in a new transaction). To +facilitate this, Spring Batch provides the `RetryState` abstraction. This works in +conjunction with the special `execute` methods in the `RetryOperations` interface. + +The way the failed operations are recognized is by identifying the state across multiple +invocations of the retry. To identify the state, the user can provide a `RetryState` object that is responsible for returning a unique key identifying the item. The identifier +is used as a key in the `RetryContextCache` interface. 
+ +| |Be very careful with the implementation of `Object.equals()` and `Object.hashCode()` in
the key returned by `RetryState`. The best advice is to use a business key to identify the
items. In the case of a JMS message, the message ID can be used.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When the retry is exhausted, there is also the option to handle the failed item in a +different way, instead of calling the `RetryCallback` (which is now presumed to be likely +to fail). Just like in the stateless case, this option is provided by the`RecoveryCallback`, which can be provided by passing it in to the `execute` method of`RetryOperations`. + +The decision to retry or not is actually delegated to a regular `RetryPolicy`, so the +usual concerns about limits and timeouts can be injected there (described later in this +chapter). + +### Retry Policies + +Inside a `RetryTemplate`, the decision to retry or fail in the `execute` method is +determined by a `RetryPolicy`, which is also a factory for the `RetryContext`. The`RetryTemplate` has the responsibility to use the current policy to create a`RetryContext` and pass that in to the `RetryCallback` at every attempt. After a callback +fails, the `RetryTemplate` has to make a call to the `RetryPolicy` to ask it to update its +state (which is stored in the `RetryContext`) and then asks the policy if another attempt +can be made. If another attempt cannot be made (such as when a limit is reached or a +timeout is detected) then the policy is also responsible for handling the exhausted state. +Simple implementations throw `RetryExhaustedException`, which causes any enclosing +transaction to be rolled back. More sophisticated implementations might attempt to take +some recovery action, in which case the transaction can remain intact. + +| |Failures are inherently either retryable or not. If the same exception is always going to
be thrown from the business logic, it does no good to retry it. So do not retry on all
exception types. Rather, try to focus on only those exceptions that you expect to be
retryable. It is not usually harmful to the business logic to retry more aggressively, but
it is wasteful, because, if a failure is deterministic, you spend time retrying something
that you know in advance is fatal.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Batch provides some simple general purpose implementations of stateless`RetryPolicy`, such as `SimpleRetryPolicy` and `TimeoutRetryPolicy` (used in the preceding example). + +The `SimpleRetryPolicy` allows a retry on any of a named list of exception types, up to a +fixed number of times. It also has a list of "fatal" exceptions that should never be +retried, and this list overrides the retryable list so that it can be used to give finer +control over the retry behavior, as shown in the following example: + +``` +SimpleRetryPolicy policy = new SimpleRetryPolicy(); +// Set the max retry attempts +policy.setMaxAttempts(5); +// Retry on all exceptions (this is the default) +policy.setRetryableExceptions(new Class[] {Exception.class}); +// ... but never retry IllegalStateException +policy.setFatalExceptions(new Class[] {IllegalStateException.class}); + +// Use the policy... +RetryTemplate template = new RetryTemplate(); +template.setRetryPolicy(policy); +template.execute(new RetryCallback() { + public Foo doWithRetry(RetryContext context) { + // business logic here + } +}); +``` + +There is also a more flexible implementation called `ExceptionClassifierRetryPolicy`, +which lets the user configure different retry behavior for an arbitrary set of exception +types though the `ExceptionClassifier` abstraction. The policy works by calling on the +classifier to convert an exception into a delegate `RetryPolicy`. 
For example, one +exception type can be retried more times before failure than another by mapping it to a +different policy. + +Users might need to implement their own retry policies for more customized decisions. For +instance, a custom retry policy makes sense when there is a well-known, solution-specific +classification of exceptions into retryable and not retryable. + +### Backoff Policies + +When retrying after a transient failure, it often helps to wait a bit before trying again, +because usually the failure is caused by some problem that can only be resolved by +waiting. If a `RetryCallback` fails, the `RetryTemplate` can pause execution according to +the `BackoffPolicy`. + +The following code shows the interface definition for the `BackOffPolicy` interface: + +``` +public interface BackoffPolicy { + + BackOffContext start(RetryContext context); + + void backOff(BackOffContext backOffContext) + throws BackOffInterruptedException; + +} +``` + +A `BackoffPolicy` is free to implement the backOff in any way it chooses. The policies +provided by Spring Batch out of the box all use `Object.wait()`. A common use case is to +backoff with an exponentially increasing wait period, to avoid two retries getting into +lock step and both failing (this is a lesson learned from ethernet). For this purpose, +Spring Batch provides the `ExponentialBackoffPolicy`. + +### Listeners + +Often, it is useful to be able to receive additional callbacks for cross cutting concerns +across a number of different retries. For this purpose, Spring Batch provides the`RetryListener` interface. The `RetryTemplate` lets users register `RetryListeners`, and +they are given callbacks with `RetryContext` and `Throwable` where available during the +iteration. 
+ +The following code shows the interface definition for `RetryListener`: + +``` +public interface RetryListener { + + boolean open(RetryContext context, RetryCallback callback); + + void onError(RetryContext context, RetryCallback callback, Throwable throwable); + + void close(RetryContext context, RetryCallback callback, Throwable throwable); +} +``` + +The `open` and `close` callbacks come before and after the entire retry in the simplest +case, and `onError` applies to the individual `RetryCallback` calls. The `close` method +might also receive a `Throwable`. If there has been an error, it is the last one thrown by +the `RetryCallback`. + +Note that, when there is more than one listener, they are in a list, so there is an order. +In this case, `open` is called in the same order while `onError` and `close` are called in +reverse order. + +### Declarative Retry + +Sometimes, there is some business processing that you know you want to retry every time it +happens. The classic example of this is the remote service call. Spring Batch provides an +AOP interceptor that wraps a method call in a `RetryOperations` implementation for just +this purpose. The `RetryOperationsInterceptor` executes the intercepted method and retries +on failure according to the `RetryPolicy` in the provided `RepeatTemplate`. 
+ +The following example shows a declarative retry that uses the Spring AOP namespace to +retry a service call to a method called `remoteCall` (for more detail on how to configure +AOP interceptors, see the Spring User Guide): + +``` + + + + + + +``` + +The following example shows a declarative retry that uses java configuration to retry a +service call to a method called `remoteCall` (for more detail on how to configure AOP +interceptors, see the Spring User Guide): + +``` +@Bean +public MyService myService() { + ProxyFactory factory = new ProxyFactory(RepeatOperations.class.getClassLoader()); + factory.setInterfaces(MyService.class); + factory.setTarget(new MyService()); + + MyService service = (MyService) factory.getProxy(); + JdkRegexpMethodPointcut pointcut = new JdkRegexpMethodPointcut(); + pointcut.setPatterns(".*remoteCall.*"); + + RetryOperationsInterceptor interceptor = new RetryOperationsInterceptor(); + + ((Advised) service).addAdvisor(new DefaultPointcutAdvisor(pointcut, interceptor)); + + return service; +} +``` + +The preceding example uses a default `RetryTemplate` inside the interceptor. To change the +policies or listeners, you can inject an instance of `RetryTemplate` into the interceptor. \ No newline at end of file diff --git a/docs/en/spring-batch/scalability.md b/docs/en/spring-batch/scalability.md new file mode 100644 index 0000000000000000000000000000000000000000..df00f11321661d44b10400634759f2c8a347dbc5 --- /dev/null +++ b/docs/en/spring-batch/scalability.md @@ -0,0 +1,447 @@ +# Scaling and Parallel Processing + +## Scaling and Parallel Processing + +XMLJavaBoth + +Many batch processing problems can be solved with single threaded, single process jobs, +so it is always a good idea to properly check if that meets your needs before thinking +about more complex implementations. Measure the performance of a realistic job and see if +the simplest implementation meets your needs first. 
You can read and write a file of +several hundred megabytes in well under a minute, even with standard hardware. + +When you are ready to start implementing a job with some parallel processing, Spring +Batch offers a range of options, which are described in this chapter, although some +features are covered elsewhere. At a high level, there are two modes of parallel +processing: + +* Single process, multi-threaded + +* Multi-process + +These break down into categories as well, as follows: + +* Multi-threaded Step (single process) + +* Parallel Steps (single process) + +* Remote Chunking of Step (multi process) + +* Partitioning a Step (single or multi process) + +First, we review the single-process options. Then we review the multi-process options. + +### Multi-threaded Step + +The simplest way to start parallel processing is to add a `TaskExecutor` to your Step +configuration. + +For example, you might add an attribute of the `tasklet`, as follows: + +``` + + ... + +``` + +When using java configuration, a `TaskExecutor` can be added to the step, +as shown in the following example: + +Java Configuration + +``` +@Bean +public TaskExecutor taskExecutor() { + return new SimpleAsyncTaskExecutor("spring_batch"); +} + +@Bean +public Step sampleStep(TaskExecutor taskExecutor) { + return this.stepBuilderFactory.get("sampleStep") + .chunk(10) + .reader(itemReader()) + .writer(itemWriter()) + .taskExecutor(taskExecutor) + .build(); +} +``` + +In this example, the `taskExecutor` is a reference to another bean definition that +implements the `TaskExecutor` interface.[`TaskExecutor`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/core/task/TaskExecutor.html)is a standard Spring interface, so consult the Spring User Guide for details of available +implementations. The simplest multi-threaded `TaskExecutor` is a`SimpleAsyncTaskExecutor`. 
+ +The result of the above configuration is that the `Step` executes by reading, processing, +and writing each chunk of items (each commit interval) in a separate thread of execution. +Note that this means there is no fixed order for the items to be processed, and a chunk +might contain items that are non-consecutive compared to the single-threaded case. In +addition to any limits placed by the task executor (such as whether it is backed by a +thread pool), there is a throttle limit in the tasklet configuration which defaults to 4. +You may need to increase this to ensure that a thread pool is fully utilized. + +For example you might increase the throttle-limit, as shown in the following example: + +``` + ... + +``` + +When using Java configuration, the builders provide access to the throttle limit, as shown +in the following example: + +Java Configuration + +``` +@Bean +public Step sampleStep(TaskExecutor taskExecutor) { + return this.stepBuilderFactory.get("sampleStep") + .chunk(10) + .reader(itemReader()) + .writer(itemWriter()) + .taskExecutor(taskExecutor) + .throttleLimit(20) + .build(); +} +``` + +Note also that there may be limits placed on concurrency by any pooled resources used in +your step, such as a `DataSource`. Be sure to make the pool in those resources at least +as large as the desired number of concurrent threads in the step. + +There are some practical limitations of using multi-threaded `Step` implementations for +some common batch use cases. Many participants in a `Step` (such as readers and writers) +are stateful. If the state is not segregated by thread, then those components are not +usable in a multi-threaded `Step`. In particular, most of the off-the-shelf readers and +writers from Spring Batch are not designed for multi-threaded use. 
It is, however, +possible to work with stateless or thread safe readers and writers, and there is a sample +(called `parallelJob`) in the[Spring +Batch Samples](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples) that shows the use of a process indicator (see[Preventing State Persistence](readersAndWriters.html#process-indicator)) to keep track +of items that have been processed in a database input table. + +Spring Batch provides some implementations of `ItemWriter` and `ItemReader`. Usually, +they say in the Javadoc if they are thread safe or not or what you have to do to avoid +problems in a concurrent environment. If there is no information in the Javadoc, you can +check the implementation to see if there is any state. If a reader is not thread safe, +you can decorate it with the provided `SynchronizedItemStreamReader` or use it in your own +synchronizing delegator. You can synchronize the call to `read()` and as long as the +processing and writing is the most expensive part of the chunk, your step may still +complete much faster than it would in a single threaded configuration. + +### Parallel Steps + +As long as the application logic that needs to be parallelized can be split into distinct +responsibilities and assigned to individual steps, then it can be parallelized in a +single process. Parallel Step execution is easy to configure and use. 
+ +For example, executing steps `(step1,step2)` in parallel with `step3` is straightforward, +as shown in the following example: + +``` + + + + + + + + + + + + + + +``` + +When using Java configuration, executing steps `(step1,step2)` in parallel with `step3`is straightforward, as shown in the following example: + +Java Configuration + +``` +@Bean +public Job job() { + return jobBuilderFactory.get("job") + .start(splitFlow()) + .next(step4()) + .build() //builds FlowJobBuilder instance + .build(); //builds Job instance +} + +@Bean +public Flow splitFlow() { + return new FlowBuilder("splitFlow") + .split(taskExecutor()) + .add(flow1(), flow2()) + .build(); +} + +@Bean +public Flow flow1() { + return new FlowBuilder("flow1") + .start(step1()) + .next(step2()) + .build(); +} + +@Bean +public Flow flow2() { + return new FlowBuilder("flow2") + .start(step3()) + .build(); +} + +@Bean +public TaskExecutor taskExecutor() { + return new SimpleAsyncTaskExecutor("spring_batch"); +} +``` + +The configurable task executor is used to specify which `TaskExecutor`implementation should be used to execute the individual flows. The default is`SyncTaskExecutor`, but an asynchronous `TaskExecutor` is required to run the steps in +parallel. Note that the job ensures that every flow in the split completes before +aggregating the exit statuses and transitioning. + +See the section on [Split Flows](step.html#split-flows) for more detail. + +### Remote Chunking + +In remote chunking, the `Step` processing is split across multiple processes, +communicating with each other through some middleware. The following image shows the +pattern: + +![Remote Chunking](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-chunking.png) + +Figure 1. Remote Chunking + +The manager component is a single process, and the workers are multiple remote processes. 
+This pattern works best if the manager is not a bottleneck, so the processing must be more +expensive than the reading of items (as is often the case in practice). + +The manager is an implementation of a Spring Batch `Step` with the `ItemWriter` replaced +by a generic version that knows how to send chunks of items to the middleware as +messages. The workers are standard listeners for whatever middleware is being used (for +example, with JMS, they would be `MessageListener` implementations), and their role is +to process the chunks of items using a standard `ItemWriter` or `ItemProcessor` plus`ItemWriter`, through the `ChunkProcessor` interface. One of the advantages of using this +pattern is that the reader, processor, and writer components are off-the-shelf (the same +as would be used for a local execution of the step). The items are divided up dynamically +and work is shared through the middleware, so that, if the listeners are all eager +consumers, then load balancing is automatic. + +The middleware has to be durable, with guaranteed delivery and a single consumer for each +message. JMS is the obvious candidate, but other options (such as JavaSpaces) exist in +the grid computing and shared memory product space. + +See the section on[Spring Batch Integration - Remote Chunking](spring-batch-integration.html#remote-chunking)for more detail. + +### Partitioning + +Spring Batch also provides an SPI for partitioning a `Step` execution and executing it +remotely. In this case, the remote participants are `Step` instances that could just as +easily have been configured and used for local processing. The following image shows the +pattern: + +![Partitioning Overview](https://docs.spring.io/spring-batch/docs/current/reference/html/images/partitioning-overview.png) + +Figure 2. Partitioning + +The `Job` runs on the left-hand side as a sequence of `Step` instances, and one of the`Step` instances is labeled as a manager. 
The workers in this picture are all identical +instances of a `Step`, which could in fact take the place of the manager, resulting in the +same outcome for the `Job`. The workers are typically going to be remote services but +could also be local threads of execution. The messages sent by the manager to the workers +in this pattern do not need to be durable or have guaranteed delivery. Spring Batch +metadata in the `JobRepository` ensures that each worker is executed once and only once for +each `Job` execution. + +The SPI in Spring Batch consists of a special implementation of `Step` (called the`PartitionStep`) and two strategy interfaces that need to be implemented for the specific +environment. The strategy interfaces are `PartitionHandler` and `StepExecutionSplitter`, +and their role is shown in the following sequence diagram: + +![Partitioning SPI](https://docs.spring.io/spring-batch/docs/current/reference/html/images/partitioning-spi.png) + +Figure 3. Partitioning SPI + +The `Step` on the right in this case is the “remote” worker, so, potentially, there are +many objects and or processes playing this role, and the `PartitionStep` is shown driving +the execution. + +The following example shows the `PartitionStep` configuration when using XML +configuration: + +``` + + + + + +``` + +The following example shows the `PartitionStep` configuration when using Java +configuration: + +Java Configuration + +``` +@Bean +public Step step1Manager() { + return stepBuilderFactory.get("step1.manager") + .partitioner("step1", partitioner()) + .step(step1()) + .gridSize(10) + .taskExecutor(taskExecutor()) + .build(); +} +``` + +Similar to the multi-threaded step’s `throttle-limit` attribute, the `grid-size`attribute prevents the task executor from being saturated with requests from a single +step. 
+ +There is a simple example that can be copied and extended in the unit test suite for[Spring +Batch Samples](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples/src/main/resources/jobs) (see `partition*Job.xml` configuration). + +Spring Batch creates step executions for the partitions called "step1:partition0", and so +on. Many people prefer to call the manager step "step1:manager" for consistency. You can +use an alias for the step (by specifying the `name` attribute instead of the `id`attribute). + +#### PartitionHandler + +The `PartitionHandler` is the component that knows about the fabric of the remoting or +grid environment. It is able to send `StepExecution` requests to the remote `Step`instances, wrapped in some fabric-specific format, like a DTO. It does not have to know +how to split the input data or how to aggregate the result of multiple `Step` executions. +Generally speaking, it probably also does not need to know about resilience or failover, +since those are features of the fabric in many cases. In any case, Spring Batch always +provides restartability independent of the fabric. A failed `Job` can always be restarted +and only the failed `Steps` are re-executed. + +The `PartitionHandler` interface can have specialized implementations for a variety of +fabric types, including simple RMI remoting, EJB remoting, custom web service, JMS, Java +Spaces, shared memory grids (like Terracotta or Coherence), and grid execution fabrics +(like GridGain). Spring Batch does not contain implementations for any proprietary grid +or remoting fabrics. + +Spring Batch does, however, provide a useful implementation of `PartitionHandler` that +executes `Step` instances locally in separate threads of execution, using the`TaskExecutor` strategy from Spring. The implementation is called`TaskExecutorPartitionHandler`. + +The `TaskExecutorPartitionHandler` is the default for a step configured with the XML +namespace shown previously. 
It can also be configured explicitly, as shown in the +following example: + +``` + + + + + + + + + +``` + +The `TaskExecutorPartitionHandler` can be configured explicitly within java configuration, +as shown in the following example: + +Java Configuration + +``` +@Bean +public Step step1Manager() { + return stepBuilderFactory.get("step1.manager") + .partitioner("step1", partitioner()) + .partitionHandler(partitionHandler()) + .build(); +} + +@Bean +public PartitionHandler partitionHandler() { + TaskExecutorPartitionHandler retVal = new TaskExecutorPartitionHandler(); + retVal.setTaskExecutor(taskExecutor()); + retVal.setStep(step1()); + retVal.setGridSize(10); + return retVal; +} +``` + +The `gridSize` attribute determines the number of separate step executions to create, so +it can be matched to the size of the thread pool in the `TaskExecutor`. Alternatively, it +can be set to be larger than the number of threads available, which makes the blocks of +work smaller. + +The `TaskExecutorPartitionHandler` is useful for IO-intensive `Step` instances, such as +copying large numbers of files or replicating filesystems into content management +systems. It can also be used for remote execution by providing a `Step` implementation +that is a proxy for a remote invocation (such as using Spring Remoting). + +#### Partitioner + +The `Partitioner` has a simpler responsibility: to generate execution contexts as input +parameters for new step executions only (no need to worry about restarts). It has a +single method, as shown in the following interface definition: + +``` +public interface Partitioner { + Map partition(int gridSize); +} +``` + +The return value from this method associates a unique name for each step execution (the`String`) with input parameters in the form of an `ExecutionContext`. The names show up +later in the Batch metadata as the step name in the partitioned `StepExecutions`. 
The`ExecutionContext` is just a bag of name-value pairs, so it might contain a range of +primary keys, line numbers, or the location of an input file. The remote `Step` then +normally binds to the context input using `#{…​}` placeholders (late binding in step +scope), as illustrated in the next section. + +The names of the step executions (the keys in the `Map` returned by `Partitioner`) need +to be unique amongst the step executions of a `Job` but do not have any other specific +requirements. The easiest way to do this (and to make the names meaningful for users) is +to use a prefix+suffix naming convention, where the prefix is the name of the step that +is being executed (which itself is unique in the `Job`), and the suffix is just a +counter. There is a `SimplePartitioner` in the framework that uses this convention. + +An optional interface called `PartitionNameProvider` can be used to provide the partition +names separately from the partitions themselves. If a `Partitioner` implements this +interface, then, on a restart, only the names are queried. If partitioning is expensive, +this can be a useful optimization. The names provided by the `PartitionNameProvider` must +match those provided by the `Partitioner`. + +#### Binding Input Data to Steps + +It is very efficient for the steps that are executed by the `PartitionHandler` to have +identical configuration and for their input parameters to be bound at runtime from the`ExecutionContext`. This is easy to do with the StepScope feature of Spring Batch +(covered in more detail in the section on [Late Binding](step.html#late-binding)). 
For +example, if the `Partitioner` creates `ExecutionContext` instances with an attribute key +called `fileName`, pointing to a different file (or directory) for each step invocation, +the `Partitioner` output might resemble the content of the following table: + +|*Step Execution Name (key)*|*ExecutionContext (value)*| +|---------------------------|--------------------------| +| filecopy:partition0 | fileName=/home/data/one | +| filecopy:partition1 | fileName=/home/data/two | +| filecopy:partition2 |fileName=/home/data/three | + +Then the file name can be bound to a step using late binding to the execution context. + +The following example shows how to define late binding in XML: + +XML Configuration + +``` + + + +``` + +The following example shows how to define late binding in Java: + +Java Configuration + +``` +@Bean +public MultiResourceItemReader itemReader( + @Value("#{stepExecutionContext['fileName']}/*") Resource [] resources) { + return new MultiResourceItemReaderBuilder() + .delegate(fileReader()) + .name("itemReader") + .resources(resources) + .build(); +} +``` \ No newline at end of file diff --git a/docs/en/spring-batch/schema-appendix.md b/docs/en/spring-batch/schema-appendix.md new file mode 100644 index 0000000000000000000000000000000000000000..eecf926584e1f5a7d7ccf3d7eaa42cd340227ff9 --- /dev/null +++ b/docs/en/spring-batch/schema-appendix.md @@ -0,0 +1,389 @@ +# Meta-Data Schema + +## Appendix A: Meta-Data Schema + +### Overview + +The Spring Batch Metadata tables closely match the Domain objects that represent them in +Java. For example, `JobInstance`, `JobExecution`, `JobParameters`, and `StepExecution`map to `BATCH_JOB_INSTANCE`, `BATCH_JOB_EXECUTION`, `BATCH_JOB_EXECUTION_PARAMS`, and`BATCH_STEP_EXECUTION`, respectively. `ExecutionContext` maps to both`BATCH_JOB_EXECUTION_CONTEXT` and `BATCH_STEP_EXECUTION_CONTEXT`. The `JobRepository` is +responsible for saving and storing each Java object into its correct table. 
This appendix +describes the metadata tables in detail, along with many of the design decisions that +were made when creating them. When viewing the various table creation statements below, +it is important to realize that the data types used are as generic as possible. Spring +Batch provides many schemas as examples, all of which have varying data types, due to +variations in how individual database vendors handle data types. The following image +shows an ERD model of all 6 tables and their relationships to one another: + +![Spring Batch Meta-Data ERD](https://docs.spring.io/spring-batch/docs/current/reference/html/images/meta-data-erd.png) + +Figure 1. Spring Batch Meta-Data ERD + +#### Example DDL Scripts + +The Spring Batch Core JAR file contains example scripts to create the relational tables +for a number of database platforms (which are, in turn, auto-detected by the job +repository factory bean or namespace equivalent). These scripts can be used as is or +modified with additional indexes and constraints as desired. The file names are in the +form `schema-*.sql`, where "\*" is the short name of the target database platform. +The scripts are in the package `org.springframework.batch.core`. + +#### Migration DDL Scripts + +Spring Batch provides migration DDL scripts that you need to execute when you upgrade versions. +These scripts can be found in the Core Jar file under `org/springframework/batch/core/migration`. +Migration scripts are organized into folders corresponding to version numbers in which they were introduced: + +* `2.2`: contains scripts needed if you are migrating from a version before `2.2` to version `2.2` + +* `4.1`: contains scripts needed if you are migrating from a version before `4.1` to version `4.1` + +#### Version + +Many of the database tables discussed in this appendix contain a version column. This +column is important because Spring Batch employs an optimistic locking strategy when +dealing with updates to the database. 
This means that each time a record is 'touched' +(updated) the value in the version column is incremented by one. When the repository goes +back to save the value, if the version number has changed it throws an`OptimisticLockingFailureException`, indicating there has been an error with concurrent +access. This check is necessary, since, even though different batch jobs may be running +in different machines, they all use the same database tables. + +#### Identity + +`BATCH_JOB_INSTANCE`, `BATCH_JOB_EXECUTION`, and `BATCH_STEP_EXECUTION` each contain +columns ending in `_ID`. These fields act as primary keys for their respective tables. +However, they are not database generated keys. Rather, they are generated by separate +sequences. This is necessary because, after inserting one of the domain objects into the +database, the key it is given needs to be set on the actual object so that they can be +uniquely identified in Java. Newer database drivers (JDBC 3.0 and up) support this +feature with database-generated keys. However, rather than require that feature, +sequences are used. Each variation of the schema contains some form of the following +statements: + +``` +CREATE SEQUENCE BATCH_STEP_EXECUTION_SEQ; +CREATE SEQUENCE BATCH_JOB_EXECUTION_SEQ; +CREATE SEQUENCE BATCH_JOB_SEQ; +``` + +Many database vendors do not support sequences. In these cases, work-arounds are used, +such as the following statements for MySQL: + +``` +CREATE TABLE BATCH_STEP_EXECUTION_SEQ (ID BIGINT NOT NULL) type=InnoDB; +INSERT INTO BATCH_STEP_EXECUTION_SEQ values(0); +CREATE TABLE BATCH_JOB_EXECUTION_SEQ (ID BIGINT NOT NULL) type=InnoDB; +INSERT INTO BATCH_JOB_EXECUTION_SEQ values(0); +CREATE TABLE BATCH_JOB_SEQ (ID BIGINT NOT NULL) type=InnoDB; +INSERT INTO BATCH_JOB_SEQ values(0); +``` + +In the preceding case, a table is used in place of each sequence. The Spring core class,`MySQLMaxValueIncrementer`, then increments the one column in this sequence in order to +give similar functionality. 
+ +### `BATCH_JOB_INSTANCE` + +The `BATCH_JOB_INSTANCE` table holds all information relevant to a `JobInstance`, and +serves as the top of the overall hierarchy. The following generic DDL statement is used +to create it: + +``` +CREATE TABLE BATCH_JOB_INSTANCE ( + JOB_INSTANCE_ID BIGINT PRIMARY KEY , + VERSION BIGINT, + JOB_NAME VARCHAR(100) NOT NULL , + JOB_KEY VARCHAR(2500) +); +``` + +The following list describes each column in the table: + +* `JOB_INSTANCE_ID`: The unique ID that identifies the instance. It is also the primary + key. The value of this column should be obtainable by calling the `getId` method on`JobInstance`. + +* `VERSION`: See [Version](#metaDataVersion). + +* `JOB_NAME`: Name of the job obtained from the `Job` object. Because it is required to + identify the instance, it must not be null. + +* `JOB_KEY`: A serialization of the `JobParameters` that uniquely identifies separate + instances of the same job from one another. (`JobInstances` with the same job name must + have different `JobParameters` and, thus, different `JOB_KEY` values). + +### `BATCH_JOB_EXECUTION_PARAMS` + +The `BATCH_JOB_EXECUTION_PARAMS` table holds all information relevant to the`JobParameters` object. It contains 0 or more key/value pairs passed to a `Job` and +serves as a record of the parameters with which a job was run. For each parameter that +contributes to the generation of a job’s identity, the `IDENTIFYING` flag is set to true. +Note that the table has been denormalized. 
Rather than creating a separate table for each +type, there is one table with a column indicating the type, as shown in the following +listing: + +``` +CREATE TABLE BATCH_JOB_EXECUTION_PARAMS ( + JOB_EXECUTION_ID BIGINT NOT NULL , + TYPE_CD VARCHAR(6) NOT NULL , + KEY_NAME VARCHAR(100) NOT NULL , + STRING_VAL VARCHAR(250) , + DATE_VAL DATETIME DEFAULT NULL , + LONG_VAL BIGINT , + DOUBLE_VAL DOUBLE PRECISION , + IDENTIFYING CHAR(1) NOT NULL , + constraint JOB_EXEC_PARAMS_FK foreign key (JOB_EXECUTION_ID) + references BATCH_JOB_EXECUTION(JOB_EXECUTION_ID) +); +``` + +The following list describes each column: + +* `JOB_EXECUTION_ID`: Foreign key from the `BATCH_JOB_EXECUTION` table that indicates the + job execution to which the parameter entry belongs. Note that multiple rows (that is, + key/value pairs) may exist for each execution. + +* TYPE\_CD: String representation of the type of value stored, which can be a string, a + date, a long, or a double. Because the type must be known, it cannot be null. + +* KEY\_NAME: The parameter key. + +* STRING\_VAL: Parameter value, if the type is string. + +* DATE\_VAL: Parameter value, if the type is date. + +* LONG\_VAL: Parameter value, if the type is long. + +* DOUBLE\_VAL: Parameter value, if the type is double. + +* IDENTIFYING: Flag indicating whether the parameter contributed to the identity of the + related `JobInstance`. + +Note that there is no primary key for this table. This is because the framework has no +use for one and, thus, does not require it. If need be, a primary key may be +added with a database-generated key without causing any issues to the framework itself. + +### `BATCH_JOB_EXECUTION` + +The `BATCH_JOB_EXECUTION` table holds all information relevant to the `JobExecution`object. Every time a `Job` is run, there is always a new `JobExecution`, and a new row in +this table. 
The following listing shows the definition of the `BATCH_JOB_EXECUTION`table: + +``` +CREATE TABLE BATCH_JOB_EXECUTION ( + JOB_EXECUTION_ID BIGINT PRIMARY KEY , + VERSION BIGINT, + JOB_INSTANCE_ID BIGINT NOT NULL, + CREATE_TIME TIMESTAMP NOT NULL, + START_TIME TIMESTAMP DEFAULT NULL, + END_TIME TIMESTAMP DEFAULT NULL, + STATUS VARCHAR(10), + EXIT_CODE VARCHAR(20), + EXIT_MESSAGE VARCHAR(2500), + LAST_UPDATED TIMESTAMP, + JOB_CONFIGURATION_LOCATION VARCHAR(2500) NULL, + constraint JOB_INSTANCE_EXECUTION_FK foreign key (JOB_INSTANCE_ID) + references BATCH_JOB_INSTANCE(JOB_INSTANCE_ID) +) ; +``` + +The following list describes each column: + +* `JOB_EXECUTION_ID`: Primary key that uniquely identifies this execution. The value of + this column is obtainable by calling the `getId` method of the `JobExecution` object. + +* `VERSION`: See [Version](#metaDataVersion). + +* `JOB_INSTANCE_ID`: Foreign key from the `BATCH_JOB_INSTANCE` table. It indicates the + instance to which this execution belongs. There may be more than one execution per + instance. + +* `CREATE_TIME`: Timestamp representing the time when the execution was created. + +* `START_TIME`: Timestamp representing the time when the execution was started. + +* `END_TIME`: Timestamp representing the time when the execution finished, regardless of + success or failure. An empty value in this column when the job is not currently running + indicates that there has been some type of error and the framework was unable to perform + a last save before failing. + +* `STATUS`: Character string representing the status of the execution. This may be`COMPLETED`, `STARTED`, and others. The object representation of this column is the`BatchStatus` enumeration. + +* `EXIT_CODE`: Character string representing the exit code of the execution. In the case + of a command-line job, this may be converted into a number. + +* `EXIT_MESSAGE`: Character string representing a more detailed description of how the + job exited. 
In the case of failure, this might include as much of the stack trace as is + possible. + +* `LAST_UPDATED`: Timestamp representing the last time this execution was persisted. + +### `BATCH_STEP_EXECUTION` + +The BATCH\_STEP\_EXECUTION table holds all information relevant to the `StepExecution`object. This table is similar in many ways to the `BATCH_JOB_EXECUTION` table, and there +is always at least one entry per `Step` for each `JobExecution` created. The following +listing shows the definition of the `BATCH_STEP_EXECUTION` table: + +``` +CREATE TABLE BATCH_STEP_EXECUTION ( + STEP_EXECUTION_ID BIGINT PRIMARY KEY , + VERSION BIGINT NOT NULL, + STEP_NAME VARCHAR(100) NOT NULL, + JOB_EXECUTION_ID BIGINT NOT NULL, + START_TIME TIMESTAMP NOT NULL , + END_TIME TIMESTAMP DEFAULT NULL, + STATUS VARCHAR(10), + COMMIT_COUNT BIGINT , + READ_COUNT BIGINT , + FILTER_COUNT BIGINT , + WRITE_COUNT BIGINT , + READ_SKIP_COUNT BIGINT , + WRITE_SKIP_COUNT BIGINT , + PROCESS_SKIP_COUNT BIGINT , + ROLLBACK_COUNT BIGINT , + EXIT_CODE VARCHAR(20) , + EXIT_MESSAGE VARCHAR(2500) , + LAST_UPDATED TIMESTAMP, + constraint JOB_EXECUTION_STEP_FK foreign key (JOB_EXECUTION_ID) + references BATCH_JOB_EXECUTION(JOB_EXECUTION_ID) +) ; +``` + +The following list describes for each column: + +* `STEP_EXECUTION_ID`: Primary key that uniquely identifies this execution. The value of + this column should be obtainable by calling the `getId` method of the `StepExecution`object. + +* `VERSION`: See [Version](#metaDataVersion). + +* `STEP_NAME`: The name of the step to which this execution belongs. + +* `JOB_EXECUTION_ID`: Foreign key from the `BATCH_JOB_EXECUTION` table. It indicates the`JobExecution` to which this `StepExecution` belongs. There may be only one`StepExecution` for a given `JobExecution` for a given `Step` name. + +* `START_TIME`: Timestamp representing the time when the execution was started. 
+ +* `END_TIME`: Timestamp representing the time when the execution was finished, regardless + of success or failure. An empty value in this column, even though the job is not + currently running, indicates that there has been some type of error and the framework was + unable to perform a last save before failing. + +* `STATUS`: Character string representing the status of the execution. This may be`COMPLETED`, `STARTED`, and others. The object representation of this column is the`BatchStatus` enumeration. + +* `COMMIT_COUNT`: The number of times in which the step has committed a transaction + during this execution. + +* `READ_COUNT`: The number of items read during this execution. + +* `FILTER_COUNT`: The number of items filtered out of this execution. + +* `WRITE_COUNT`: The number of items written and committed during this execution. + +* `READ_SKIP_COUNT`: The number of items skipped on read during this execution. + +* `WRITE_SKIP_COUNT`: The number of items skipped on write during this execution. + +* `PROCESS_SKIP_COUNT`: The number of items skipped during processing during this + execution. + +* `ROLLBACK_COUNT`: The number of rollbacks during this execution. Note that this count + includes each time rollback occurs, including rollbacks for retry and those in the skip + recovery procedure. + +* `EXIT_CODE`: Character string representing the exit code of the execution. In the case + of a command-line job, this may be converted into a number. + +* `EXIT_MESSAGE`: Character string representing a more detailed description of how the + job exited. In the case of failure, this might include as much of the stack trace as is + possible. + +* `LAST_UPDATED`: Timestamp representing the last time this execution was persisted. + +### `BATCH_JOB_EXECUTION_CONTEXT` + +The `BATCH_JOB_EXECUTION_CONTEXT` table holds all information relevant to the`ExecutionContext` of a `Job`. 
There is exactly one `Job` `ExecutionContext` per`JobExecution`, and it contains all of the job-level data that is needed for a particular +job execution. This data typically represents the state that must be retrieved after a +failure, so that a `JobInstance` can "start from where it left off". The following +listing shows the definition of the `BATCH_JOB_EXECUTION_CONTEXT` table: + +``` +CREATE TABLE BATCH_JOB_EXECUTION_CONTEXT ( + JOB_EXECUTION_ID BIGINT PRIMARY KEY, + SHORT_CONTEXT VARCHAR(2500) NOT NULL, + SERIALIZED_CONTEXT CLOB, + constraint JOB_EXEC_CTX_FK foreign key (JOB_EXECUTION_ID) + references BATCH_JOB_EXECUTION(JOB_EXECUTION_ID) +) ; +``` + +The following list describes each column: + +* `JOB_EXECUTION_ID`: Foreign key representing the `JobExecution` to which the context + belongs. There may be more than one row associated with a given execution. + +* `SHORT_CONTEXT`: A string version of the `SERIALIZED_CONTEXT`. + +* `SERIALIZED_CONTEXT`: The entire context, serialized. + +### `BATCH_STEP_EXECUTION_CONTEXT` + +The `BATCH_STEP_EXECUTION_CONTEXT` table holds all information relevant to the`ExecutionContext` of a `Step`. There is exactly one `ExecutionContext` per`StepExecution`, and it contains all of the data that +needs to be persisted for a particular step execution. This data typically represents the +state that must be retrieved after a failure, so that a `JobInstance` can 'start from +where it left off'. The following listing shows the definition of the`BATCH_STEP_EXECUTION_CONTEXT` table: + +``` +CREATE TABLE BATCH_STEP_EXECUTION_CONTEXT ( + STEP_EXECUTION_ID BIGINT PRIMARY KEY, + SHORT_CONTEXT VARCHAR(2500) NOT NULL, + SERIALIZED_CONTEXT CLOB, + constraint STEP_EXEC_CTX_FK foreign key (STEP_EXECUTION_ID) + references BATCH_STEP_EXECUTION(STEP_EXECUTION_ID) +) ; +``` + +The following list describes each column: + +* `STEP_EXECUTION_ID`: Foreign key representing the `StepExecution` to which the context + belongs. 
There may be more than one row associated with a given execution.
+ +### Recommendations for Indexing Meta Data Tables + +Spring Batch provides DDL samples for the metadata tables in the core jar file for +several common database platforms. Index declarations are not included in that DDL, +because there are too many variations in how users may want to index, depending on their +precise platform, local conventions, and the business requirements of how the jobs are +operated. The following below provides some indication as to which columns are going to +be used in a `WHERE` clause by the DAO implementations provided by Spring Batch and how +frequently they might be used, so that individual projects can make up their own minds +about indexing: + +| Default Table Name | Where Clause | Frequency | +|----------------------|-----------------------------------------|-------------------------------------------------------------------| +| BATCH\_JOB\_INSTANCE | JOB\_NAME = ? and JOB\_KEY = ? | Every time a job is launched | +|BATCH\_JOB\_EXECUTION | JOB\_INSTANCE\_ID = ? | Every time a job is restarted | +|BATCH\_STEP\_EXECUTION| VERSION = ? |On commit interval, a.k.a. chunk (and at start and end of
step)| +|BATCH\_STEP\_EXECUTION|STEP\_NAME = ? and JOB\_EXECUTION\_ID = ?| Before each step execution | \ No newline at end of file diff --git a/docs/en/spring-batch/spring-batch-integration.md b/docs/en/spring-batch/spring-batch-integration.md new file mode 100644 index 0000000000000000000000000000000000000000..2c1f98547278d23f0adee7711d4b3f26282b9a5b --- /dev/null +++ b/docs/en/spring-batch/spring-batch-integration.md @@ -0,0 +1,1249 @@ +# Spring Batch Integration + +## Spring Batch Integration + +XMLJavaBoth + +### Spring Batch Integration Introduction + +Many users of Spring Batch may encounter requirements that are +outside the scope of Spring Batch but that may be efficiently and +concisely implemented by using Spring Integration. Conversely, Spring +Integration users may encounter Spring Batch requirements and need a way +to efficiently integrate both frameworks. In this context, several +patterns and use-cases emerge, and Spring Batch Integration +addresses those requirements. + +The line between Spring Batch and Spring Integration is not always +clear, but two pieces of advice can +help: Think about granularity, and apply common patterns. Some +of those common patterns are described in this reference manual +section. + +Adding messaging to a batch process enables automation of +operations and also separation and strategizing of key concerns. +For example, a message might trigger a job to execute, and then the +sending of the message can be exposed in a variety of ways. Alternatively, when +a job completes or fails, that event might trigger a message to be sent, +and the consumers of those messages might have operational concerns +that have nothing to do with the application itself. Messaging can +also be embedded in a job (for example reading or writing items for +processing via channels). Remote partitioning and remote chunking +provide methods to distribute workloads over a number of workers. 
+ +This section covers the following key concepts: + +* [Namespace Support](#namespace-support) + +* [Launching Batch Jobs through Messages](#launching-batch-jobs-through-messages) + +* [Providing Feedback with Informational Messages](#providing-feedback-with-informational-messages) + +* [Asynchronous Processors](#asynchronous-processors) + +* [Externalizing + Batch Process Execution](#externalizing-batch-process-execution) + +#### Namespace Support + +Since Spring Batch Integration 1.3, dedicated XML Namespace +support was added, with the aim to provide an easier configuration +experience. In order to activate the namespace, add the following +namespace declarations to your Spring XML Application Context +file: + +``` + + + ... + + +``` + +A fully configured Spring XML Application Context file for Spring +Batch Integration may look like the following: + +``` + + + ... + + +``` + +Appending version numbers to the referenced XSD file is also +allowed, but, as a version-less declaration always uses the +latest schema, we generally do not recommend appending the version +number to the XSD name. Adding a version number +could possibly create issues when updating the Spring Batch +Integration dependencies, as they may require more recent versions +of the XML schema. + +#### Launching Batch Jobs through Messages + +When starting batch jobs by using the core Spring Batch API, you +basically have 2 options: + +* From the command line, with the `CommandLineJobRunner` + +* Programmatically, with either `JobOperator.start()` or `JobLauncher.run()` + +For example, you may want to use the`CommandLineJobRunner` when invoking Batch Jobs by +using a shell script. Alternatively, you may use the`JobOperator` directly (for example, when using +Spring Batch as part of a web application). However, what about +more complex use cases? 
Maybe you need to poll a remote (S)FTP +server to retrieve the data for the Batch Job or your application +has to support multiple different data sources simultaneously. For +example, you may receive data files not only from the web, but also from +FTP and other sources. Maybe additional transformation of the input files is +needed before invoking Spring Batch. + +Therefore, it would be much more powerful to execute the batch job +using Spring Integration and its numerous adapters. For example, +you can use a *File Inbound Channel Adapter* to +monitor a directory in the file-system and start the Batch Job as +soon as the input file arrives. Additionally, you can create Spring +Integration flows that use multiple different adapters to easily +ingest data for your batch jobs from multiple sources +simultaneously using only configuration. Implementing all these +scenarios with Spring Integration is easy, as it allows for +decoupled, event-driven execution of the`JobLauncher`. + +Spring Batch Integration provides the`JobLaunchingMessageHandler` class that you can +use to launch batch jobs. The input for the`JobLaunchingMessageHandler` is provided by a +Spring Integration message, which has a payload of type`JobLaunchRequest`. This class is a wrapper around the `Job`that needs to be launched and around the `JobParameters`necessary to launch the Batch job. + +The following image illustrates the typical Spring Integration +message flow in order to start a Batch job. The[EIP (Enterprise Integration Patterns) website](https://www.enterpriseintegrationpatterns.com/toc.html)provides a full overview of messaging icons and their descriptions. + +![Launch Batch Job](https://docs.spring.io/spring-batch/docs/current/reference/html/images/launch-batch-job.png) + +Figure 1. 
Launch Batch Job + +##### Transforming a file into a JobLaunchRequest + +``` +package io.spring.sbi; + +import org.springframework.batch.core.Job; +import org.springframework.batch.core.JobParametersBuilder; +import org.springframework.batch.integration.launch.JobLaunchRequest; +import org.springframework.integration.annotation.Transformer; +import org.springframework.messaging.Message; + +import java.io.File; + +public class FileMessageToJobRequest { + private Job job; + private String fileParameterName; + + public void setFileParameterName(String fileParameterName) { + this.fileParameterName = fileParameterName; + } + + public void setJob(Job job) { + this.job = job; + } + + @Transformer + public JobLaunchRequest toRequest(Message message) { + JobParametersBuilder jobParametersBuilder = + new JobParametersBuilder(); + + jobParametersBuilder.addString(fileParameterName, + message.getPayload().getAbsolutePath()); + + return new JobLaunchRequest(job, jobParametersBuilder.toJobParameters()); + } +} +``` + +##### The `JobExecution` Response + +When a batch job is being executed, a`JobExecution` instance is returned. This +instance can be used to determine the status of an execution. If +a `JobExecution` is able to be created +successfully, it is always returned, regardless of whether +or not the actual execution is successful. + +The exact behavior on how the `JobExecution`instance is returned depends on the provided`TaskExecutor`. If a`synchronous` (single-threaded)`TaskExecutor` implementation is used, the`JobExecution` response is returned only`after` the job completes. When using an`asynchronous``TaskExecutor`, the`JobExecution` instance is returned +immediately. Users can then take the `id` of`JobExecution` instance +(with `JobExecution.getJobId()`) and query the`JobRepository` for the job’s updated status +using the `JobExplorer`. 
For more +information, please refer to the Spring +Batch reference documentation on[Querying the Repository](job.html#queryingRepository). + +##### Spring Batch Integration Configuration + +Consider a case where someone needs to create a file `inbound-channel-adapter` to listen +for CSV files in the provided directory, hand them off to a transformer +(`FileMessageToJobRequest`), launch the job through the *Job Launching Gateway*, and then +log the output of the `JobExecution` with the `logging-channel-adapter`. + +The following example shows how that common case can be configured in XML: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + + +``` + +The following example shows how that common case can be configured in Java: + +Java Configuration + +``` +@Bean +public FileMessageToJobRequest fileMessageToJobRequest() { + FileMessageToJobRequest fileMessageToJobRequest = new FileMessageToJobRequest(); + fileMessageToJobRequest.setFileParameterName("input.file.name"); + fileMessageToJobRequest.setJob(personJob()); + return fileMessageToJobRequest; +} + +@Bean +public JobLaunchingGateway jobLaunchingGateway() { + SimpleJobLauncher simpleJobLauncher = new SimpleJobLauncher(); + simpleJobLauncher.setJobRepository(jobRepository); + simpleJobLauncher.setTaskExecutor(new SyncTaskExecutor()); + JobLaunchingGateway jobLaunchingGateway = new JobLaunchingGateway(simpleJobLauncher); + + return jobLaunchingGateway; +} + +@Bean +public IntegrationFlow integrationFlow(JobLaunchingGateway jobLaunchingGateway) { + return IntegrationFlows.from(Files.inboundAdapter(new File("/tmp/myfiles")). + filter(new SimplePatternFileListFilter("*.csv")), + c -> c.poller(Pollers.fixedRate(1000).maxMessagesPerPoll(1))). + transform(fileMessageToJobRequest()). + handle(jobLaunchingGateway). + log(LoggingHandler.Level.WARN, "headers.id + ': ' + payload"). 
+ get(); +} +``` + +##### Example ItemReader Configuration + +Now that we are polling for files and launching jobs, we need to configure our Spring +Batch `ItemReader` (for example) to use the files found at the location defined by the job +parameter called "input.file.name", as shown in the following bean configuration: + +The following XML example shows the necessary bean configuration: + +XML Configuration + +``` + + + ... + +``` + +The following Java example shows the necessary bean configuration: + +Java Configuration + +``` +@Bean +@StepScope +public ItemReader sampleReader(@Value("#{jobParameters[input.file.name]}") String resource) { +... + FlatFileItemReader flatFileItemReader = new FlatFileItemReader(); + flatFileItemReader.setResource(new FileSystemResource(resource)); +... + return flatFileItemReader; +} +``` + +The main points of interest in the preceding example are injecting the value of`#{jobParameters['input.file.name']}`as the Resource property value and setting the `ItemReader` bean +to have *Step scope*. Setting the bean to have Step scope takes advantage of +the late binding support, which allows access to the`jobParameters` variable. + +### Available Attributes of the Job-Launching Gateway + +The job-launching gateway has the following attributes that you can set to control a job: + +* `id`: Identifies the underlying Spring bean definition, which is an instance of either: + + * `EventDrivenConsumer` + + * `PollingConsumer`(The exact implementation depends on whether the component’s input channel is a`SubscribableChannel` or `PollableChannel`.) + +* `auto-startup`: Boolean flag to indicate that the endpoint should start automatically on + startup. The default is *true*. + +* `request-channel`: The input `MessageChannel` of this endpoint. + +* `reply-channel`: `MessageChannel` to which the resulting `JobExecution` payload is sent. 
+ +* `reply-timeout`: Lets you specify how long (in milliseconds) this gateway waits for the reply message + to be sent successfully to the reply channel before throwing + an exception. This attribute only applies when the channel + might block (for example, when using a bounded queue channel + that is currently full). Also, keep in mind that, when sending to a`DirectChannel`, the invocation occurs + in the sender’s thread. Therefore, the failing of the send + operation may be caused by other components further downstream. + The `reply-timeout` attribute maps to the`sendTimeout` property of the underlying`MessagingTemplate` instance. If not specified, the attribute + defaults to\-1\, + meaning that, by default, the `Gateway` waits indefinitely. + +* `job-launcher`: Optional. Accepts a + custom`JobLauncher`bean reference. + If not specified the adapter + re-uses the instance that is registered under the `id` of`jobLauncher`. If no default instance + exists, an exception is thrown. + +* `order`: Specifies the order of invocation when this endpoint is connected as a subscriber + to a `SubscribableChannel`. + +### Sub-Elements + +When this `Gateway` is receiving messages from a`PollableChannel`, you must either provide +a global default `Poller` or provide a `Poller` sub-element to the`Job Launching Gateway`. 
+ +The following example shows how to provide a poller in XML: + +XML Configuration + +``` + + + +``` + +The following example shows how to provide a poller in Java: + +Java Configuration + +``` +@Bean +@ServiceActivator(inputChannel = "queueChannel", poller = @Poller(fixedRate="1000")) +public JobLaunchingGateway sampleJobLaunchingGateway() { + JobLaunchingGateway jobLaunchingGateway = new JobLaunchingGateway(jobLauncher()); + jobLaunchingGateway.setOutputChannel(replyChannel()); + return jobLaunchingGateway; +} +``` + +#### Providing Feedback with Informational Messages + +As Spring Batch jobs can run for long times, providing progress +information is often critical. For example, stake-holders may want +to be notified if some or all parts of a batch job have failed. +Spring Batch provides support for this information being gathered +through: + +* Active polling + +* Event-driven listeners + +When starting a Spring Batch job asynchronously (for example, by using the `Job Launching +Gateway`), a `JobExecution` instance is returned. Thus, `JobExecution.getJobId()` can be +used to continuously poll for status updates by retrieving updated instances of the`JobExecution` from the `JobRepository` by using the `JobExplorer`. However, this is +considered sub-optimal, and an event-driven approach should be preferred. + +Therefore, Spring Batch provides listeners, including the three most commonly used +listeners: + +* `StepListener` + +* `ChunkListener` + +* `JobExecutionListener` + +In the example shown in the following image, a Spring Batch job has been configured with a`StepExecutionListener`. Thus, Spring Integration receives and processes any step before +or after events. For example, the received `StepExecution` can be inspected by using a`Router`. Based on the results of that inspection, various things can occur (such as +routing a message to a Mail Outbound Channel Adapter), so that an Email notification can +be sent out based on some condition. 
+ +![Handling Informational Messages](https://docs.spring.io/spring-batch/docs/current/reference/html/images/handling-informational-messages.png) + +Figure 2. Handling Informational Messages + +The following two-part example shows how a listener is configured to send a +message to a `Gateway` for a `StepExecution` events and log its output to a`logging-channel-adapter`. + +First, create the notification integration beans. + +The following example shows the how to create the notification integration beans in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows the how to create the notification integration beans in Java: + +Java Configuration + +``` +@Bean +@ServiceActivator(inputChannel = "stepExecutionsChannel") +public LoggingHandler loggingHandler() { + LoggingHandler adapter = new LoggingHandler(LoggingHandler.Level.WARN); + adapter.setLoggerName("TEST_LOGGER"); + adapter.setLogExpressionString("headers.id + ': ' + payload"); + return adapter; +} + +@MessagingGateway(name = "notificationExecutionsListener", defaultRequestChannel = "stepExecutionsChannel") +public interface NotificationExecutionListener extends StepExecutionListener {} +``` + +| |You need to add the `@IntegrationComponentScan` annotation to your configuration.| +|---|---------------------------------------------------------------------------------| + +Second, modify your job to add a step-level listener. + +The following example shows the how to add a step-level listener in XML: + +XML Configuration + +``` + + + + + + + + + ... + + +``` + +The following example shows the how to add a step-level listener in Java: + +Java Configuration + +``` +public Job importPaymentsJob() { + return jobBuilderFactory.get("importPayments") + .start(stepBuilderFactory.get("step1") + .chunk(200) + .listener(notificationExecutionsListener()) + ... +} +``` + +#### Asynchronous Processors + +Asynchronous Processors help you to scale the processing of items. 
In the asynchronous +processor use case, an `AsyncItemProcessor` serves as a dispatcher, executing the logic of +the `ItemProcessor` for an item on a new thread. Once the item completes, the `Future` is +passed to the `AsynchItemWriter` to be written. + +Therefore, you can increase performance by using asynchronous item processing, basically +letting you implement *fork-join* scenarios. The `AsyncItemWriter` gathers the results and +writes back the chunk as soon as all the results become available. + +The following example shows how to configuration the `AsyncItemProcessor` in XML: + +XML Configuration + +``` + + + + + + + + +``` + +The following example shows how to configuration the `AsyncItemProcessor` in XML: + +Java Configuration + +``` +@Bean +public AsyncItemProcessor processor(ItemProcessor itemProcessor, TaskExecutor taskExecutor) { + AsyncItemProcessor asyncItemProcessor = new AsyncItemProcessor(); + asyncItemProcessor.setTaskExecutor(taskExecutor); + asyncItemProcessor.setDelegate(itemProcessor); + return asyncItemProcessor; +} +``` + +The `delegate` property refers to your `ItemProcessor` bean, and the `taskExecutor`property refers to the `TaskExecutor` of your choice. + +The following example shows how to configure the `AsyncItemWriter` in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to configure the `AsyncItemWriter` in Java: + +Java Configuration + +``` +@Bean +public AsyncItemWriter writer(ItemWriter itemWriter) { + AsyncItemWriter asyncItemWriter = new AsyncItemWriter(); + asyncItemWriter.setDelegate(itemWriter); + return asyncItemWriter; +} +``` + +Again, the `delegate` property is +actually a reference to your `ItemWriter` bean. + +#### Externalizing Batch Process Execution + +The integration approaches discussed so far suggest use cases +where Spring Integration wraps Spring Batch like an outer-shell. +However, Spring Batch can also use Spring Integration internally. 
+Using this approach, Spring Batch users can delegate the +processing of items or even chunks to outside processes. This +allows you to offload complex processing. Spring Batch Integration +provides dedicated support for: + +* Remote Chunking + +* Remote Partitioning + +##### Remote Chunking + +![Remote Chunking](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-chunking-sbi.png) + +Figure 3. Remote Chunking + +Taking things one step further, one can also externalize the +chunk processing by using the`ChunkMessageChannelItemWriter`(provided by Spring Batch Integration), which sends items out +and collects the result. Once sent, Spring Batch continues the +process of reading and grouping items, without waiting for the results. +Rather, it is the responsibility of the `ChunkMessageChannelItemWriter`to gather the results and integrate them back into the Spring Batch process. + +With Spring Integration, you have full +control over the concurrency of your processes (for instance, by +using a `QueueChannel` instead of a`DirectChannel`). Furthermore, by relying on +Spring Integration’s rich collection of Channel Adapters (such as +JMS and AMQP), you can distribute chunks of a Batch job to +external systems for processing. + +A job with a step to be remotely chunked might have a configuration similar to the +following in XML: + +XML Configuration + +``` + + + + + + ... + + +``` + +A job with a step to be remotely chunked might have a configuration similar to the +following in Java: + +Java Configuration + +``` +public Job chunkJob() { + return jobBuilderFactory.get("personJob") + .start(stepBuilderFactory.get("step1") + .chunk(200) + .reader(itemReader()) + .writer(itemWriter()) + .build()) + .build(); + } +``` + +The `ItemReader` reference points to the bean you want to use for reading data on the +manager. The `ItemWriter` reference points to a special `ItemWriter` (called`ChunkMessageChannelItemWriter`), as described above. 
The processor (if any) is left off +the manager configuration, as it is configured on the worker. You should check any +additional component properties, such as throttle limits and so on, when implementing +your use case. + +The following XML configuration provides a basic manager setup: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + + + + + +``` + +The following Java configuration provides a basic manager setup: + +Java Configuration + +``` +@Bean +public org.apache.activemq.ActiveMQConnectionFactory connectionFactory() { + ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(); + factory.setBrokerURL("tcp://localhost:61616"); + return factory; +} + +/* + * Configure outbound flow (requests going to workers) + */ +@Bean +public DirectChannel requests() { + return new DirectChannel(); +} + +@Bean +public IntegrationFlow outboundFlow(ActiveMQConnectionFactory connectionFactory) { + return IntegrationFlows + .from(requests()) + .handle(Jms.outboundAdapter(connectionFactory).destination("requests")) + .get(); +} + +/* + * Configure inbound flow (replies coming from workers) + */ +@Bean +public QueueChannel replies() { + return new QueueChannel(); +} + +@Bean +public IntegrationFlow inboundFlow(ActiveMQConnectionFactory connectionFactory) { + return IntegrationFlows + .from(Jms.messageDrivenChannelAdapter(connectionFactory).destination("replies")) + .channel(replies()) + .get(); +} + +/* + * Configure the ChunkMessageChannelItemWriter + */ +@Bean +public ItemWriter itemWriter() { + MessagingTemplate messagingTemplate = new MessagingTemplate(); + messagingTemplate.setDefaultChannel(requests()); + messagingTemplate.setReceiveTimeout(2000); + ChunkMessageChannelItemWriter chunkMessageChannelItemWriter + = new ChunkMessageChannelItemWriter<>(); + chunkMessageChannelItemWriter.setMessagingOperations(messagingTemplate); + chunkMessageChannelItemWriter.setReplyChannel(replies()); + return chunkMessageChannelItemWriter; +} +``` + +The preceding 
configuration provides us with a number of beans. We +configure our messaging middleware using ActiveMQ and the +inbound/outbound JMS adapters provided by Spring Integration. As +shown, our `itemWriter` bean, which is +referenced by our job step, uses the`ChunkMessageChannelItemWriter` for writing chunks over the +configured middleware. + +Now we can move on to the worker configuration, as shown in the following example: + +The following example shows the worker configuration in XML: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +The following example shows the worker configuration in Java: + +Java Configuration + +``` +@Bean +public org.apache.activemq.ActiveMQConnectionFactory connectionFactory() { + ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory(); + factory.setBrokerURL("tcp://localhost:61616"); + return factory; +} + +/* + * Configure inbound flow (requests coming from the manager) + */ +@Bean +public DirectChannel requests() { + return new DirectChannel(); +} + +@Bean +public IntegrationFlow inboundFlow(ActiveMQConnectionFactory connectionFactory) { + return IntegrationFlows + .from(Jms.messageDrivenChannelAdapter(connectionFactory).destination("requests")) + .channel(requests()) + .get(); +} + +/* + * Configure outbound flow (replies going to the manager) + */ +@Bean +public DirectChannel replies() { + return new DirectChannel(); +} + +@Bean +public IntegrationFlow outboundFlow(ActiveMQConnectionFactory connectionFactory) { + return IntegrationFlows + .from(replies()) + .handle(Jms.outboundAdapter(connectionFactory).destination("replies")) + .get(); +} + +/* + * Configure the ChunkProcessorChunkHandler + */ +@Bean +@ServiceActivator(inputChannel = "requests", outputChannel = "replies") +public ChunkProcessorChunkHandler chunkProcessorChunkHandler() { + ChunkProcessor chunkProcessor + = new SimpleChunkProcessor<>(itemProcessor(), itemWriter()); + ChunkProcessorChunkHandler chunkProcessorChunkHandler 
+ = new ChunkProcessorChunkHandler<>(); + chunkProcessorChunkHandler.setChunkProcessor(chunkProcessor); + return chunkProcessorChunkHandler; +} +``` + +Most of these configuration items should look familiar from the +manager configuration. Workers do not need access to +the Spring Batch `JobRepository` nor +to the actual job configuration file. The main bean of interest +is the `chunkProcessorChunkHandler`. The`chunkProcessor` property of `ChunkProcessorChunkHandler` takes a +configured `SimpleChunkProcessor`, which is where you would provide a reference to your`ItemWriter` (and, optionally, your`ItemProcessor`) that will run on the worker +when it receives chunks from the manager. + +For more information, see the section of the "Scalability" chapter on[Remote Chunking](https://docs.spring.io/spring-batch/docs/current/reference/html/scalability.html#remoteChunking). + +Starting from version 4.1, Spring Batch Integration introduces the `@EnableBatchIntegration`annotation that can be used to simplify a remote chunking setup. This annotation provides +two beans that can be autowired in the application context: + +* `RemoteChunkingManagerStepBuilderFactory`: used to configure the manager step + +* `RemoteChunkingWorkerBuilder`: used to configure the remote worker integration flow + +These APIs take care of configuring a number of components as described in the following diagram: + +![Remote Chunking Configuration](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-chunking-config.png) + +Figure 4. 
Remote Chunking Configuration + +On the manager side, the `RemoteChunkingManagerStepBuilderFactory` lets you +configure a manager step by declaring: + +* the item reader to read items and send them to workers + +* the output channel ("Outgoing requests") to send requests to workers + +* the input channel ("Incoming replies") to receive replies from workers + +A `ChunkMessageChannelItemWriter` and the `MessagingTemplate` are not needed to be explicitly configured +(Those can still be explicitly configured if required). + +On the worker side, the `RemoteChunkingWorkerBuilder` allows you to configure a worker to: + +* listen to requests sent by the manager on the input channel ("Incoming requests") + +* call the `handleChunk` method of `ChunkProcessorChunkHandler` for each request + with the configured `ItemProcessor` and `ItemWriter` + +* send replies on the output channel ("Outgoing replies") to the manager + +There is no need to explicitly configure the `SimpleChunkProcessor`and the `ChunkProcessorChunkHandler` (Those can be explicitly configured if required). 
+ +The following example shows how to use these APIs: + +``` +@EnableBatchIntegration +@EnableBatchProcessing +public class RemoteChunkingJobConfiguration { + + @Configuration + public static class ManagerConfiguration { + + @Autowired + private RemoteChunkingManagerStepBuilderFactory managerStepBuilderFactory; + + @Bean + public TaskletStep managerStep() { + return this.managerStepBuilderFactory.get("managerStep") + .chunk(100) + .reader(itemReader()) + .outputChannel(requests()) // requests sent to workers + .inputChannel(replies()) // replies received from workers + .build(); + } + + // Middleware beans setup omitted + + } + + @Configuration + public static class WorkerConfiguration { + + @Autowired + private RemoteChunkingWorkerBuilder workerBuilder; + + @Bean + public IntegrationFlow workerFlow() { + return this.workerBuilder + .itemProcessor(itemProcessor()) + .itemWriter(itemWriter()) + .inputChannel(requests()) // requests received from the manager + .outputChannel(replies()) // replies sent to the manager + .build(); + } + + // Middleware beans setup omitted + + } + +} +``` + +You can find a complete example of a remote chunking job[here](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples#remote-chunking-sample). + +##### Remote Partitioning + +![Remote Partitioning](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-partitioning.png) + +Figure 5. Remote Partitioning + +Remote Partitioning, on the other hand, is useful when it +is not the processing of items but rather the associated I/O that +causes the bottleneck. Using Remote Partitioning, work can +be farmed out to workers that execute complete Spring Batch +steps. Thus, each worker has its own `ItemReader`, `ItemProcessor`, and`ItemWriter`. For this purpose, Spring Batch +Integration provides the `MessageChannelPartitionHandler`. 
+ +This implementation of the `PartitionHandler`interface uses `MessageChannel` instances to +send instructions to remote workers and receive their responses. +This provides a nice abstraction from the transports (such as JMS +and AMQP) being used to communicate with the remote workers. + +The section of the "Scalability" chapter that addresses[remote partitioning](scalability.html#partitioning) provides an overview of the concepts and +components needed to configure remote partitioning and shows an +example of using the default`TaskExecutorPartitionHandler` to partition +in separate local threads of execution. For remote partitioning +to multiple JVMs, two additional components are required: + +* A remoting fabric or grid environment + +* A `PartitionHandler` implementation that supports the desired + remoting fabric or grid environment + +Similar to remote chunking, JMS can be used as the “remoting fabric”. In that case, use +a `MessageChannelPartitionHandler` instance as the `PartitionHandler` implementation, +as described earlier. 
+ +The following example assumes an existing partitioned job and focuses on the`MessageChannelPartitionHandler` and JMS configuration in XML: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +The following example assumes an existing partitioned job and focuses on the`MessageChannelPartitionHandler` and JMS configuration in Java: + +Java Configuration + +``` +/* + * Configuration of the manager side + */ +@Bean +public PartitionHandler partitionHandler() { + MessageChannelPartitionHandler partitionHandler = new MessageChannelPartitionHandler(); + partitionHandler.setStepName("step1"); + partitionHandler.setGridSize(3); + partitionHandler.setReplyChannel(outboundReplies()); + MessagingTemplate template = new MessagingTemplate(); + template.setDefaultChannel(outboundRequests()); + template.setReceiveTimeout(100000); + partitionHandler.setMessagingOperations(template); + return partitionHandler; +} + +@Bean +public QueueChannel outboundReplies() { + return new QueueChannel(); +} + +@Bean +public DirectChannel outboundRequests() { + return new DirectChannel(); +} + +@Bean +public IntegrationFlow outboundJmsRequests() { + return IntegrationFlows.from("outboundRequests") + .handle(Jms.outboundGateway(connectionFactory()) + .requestDestination("requestsQueue")) + .get(); +} + +@Bean +@ServiceActivator(inputChannel = "inboundStaging") +public AggregatorFactoryBean partitioningMessageHandler() throws Exception { + AggregatorFactoryBean aggregatorFactoryBean = new AggregatorFactoryBean(); + aggregatorFactoryBean.setProcessorBean(partitionHandler()); + aggregatorFactoryBean.setOutputChannel(outboundReplies()); + // configure other propeties of the aggregatorFactoryBean + return aggregatorFactoryBean; +} + +@Bean +public DirectChannel inboundStaging() { + return new DirectChannel(); +} + +@Bean +public IntegrationFlow inboundJmsStaging() { + return IntegrationFlows + 
.from(Jms.messageDrivenChannelAdapter(connectionFactory()) + .configureListenerContainer(c -> c.subscriptionDurable(false)) + .destination("stagingQueue")) + .channel(inboundStaging()) + .get(); +} + +/* + * Configuration of the worker side + */ +@Bean +public StepExecutionRequestHandler stepExecutionRequestHandler() { + StepExecutionRequestHandler stepExecutionRequestHandler = new StepExecutionRequestHandler(); + stepExecutionRequestHandler.setJobExplorer(jobExplorer); + stepExecutionRequestHandler.setStepLocator(stepLocator()); + return stepExecutionRequestHandler; +} + +@Bean +@ServiceActivator(inputChannel = "inboundRequests", outputChannel = "outboundStaging") +public StepExecutionRequestHandler serviceActivator() throws Exception { + return stepExecutionRequestHandler(); +} + +@Bean +public DirectChannel inboundRequests() { + return new DirectChannel(); +} + +public IntegrationFlow inboundJmsRequests() { + return IntegrationFlows + .from(Jms.messageDrivenChannelAdapter(connectionFactory()) + .configureListenerContainer(c -> c.subscriptionDurable(false)) + .destination("requestsQueue")) + .channel(inboundRequests()) + .get(); +} + +@Bean +public DirectChannel outboundStaging() { + return new DirectChannel(); +} + +@Bean +public IntegrationFlow outboundJmsStaging() { + return IntegrationFlows.from("outboundStaging") + .handle(Jms.outboundGateway(connectionFactory()) + .requestDestination("stagingQueue")) + .get(); +} +``` + +You must also ensure that the partition `handler` attribute maps to the `partitionHandler`bean. + +The following example maps the partition `handler` attribute to the `partitionHandler` in +XML: + +XML Configuration + +``` + + + + ... 
+ + +``` + +The following example maps the partition `handler` attribute to the `partitionHandler` in +Java: + +Java Configuration + +``` + public Job personJob() { + return jobBuilderFactory.get("personJob") + .start(stepBuilderFactory.get("step1.manager") + .partitioner("step1.worker", partitioner()) + .partitionHandler(partitionHandler()) + .build()) + .build(); + } +``` + +You can find a complete example of a remote partitioning job[here](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples#remote-partitioning-sample). + +The `@EnableBatchIntegration` annotation that can be used to simplify a remote +partitioning setup. This annotation provides two beans useful for remote partitioning: + +* `RemotePartitioningManagerStepBuilderFactory`: used to configure the manager step + +* `RemotePartitioningWorkerStepBuilderFactory`: used to configure the worker step + +These APIs take care of configuring a number of components as described in the following diagram: + +![Remote Partitioning Configuration (with job repository polling)](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-partitioning-polling-config.png) + +Figure 6. Remote Partitioning Configuration (with job repository polling) + +![Remote Partitioning Configuration (with replies aggregation)](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-partitioning-aggregation-config.png) + +Figure 7. 
Remote Partitioning Configuration (with replies aggregation)
+
+On the manager side, the `RemotePartitioningManagerStepBuilderFactory` allows you to
+configure a manager step by declaring:
+
+* the `Partitioner` used to partition data
+
+* the output channel ("Outgoing requests") to send requests to workers
+
+* the input channel ("Incoming replies") to receive replies from workers (when configuring replies aggregation)
+
+* the poll interval and timeout parameters (when configuring job repository polling)
+
+The `MessageChannelPartitionHandler` and the `MessagingTemplate` do not need to be explicitly configured
+(those can still be explicitly configured if required).
+
+On the worker side, the `RemotePartitioningWorkerStepBuilderFactory` allows you to configure a worker to:
+
+* listen to requests sent by the manager on the input channel ("Incoming requests")
+
+* call the `handle` method of `StepExecutionRequestHandler` for each request
+
+* send replies on the output channel ("Outgoing replies") to the manager
+
+There is no need to explicitly configure the `StepExecutionRequestHandler` (which can be explicitly configured if required).
+ +The following example shows how to use these APIs: + +``` +@Configuration +@EnableBatchProcessing +@EnableBatchIntegration +public class RemotePartitioningJobConfiguration { + + @Configuration + public static class ManagerConfiguration { + + @Autowired + private RemotePartitioningManagerStepBuilderFactory managerStepBuilderFactory; + + @Bean + public Step managerStep() { + return this.managerStepBuilderFactory + .get("managerStep") + .partitioner("workerStep", partitioner()) + .gridSize(10) + .outputChannel(outgoingRequestsToWorkers()) + .inputChannel(incomingRepliesFromWorkers()) + .build(); + } + + // Middleware beans setup omitted + + } + + @Configuration + public static class WorkerConfiguration { + + @Autowired + private RemotePartitioningWorkerStepBuilderFactory workerStepBuilderFactory; + + @Bean + public Step workerStep() { + return this.workerStepBuilderFactory + .get("workerStep") + .inputChannel(incomingRequestsFromManager()) + .outputChannel(outgoingRepliesToManager()) + .chunk(100) + .reader(itemReader()) + .processor(itemProcessor()) + .writer(itemWriter()) + .build(); + } + + // Middleware beans setup omitted + + } + +} +``` \ No newline at end of file diff --git a/docs/en/spring-batch/spring-batch-intro.md b/docs/en/spring-batch/spring-batch-intro.md new file mode 100644 index 0000000000000000000000000000000000000000..f91446f721572fd654f3f66293815eddf84911db --- /dev/null +++ b/docs/en/spring-batch/spring-batch-intro.md @@ -0,0 +1,569 @@ +# Spring Batch Introduction + +## Spring Batch Introduction + +Many applications within the enterprise domain require bulk processing to perform +business operations in mission critical environments. These business operations include: + +* Automated, complex processing of large volumes of information that is most efficiently + processed without user interaction. These operations typically include time-based events + (such as month-end calculations, notices, or correspondence). 
+
+* Periodic application of complex business rules processed repetitively across very large
+  data sets (for example, insurance benefit determination or rate adjustments).
+
+* Integration of information that is received from internal and external systems that
+  typically requires formatting, validation, and processing in a transactional manner into
+  the system of record. Batch processing is used to process billions of transactions every
+  day for enterprises.
+
+Spring Batch is a lightweight, comprehensive batch framework designed to enable the
+development of robust batch applications vital for the daily operations of enterprise
+systems. Spring Batch builds upon the characteristics of the Spring Framework that people
+have come to expect (productivity, POJO-based development approach, and general ease of
+use), while making it easy for developers to access and leverage more advanced enterprise
+services when necessary. Spring Batch is not a scheduling framework. There are many good
+enterprise schedulers (such as Quartz, Tivoli, Control-M, etc.) available in both the
+commercial and open source spaces. It is intended to work in conjunction with a
+scheduler, not replace a scheduler.
+
+Spring Batch provides reusable functions that are essential in processing large volumes
+of records, including logging/tracing, transaction management, job processing statistics,
+job restart, skip, and resource management. It also provides more advanced technical
+services and features that enable extremely high-volume and high performance batch jobs
+through optimization and partitioning techniques. Spring Batch can be used in both simple
+use cases (such as reading a file into a database or running a stored procedure) as well
+as complex, high volume use cases (such as moving high volumes of data between databases,
+transforming it, and so on). High-volume batch jobs can leverage the framework in a
+highly scalable manner to process significant volumes of information.
+ +### Background + +While open source software projects and associated communities have focused greater +attention on web-based and microservices-based architecture frameworks, there has been a +notable lack of focus on reusable architecture frameworks to accommodate Java-based batch +processing needs, despite continued needs to handle such processing within enterprise IT +environments. The lack of a standard, reusable batch architecture has resulted in the +proliferation of many one-off, in-house solutions developed within client enterprise IT +functions. + +SpringSource (now Pivotal) and Accenture collaborated to change this. Accenture’s +hands-on industry and technical experience in implementing batch architectures, +SpringSource’s depth of technical experience, and Spring’s proven programming model +together made a natural and powerful partnership to create high-quality, market-relevant +software aimed at filling an important gap in enterprise Java. Both companies worked with +a number of clients who were solving similar problems by developing Spring-based batch +architecture solutions. This provided some useful additional detail and real-life +constraints that helped to ensure the solution can be applied to the real-world problems +posed by clients. + +Accenture contributed previously proprietary batch processing architecture frameworks to +the Spring Batch project, along with committer resources to drive support, enhancements, +and the existing feature set. Accenture’s contribution was based upon decades of +experience in building batch architectures with the last several generations of +platforms: COBOL/Mainframe, C++/Unix, and now Java/anywhere. + +The collaborative effort between Accenture and SpringSource aimed to promote the +standardization of software processing approaches, frameworks, and tools that can be +consistently leveraged by enterprise users when creating batch applications. 
Companies +and government agencies desiring to deliver standard, proven solutions to their +enterprise IT environments can benefit from Spring Batch. + +### Usage Scenarios + +A typical batch program generally: + +* Reads a large number of records from a database, file, or queue. + +* Processes the data in some fashion. + +* Writes back data in a modified form. + +Spring Batch automates this basic batch iteration, providing the capability to process +similar transactions as a set, typically in an offline environment without any user +interaction. Batch jobs are part of most IT projects, and Spring Batch is the only open +source framework that provides a robust, enterprise-scale solution. + +Business Scenarios + +* Commit batch process periodically + +* Concurrent batch processing: parallel processing of a job + +* Staged, enterprise message-driven processing + +* Massively parallel batch processing + +* Manual or scheduled restart after failure + +* Sequential processing of dependent steps (with extensions to workflow-driven batches) + +* Partial processing: skip records (for example, on rollback) + +* Whole-batch transaction, for cases with a small batch size or existing stored + procedures/scripts + +Technical Objectives + +* Batch developers use the Spring programming model: Concentrate on business logic and + let the framework take care of infrastructure. + +* Clear separation of concerns between the infrastructure, the batch execution + environment, and the batch application. + +* Provide common, core execution services as interfaces that all projects can implement. + +* Provide simple and default implementations of the core execution interfaces that can be + used 'out of the box'. + +* Easy to configure, customize, and extend services, by leveraging the spring framework + in all layers. + +* All existing core services should be easy to replace or extend, without any impact to + the infrastructure layer. 
+ +* Provide a simple deployment model, with the architecture JARs completely separate from + the application, built using Maven. + +### Spring Batch Architecture + +Spring Batch is designed with extensibility and a diverse group of end users in mind. The +figure below shows the layered architecture that supports the extensibility and ease of +use for end-user developers. + +![Figure 1.1: Spring Batch Layered Architecture](https://docs.spring.io/spring-batch/docs/current/reference/html/images/spring-batch-layers.png) + +Figure 1. Spring Batch Layered Architecture + +This layered architecture highlights three major high-level components: Application, +Core, and Infrastructure. The application contains all batch jobs and custom code written +by developers using Spring Batch. The Batch Core contains the core runtime classes +necessary to launch and control a batch job. It includes implementations for`JobLauncher`, `Job`, and `Step`. Both Application and Core are built on top of a common +infrastructure. This infrastructure contains common readers and writers and services +(such as the `RetryTemplate`), which are used both by application developers(readers and +writers, such as `ItemReader` and `ItemWriter`) and the core framework itself (retry, +which is its own library). + +### General Batch Principles and Guidelines + +The following key principles, guidelines, and general considerations should be considered +when building a batch solution. + +* Remember that a batch architecture typically affects on-line architecture and vice + versa. Design with both architectures and environments in mind using common building + blocks when possible. + +* Simplify as much as possible and avoid building complex logical structures in single + batch applications. + +* Keep the processing and storage of data physically close together (in other words, keep + your data where your processing occurs). + +* Minimize system resource use, especially I/O. 
Perform as many operations as possible in + internal memory. + +* Review application I/O (analyze SQL statements) to ensure that unnecessary physical I/O + is avoided. In particular, the following four common flaws need to be looked for: + + * Reading data for every transaction when the data could be read once and cached or kept + in the working storage. + + * Rereading data for a transaction where the data was read earlier in the same + transaction. + + * Causing unnecessary table or index scans. + + * Not specifying key values in the WHERE clause of an SQL statement. + +* Do not do things twice in a batch run. For instance, if you need data summarization for + reporting purposes, you should (if possible) increment stored totals when data is being + initially processed, so your reporting application does not have to reprocess the same + data. + +* Allocate enough memory at the beginning of a batch application to avoid time-consuming + reallocation during the process. + +* Always assume the worst with regard to data integrity. Insert adequate checks and + record validation to maintain data integrity. + +* Implement checksums for internal validation where possible. For example, flat files + should have a trailer record telling the total of records in the file and an aggregate of + the key fields. + +* Plan and execute stress tests as early as possible in a production-like environment + with realistic data volumes. + +* In large batch systems, backups can be challenging, especially if the system is running + concurrent with on-line on a 24-7 basis. Database backups are typically well taken care + of in the on-line design, but file backups should be considered to be just as important. + If the system depends on flat files, file backup procedures should not only be in place + and documented but be regularly tested as well. 
+ +### Batch Processing Strategies + +To help design and implement batch systems, basic batch application building blocks and +patterns should be provided to the designers and programmers in the form of sample +structure charts and code shells. When starting to design a batch job, the business logic +should be decomposed into a series of steps that can be implemented using the following +standard building blocks: + +* *Conversion Applications:* For each type of file supplied by or generated to an + external system, a conversion application must be created to convert the transaction + records supplied into a standard format required for processing. This type of batch + application can partly or entirely consist of translation utility modules (see Basic + Batch Services). + +* *Validation Applications:* Validation applications ensure that all input/output + records are correct and consistent. Validation is typically based on file headers and + trailers, checksums and validation algorithms, and record level cross-checks. + +* *Extract Applications:* An application that reads a set of records from a database or + input file, selects records based on predefined rules, and writes the records to an + output file. + +* *Extract/Update Applications:* An application that reads records from a database or + an input file and makes changes to a database or an output file driven by the data found + in each input record. + +* *Processing and Updating Applications:* An application that performs processing on + input transactions from an extract or a validation application. The processing usually + involves reading a database to obtain data required for processing, potentially updating + the database and creating records for output processing. + +* *Output/Format Applications:* Applications that read an input file, restructure data + from this record according to a standard format, and produce an output file for printing + or transmission to another program or system. 
+ +Additionally, a basic application shell should be provided for business logic that cannot +be built using the previously mentioned building blocks. + +In addition to the main building blocks, each application may use one or more of standard +utility steps, such as: + +* Sort: A program that reads an input file and produces an output file where records + have been re-sequenced according to a sort key field in the records. Sorts are usually + performed by standard system utilities. + +* Split: A program that reads a single input file and writes each record to one of + several output files based on a field value. Splits can be tailored or performed by + parameter-driven standard system utilities. + +* Merge: A program that reads records from multiple input files and produces one output + file with combined data from the input files. Merges can be tailored or performed by + parameter-driven standard system utilities. + +Batch applications can additionally be categorized by their input source: + +* Database-driven applications are driven by rows or values retrieved from the database. + +* File-driven applications are driven by records or values retrieved from a file. + +* Message-driven applications are driven by messages retrieved from a message queue. + +The foundation of any batch system is the processing strategy. Factors affecting the +selection of the strategy include: estimated batch system volume, concurrency with +on-line systems or with other batch systems, available batch windows. (Note that, with +more enterprises wanting to be up and running 24x7, clear batch windows are +disappearing). + +Typical processing options for batch are (in increasing order of implementation +complexity): + +* Normal processing during a batch window in off-line mode. + +* Concurrent batch or on-line processing. + +* Parallel processing of many different batch runs or jobs at the same time. + +* Partitioning (processing of many instances of the same job at the same time). 
+ +* A combination of the preceding options. + +Some or all of these options may be supported by a commercial scheduler. + +The following section discusses these processing options in more detail. It is important +to notice that, as a rule of thumb, the commit and locking strategy adopted by batch +processes depends on the type of processing performed and that the on-line locking +strategy should also use the same principles. Therefore, the batch architecture cannot be +simply an afterthought when designing an overall architecture. + +The locking strategy can be to use only normal database locks or to implement an +additional custom locking service in the architecture. The locking service would track +database locking (for example, by storing the necessary information in a dedicated +db-table) and give or deny permissions to the application programs requesting a db +operation. Retry logic could also be implemented by this architecture to avoid aborting a +batch job in case of a lock situation. + +**1. Normal processing in a batch window** For simple batch processes running in a separate +batch window where the data being updated is not required by on-line users or other batch +processes, concurrency is not an issue and a single commit can be done at the end of the +batch run. + +In most cases, a more robust approach is more appropriate. Keep in mind that batch +systems have a tendency to grow as time goes by, both in terms of complexity and the data +volumes they handle. If no locking strategy is in place and the system still relies on a +single commit point, modifying the batch programs can be painful. Therefore, even with +the simplest batch systems, consider the need for commit logic for restart-recovery +options as well as the information concerning the more complex cases described later in +this section. + +**2. 
Concurrent batch or on-line processing** Batch applications processing data that can +be simultaneously updated by on-line users should not lock any data (either in the +database or in files) which could be required by on-line users for more than a few +seconds. Also, updates should be committed to the database at the end of every few +transactions. This minimizes the portion of data that is unavailable to other processes +and the elapsed time the data is unavailable. + +Another option to minimize physical locking is to have logical row-level locking +implemented with either an Optimistic Locking Pattern or a Pessimistic Locking Pattern. + +* Optimistic locking assumes a low likelihood of record contention. It typically means + inserting a timestamp column in each database table used concurrently by both batch and + on-line processing. When an application fetches a row for processing, it also fetches the + timestamp. As the application then tries to update the processed row, the update uses the + original timestamp in the WHERE clause. If the timestamp matches, the data and the + timestamp are updated. If the timestamp does not match, this indicates that another + application has updated the same row between the fetch and the update attempt. Therefore, + the update cannot be performed. + +* Pessimistic locking is any locking strategy that assumes there is a high likelihood of + record contention and therefore either a physical or logical lock needs to be obtained at + retrieval time. One type of pessimistic logical locking uses a dedicated lock-column in + the database table. When an application retrieves the row for update, it sets a flag in + the lock column. With the flag in place, other applications attempting to retrieve the + same row logically fail. When the application that sets the flag updates the row, it also + clears the flag, enabling the row to be retrieved by other applications. 
Please note that + the integrity of data must be maintained also between the initial fetch and the setting + of the flag, for example by using db locks (such as `SELECT FOR UPDATE`). Note also that + this method suffers from the same downside as physical locking except that it is somewhat + easier to manage building a time-out mechanism that gets the lock released if the user + goes to lunch while the record is locked. + +These patterns are not necessarily suitable for batch processing, but they might be used +for concurrent batch and on-line processing (such as in cases where the database does not +support row-level locking). As a general rule, optimistic locking is more suitable for +on-line applications, while pessimistic locking is more suitable for batch applications. +Whenever logical locking is used, the same scheme must be used for all applications +accessing data entities protected by logical locks. + +Note that both of these solutions only address locking a single record. Often, we may +need to lock a logically related group of records. With physical locks, you have to +manage these very carefully in order to avoid potential deadlocks. With logical locks, it +is usually best to build a logical lock manager that understands the logical record +groups you want to protect and that can ensure that locks are coherent and +non-deadlocking. This logical lock manager usually uses its own tables for lock +management, contention reporting, time-out mechanism, and other concerns. + +**3. Parallel Processing** Parallel processing allows multiple batch runs or jobs to run in +parallel to minimize the total elapsed batch processing time. This is not a problem as +long as the jobs are not sharing the same files, db-tables, or index spaces. If they do, +this service should be implemented using partitioned data. Another option is to build an +architecture module for maintaining interdependencies by using a control table. 
A control +table should contain a row for each shared resource and whether it is in use by an +application or not. The batch architecture or the application in a parallel job would +then retrieve information from that table to determine if it can get access to the +resource it needs or not. + +If the data access is not a problem, parallel processing can be implemented through the +use of additional threads to process in parallel. In the mainframe environment, parallel +job classes have traditionally been used, in order to ensure adequate CPU time for all +the processes. Regardless, the solution has to be robust enough to ensure time slices for +all the running processes. + +Other key issues in parallel processing include load balancing and the availability of +general system resources such as files, database buffer pools, and so on. Also note that +the control table itself can easily become a critical resource. + +**4. Partitioning** Using partitioning allows multiple versions of large batch applications +to run concurrently. The purpose of this is to reduce the elapsed time required to +process long batch jobs. Processes that can be successfully partitioned are those where +the input file can be split and/or the main database tables partitioned to allow the +application to run against different sets of data. + +In addition, processes which are partitioned must be designed to only process their +assigned data set. A partitioning architecture has to be closely tied to the database +design and the database partitioning strategy. Note that database partitioning does not +necessarily mean physical partitioning of the database, although in most cases this is +advisable. The following picture illustrates the partitioning approach: + +![Figure 1.2: Partitioned Process](https://docs.spring.io/spring-batch/docs/current/reference/html/images/partitioned.png) + +Figure 2. 
Partitioned Process + +The architecture should be flexible enough to allow dynamic configuration of the number +of partitions. Both automatic and user controlled configuration should be considered. +Automatic configuration may be based on parameters such as the input file size and the +number of input records. + +**4.1 Partitioning Approaches** Selecting a partitioning approach has to be done on a +case-by-case basis. The following list describes some of the possible partitioning +approaches: + +*1. Fixed and Even Break-Up of Record Set* + +This involves breaking the input record set into an even number of portions (for example, +10, where each portion has exactly 1/10th of the entire record set). Each portion is then +processed by one instance of the batch/extract application. + +In order to use this approach, preprocessing is required to split the record set up. The +result of this split will be a lower and upper bound placement number which can be used +as input to the batch/extract application in order to restrict its processing to only its +portion. + +Preprocessing could be a large overhead, as it has to calculate and determine the bounds +of each portion of the record set. + +*2. Break up by a Key Column* + +This involves breaking up the input record set by a key column, such as a location code, +and assigning data from each key to a batch instance. In order to achieve this, column +values can be either: + +* Assigned to a batch instance by a partitioning table (described later in this + section). + +* Assigned to a batch instance by a portion of the value (such as 0000-0999, 1000 - 1999, + and so on). + +Under option 1, adding new values means a manual reconfiguration of the batch/extract to +ensure that the new value is added to a particular instance. + +Under option 2, this ensures that all values are covered via an instance of the batch +job. 
However, the number of values processed by one instance is dependent on the +distribution of column values (there may be a large number of locations in the 0000-0999 +range, and few in the 1000-1999 range). Under this option, the data range should be +designed with partitioning in mind. + +Under both options, the optimal even distribution of records to batch instances cannot be +realized. There is no dynamic configuration of the number of batch instances used. + +*3. Breakup by Views* + +This approach is basically breakup by a key column but on the database level. It involves +breaking up the record set into views. These views are used by each instance of the batch +application during its processing. The breakup is done by grouping the data. + +With this option, each instance of a batch application has to be configured to hit a +particular view (instead of the master table). Also, with the addition of new data +values, this new group of data has to be included into a view. There is no dynamic +configuration capability, as a change in the number of instances results in a change to +the views. + +*4. Addition of a Processing Indicator* + +This involves the addition of a new column to the input table, which acts as an +indicator. As a preprocessing step, all indicators are marked as being non-processed. +During the record fetch stage of the batch application, records are read on the condition +that that record is marked as being non-processed, and once they are read (with lock), +they are marked as being in processing. When that record is completed, the indicator is +updated to either complete or error. Many instances of a batch application can be started +without a change, as the additional column ensures that a record is only processed once. + +With this option, I/O on the table increases dynamically. In the case of an updating +batch application, this impact is reduced, as a write must occur anyway. + +*5. 
Extract Table to a Flat File* + +This involves the extraction of the table into a file. This file can then be split into +multiple segments and used as input to the batch instances. + +With this option, the additional overhead of extracting the table into a file and +splitting it may cancel out the effect of multi-partitioning. Dynamic configuration can +be achieved by changing the file splitting script. + +*6. Use of a Hashing Column* + +This scheme involves the addition of a hash column (key/index) to the database tables +used to retrieve the driver record. This hash column has an indicator to determine which +instance of the batch application processes this particular row. For example, if there +are three batch instances to be started, then an indicator of 'A' marks a row for +processing by instance 1, an indicator of 'B' marks a row for processing by instance 2, +and an indicator of 'C' marks a row for processing by instance 3. + +The procedure used to retrieve the records would then have an additional `WHERE` clause +to select all rows marked by a particular indicator. The inserts in this table would +involve the addition of the marker field, which would be defaulted to one of the +instances (such as 'A'). + +A simple batch application would be used to update the indicators, such as to +redistribute the load between the different instances. When a sufficiently large number +of new rows have been added, this batch can be run (anytime, except in the batch window) +to redistribute the new rows to other instances. + +Additional instances of the batch application only require the running of the batch +application as described in the preceding paragraphs to redistribute the indicators to +work with a new number of instances. 
+ +**4.2 Database and Application Design Principles** + +An architecture that supports multi-partitioned applications which run against +partitioned database tables using the key column approach should include a central +partition repository for storing partition parameters. This provides flexibility and +ensures maintainability. The repository generally consists of a single table, known as +the partition table. + +Information stored in the partition table is static and, in general, should be maintained +by the DBA. The table should consist of one row of information for each partition of a +multi-partitioned application. The table should have columns for Program ID Code, +Partition Number (logical ID of the partition), Low Value of the db key column for this +partition, and High Value of the db key column for this partition. + +On program start-up, the program `id` and partition number should be passed to the +application from the architecture (specifically, from the Control Processing Tasklet). If +a key column approach is used, these variables are used to read the partition table in +order to determine what range of data the application is to process. In addition the +partition number must be used throughout the processing to: + +* Add to the output files/database updates in order for the merge process to work + properly. + +* Report normal processing to the batch log and any errors to the architecture error + handler. + +**4.3 Minimizing Deadlocks** + +When applications run in parallel or are partitioned, contention in database resources +and deadlocks may occur. It is critical that the database design team eliminates +potential contention situations as much as possible as part of the database design. + +Also, the developers must ensure that the database index tables are designed with +deadlock prevention and performance in mind. + +Deadlocks or hot spots often occur in administration or architecture tables, such as log +tables, control tables, and lock tables. 
The implications of these should be taken into +account as well. A realistic stress test is crucial for identifying the possible +bottlenecks in the architecture. + +To minimize the impact of conflicts on data, the architecture should provide services +such as wait-and-retry intervals when attaching to a database or when encountering a +deadlock. This means a built-in mechanism to react to certain database return codes and, +instead of issuing an immediate error, waiting a predetermined amount of time and +retrying the database operation. + +**4.4 Parameter Passing and Validation** + +The partition architecture should be relatively transparent to application developers. +The architecture should perform all tasks associated with running the application in a +partitioned mode, including: + +* Retrieving partition parameters before application start-up. + +* Validating partition parameters before application start-up. + +* Passing parameters to the application at start-up. + +The validation should include checks to ensure that: + +* The application has sufficient partitions to cover the whole data range. + +* There are no gaps between partitions. + +If the database is partitioned, some additional validation may be necessary to ensure +that a single partition does not span database partitions. + +Also, the architecture should take into consideration the consolidation of partitions. +Key questions include: + +* Must all the partitions be finished before going into the next job step? + +* What happens if one of the partitions aborts? 
\ No newline at end of file diff --git a/docs/en/spring-batch/step.md b/docs/en/spring-batch/step.md new file mode 100644 index 0000000000000000000000000000000000000000..e301031bba80552a4fe2340860fbcbaf0e95526b --- /dev/null +++ b/docs/en/spring-batch/step.md @@ -0,0 +1,2194 @@ +# Configuring a Step + +## Configuring a `Step` + +XMLJavaBoth + +As discussed in [the domain chapter](domain.html#domainLanguageOfBatch), a `Step` is a +domain object that encapsulates an independent, sequential phase of a batch job and +contains all of the information necessary to define and control the actual batch +processing. This is a necessarily vague description because the contents of any given`Step` are at the discretion of the developer writing a `Job`. A `Step` can be as simple +or complex as the developer desires. A simple `Step` might load data from a file into the +database, requiring little or no code (depending upon the implementations used). A more +complex `Step` might have complicated business rules that are applied as part of the +processing, as shown in the following image: + +![Step](https://docs.spring.io/spring-batch/docs/current/reference/html/images/step.png) + +Figure 1. Step + +### Chunk-oriented Processing + +Spring Batch uses a 'Chunk-oriented' processing style within its most common +implementation. Chunk oriented processing refers to reading the data one at a time and +creating 'chunks' that are written out within a transaction boundary. Once the number of +items read equals the commit interval, the entire chunk is written out by the`ItemWriter`, and then the transaction is committed. The following image shows the +process: + +![Chunk Oriented Processing](https://docs.spring.io/spring-batch/docs/current/reference/html/images/chunk-oriented-processing.png) + +Figure 2. 
Chunk-oriented Processing + +The following pseudo code shows the same concepts in a simplified form: + +``` +List items = new ArrayList(); +for(int i = 0; i < commitInterval; i++){ + Object item = itemReader.read(); + if (item != null) { + items.add(item); + } +} +itemWriter.write(items); +``` + +A chunk-oriented step can also be configured with an optional `ItemProcessor` to process items before passing them to the `ItemWriter`. The following image +shows the process when an `ItemProcessor` is registered in the step: + +![Chunk Oriented Processing With Item Processor](https://docs.spring.io/spring-batch/docs/current/reference/html/images/chunk-oriented-processing-with-item-processor.png) + +Figure 3. Chunk-oriented Processing with Item Processor + +The following pseudo code shows how this is implemented in a simplified form: + +``` +List items = new ArrayList(); +for(int i = 0; i < commitInterval; i++){ + Object item = itemReader.read(); + if (item != null) { + items.add(item); + } +} + +List processedItems = new ArrayList(); +for(Object item: items){ + Object processedItem = itemProcessor.process(item); + if (processedItem != null) { + processedItems.add(processedItem); + } +} + +itemWriter.write(processedItems); +``` + +For more details about item processors and their use cases, please refer to the [Item processing](processor.html#itemProcessor) section. + +#### Configuring a `Step` + +Despite the relatively short list of required dependencies for a `Step`, it is an +extremely complex class that can potentially contain many collaborators. 
+ +In order to ease configuration, the Spring Batch XML namespace can be used, as shown in +the following example: + +XML Configuration + +``` + + + + + + + +``` + +When using Java configuration, the Spring Batch builders can be used, as shown in the +following example: + +Java Configuration + +``` +/** + * Note the JobRepository is typically autowired in and not needed to be explicitly + * configured + */ +@Bean +public Job sampleJob(JobRepository jobRepository, Step sampleStep) { + return this.jobBuilderFactory.get("sampleJob") + .repository(jobRepository) + .start(sampleStep) + .build(); +} + +/** + * Note the TransactionManager is typically autowired in and not needed to be explicitly + * configured + */ +@Bean +public Step sampleStep(PlatformTransactionManager transactionManager) { + return this.stepBuilderFactory.get("sampleStep") + .transactionManager(transactionManager) + .chunk(10) + .reader(itemReader()) + .writer(itemWriter()) + .build(); +} +``` + +The configuration above includes the only required dependencies to create a item-oriented +step: + +* `reader`: The `ItemReader` that provides items for processing. + +* `writer`: The `ItemWriter` that processes the items provided by the `ItemReader`. + +* `transaction-manager`: Spring’s `PlatformTransactionManager` that begins and commits + transactions during processing. + +* `transactionManager`: Spring’s `PlatformTransactionManager` that begins and commits + transactions during processing. + +* `job-repository`: The XML-specific name of the `JobRepository` that periodically stores + the `StepExecution` and `ExecutionContext` during processing (just before committing). For + an in-line `` (one defined within a ``), it is an attribute on the ``element. For a standalone ``, it is defined as an attribute of the \. + +* `repository`: The Java-specific name of the `JobRepository` that periodically stores + the `StepExecution` and `ExecutionContext` during processing (just before committing). 
+ +* `commit-interval`: The XML-specific name of the number of items to be processed + before the transaction is committed. + +* `chunk`: The Java-specific name of the dependency that indicates that this is an + item-based step and the number of items to be processed before the transaction is + committed. + +It should be noted that `job-repository` defaults to `jobRepository` and`transaction-manager` defaults to `transactionManager`. Also, the `ItemProcessor` is +optional, since the item could be directly passed from the reader to the writer. + +It should be noted that `repository` defaults to `jobRepository` and `transactionManager`defaults to `transactionManager` (all provided through the infrastructure from`@EnableBatchProcessing`). Also, the `ItemProcessor` is optional, since the item could be +directly passed from the reader to the writer. + +#### Inheriting from a Parent `Step` + +If a group of `Steps` share similar configurations, then it may be helpful to define a +"parent" `Step` from which the concrete `Steps` may inherit properties. Similar to class +inheritance in Java, the "child" `Step` combines its elements and attributes with the +parent’s. The child also overrides any of the parent’s `Steps`. + +In the following example, the `Step`, "concreteStep1", inherits from "parentStep". It is +instantiated with 'itemReader', 'itemProcessor', 'itemWriter', `startLimit=5`, and`allowStartIfComplete=true`. Additionally, the `commitInterval` is '5', since it is +overridden by the "concreteStep1" `Step`, as shown in the following example: + +``` + + + + + + + + + + + +``` + +The `id` attribute is still required on the step within the job element. This is for two +reasons: + +* The `id` is used as the step name when persisting the `StepExecution`. If the same + standalone step is referenced in more than one step in the job, an error occurs. 
+ +* When creating job flows, as described later in this chapter, the `next` attribute + should be referring to the step in the flow, not the standalone step. + +##### Abstract `Step` + +Sometimes, it may be necessary to define a parent `Step` that is not a complete `Step`configuration. If, for instance, the `reader`, `writer`, and `tasklet` attributes are +left off of a `Step` configuration, then initialization fails. If a parent must be +defined without these properties, then the `abstract` attribute should be used. An`abstract` `Step` is only extended, never instantiated. + +In the following example, the `Step` `abstractParentStep` would not be instantiated if it +were not declared to be abstract. The `Step`, "concreteStep2", has 'itemReader', +'itemWriter', and commit-interval=10. + +``` + + + + + + + + + + + +``` + +##### Merging Lists + +Some of the configurable elements on `Steps` are lists, such as the `` element. +If both the parent and child `Steps` declare a `` element, then the +child’s list overrides the parent’s. In order to allow a child to add additional +listeners to the list defined by the parent, every list element has a `merge` attribute. +If the element specifies that `merge="true"`, then the child’s list is combined with the +parent’s instead of overriding it. + +In the following example, the `Step` "concreteStep3", is created with two listeners:`listenerOne` and `listenerTwo`: + +``` + + + + + + + + + + + + + + +``` + +#### The Commit Interval + +As mentioned previously, a step reads in and writes out items, periodically committing +using the supplied `PlatformTransactionManager`. With a `commit-interval` of 1, it +commits after writing each individual item. This is less than ideal in many situations, +since beginning and committing a transaction is expensive. 
Ideally, it is preferable to +process as many items as possible in each transaction, which is completely dependent upon +the type of data being processed and the resources with which the step is interacting. +For this reason, the number of items that are processed within a commit can be +configured. + +The following example shows a `step` whose `tasklet` has a `commit-interval`value of 10 as it would be defined in XML: + +XML Configuration + +``` + + + + + + + +``` + +The following example shows a `step` whose `tasklet` has a `commit-interval`value of 10 as it would be defined in Java: + +Java Configuration + +``` +@Bean +public Job sampleJob() { + return this.jobBuilderFactory.get("sampleJob") + .start(step1()) + .build(); +} + +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(10) + .reader(itemReader()) + .writer(itemWriter()) + .build(); +} +``` + +In the preceding example, 10 items are processed within each transaction. At the +beginning of processing, a transaction is begun. Also, each time `read` is called on the`ItemReader`, a counter is incremented. When it reaches 10, the list of aggregated items +is passed to the `ItemWriter`, and the transaction is committed. + +#### Configuring a `Step` for Restart + +In the "[Configuring and Running a Job](job.html#configureJob)" section , restarting a`Job` was discussed. Restart has numerous impacts on steps, and, consequently, may +require some specific configuration. + +##### Setting a Start Limit + +There are many scenarios where you may want to control the number of times a `Step` may +be started. For example, a particular `Step` might need to be configured so that it only +runs once because it invalidates some resource that must be fixed manually before it can +be run again. This is configurable on the step level, since different steps may have +different requirements. 
A `Step` that may only be executed once can exist as part of the +same `Job` as a `Step` that can be run infinitely. + +The following code fragment shows an example of a start limit configuration in XML: + +XML Configuration + +``` + + + + + +``` + +The following code fragment shows an example of a start limit configuration in Java: + +Java Configuration + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(10) + .reader(itemReader()) + .writer(itemWriter()) + .startLimit(1) + .build(); +} +``` + +The step shown in the preceding example can be run only once. Attempting to run it again +causes a `StartLimitExceededException` to be thrown. Note that the default value for the +start-limit is `Integer.MAX_VALUE`. + +##### Restarting a Completed `Step` + +In the case of a restartable job, there may be one or more steps that should always be +run, regardless of whether or not they were successful the first time. An example might +be a validation step or a `Step` that cleans up resources before processing. During +normal processing of a restarted job, any step with a status of 'COMPLETED', meaning it +has already been completed successfully, is skipped. Setting `allow-start-if-complete` to +"true" overrides this so that the step always runs. 
+ +The following code fragment shows how to define a restartable job in XML: + +XML Configuration + +``` + + + + + +``` + +The following code fragment shows how to define a restartable job in Java: + +Java Configuration + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(10) + .reader(itemReader()) + .writer(itemWriter()) + .allowStartIfComplete(true) + .build(); +} +``` + +##### `Step` Restart Configuration Example + +The following XML example shows how to configure a job to have steps that can be +restarted: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + +``` + +The following Java example shows how to configure a job to have steps that can be +restarted: + +Java Configuration + +``` +@Bean +public Job footballJob() { + return this.jobBuilderFactory.get("footballJob") + .start(playerLoad()) + .next(gameLoad()) + .next(playerSummarization()) + .build(); +} + +@Bean +public Step playerLoad() { + return this.stepBuilderFactory.get("playerLoad") + .chunk(10) + .reader(playerFileItemReader()) + .writer(playerWriter()) + .build(); +} + +@Bean +public Step gameLoad() { + return this.stepBuilderFactory.get("gameLoad") + .allowStartIfComplete(true) + .chunk(10) + .reader(gameFileItemReader()) + .writer(gameWriter()) + .build(); +} + +@Bean +public Step playerSummarization() { + return this.stepBuilderFactory.get("playerSummarization") + .startLimit(2) + .chunk(10) + .reader(playerSummarizationSource()) + .writer(summaryWriter()) + .build(); +} +``` + +The preceding example configuration is for a job that loads in information about football +games and summarizes them. It contains three steps: `playerLoad`, `gameLoad`, and`playerSummarization`. The `playerLoad` step loads player information from a flat file, +while the `gameLoad` step does the same for games. The final step,`playerSummarization`, then summarizes the statistics for each player, based upon the +provided games. 
It is assumed that the file loaded by `playerLoad` must be loaded only +once, but that `gameLoad` can load any games found within a particular directory, +deleting them after they have been successfully loaded into the database. As a result, +the `playerLoad` step contains no additional configuration. It can be started any number +of times, and, if complete, is skipped. The `gameLoad` step, however, needs to be run +every time in case extra files have been added since it last ran. It has +'allow-start-if-complete' set to 'true' in order to always be started. (It is assumed +that the database table games are loaded into has a process indicator on it, to ensure +new games can be properly found by the summarization step). The summarization step, +which is the most important in the job, is configured to have a start limit of 2. This +is useful because if the step continually fails, a new exit code is returned to the +operators that control job execution, and it can not start again until manual +intervention has taken place. + +| |This job provides an example for this document and is not the same as the `footballJob`found in the samples project.| +|---|--------------------------------------------------------------------------------------------------------------------| + +The remainder of this section describes what happens for each of the three runs of the`footballJob` example. + +Run 1: + +1. `playerLoad` runs and completes successfully, adding 400 players to the 'PLAYERS' + table. + +2. `gameLoad` runs and processes 11 files worth of game data, loading their contents + into the 'GAMES' table. + +3. `playerSummarization` begins processing and fails after 5 minutes. + +Run 2: + +1. `playerLoad` does not run, since it has already completed successfully, and`allow-start-if-complete` is 'false' (the default). + +2. 
`gameLoad` runs again and processes another 2 files, loading their contents into the + 'GAMES' table as well (with a process indicator indicating they have yet to be + processed). + +3. `playerSummarization` begins processing of all remaining game data (filtering using the + process indicator) and fails again after 30 minutes. + +Run 3: + +1. `playerLoad` does not run, since it has already completed successfully, and`allow-start-if-complete` is 'false' (the default). + +2. `gameLoad` runs again and processes another 2 files, loading their contents into the + 'GAMES' table as well (with a process indicator indicating they have yet to be + processed). + +3. `playerSummarization` is not started and the job is immediately killed, since this is + the third execution of `playerSummarization`, and its limit is only 2. Either the limit + must be raised or the `Job` must be executed as a new `JobInstance`. + +#### Configuring Skip Logic + +There are many scenarios where errors encountered while processing should not result in`Step` failure, but should be skipped instead. This is usually a decision that must be +made by someone who understands the data itself and what meaning it has. Financial data, +for example, may not be skippable because it results in money being transferred, which +needs to be completely accurate. Loading a list of vendors, on the other hand, might +allow for skips. If a vendor is not loaded because it was formatted incorrectly or was +missing necessary information, then there probably are not issues. Usually, these bad +records are logged as well, which is covered later when discussing listeners. 
+ +The following XML example shows an example of using a skip limit: + +XML Configuration + +``` + + + + + + + + + +``` + +The following Java example shows an example of using a skip limit: + +Java Configuration + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(10) + .reader(flatFileItemReader()) + .writer(itemWriter()) + .faultTolerant() + .skipLimit(10) + .skip(FlatFileParseException.class) + .build(); +} +``` + +In the preceding example, a `FlatFileItemReader` is used. If, at any point, a`FlatFileParseException` is thrown, the item is skipped and counted against the total +skip limit of 10. Exceptions (and their subclasses) that are declared might be thrown +during any phase of the chunk processing (read, process, write) but separate counts +are made of skips on read, process, and write inside +the step execution, but the limit applies across all skips. Once the skip limit is +reached, the next exception found causes the step to fail. In other words, the eleventh +skip triggers the exception, not the tenth. + +One problem with the preceding example is that any other exception besides a`FlatFileParseException` causes the `Job` to fail. In certain scenarios, this may be the +correct behavior. However, in other scenarios, it may be easier to identify which +exceptions should cause failure and skip everything else. 
+ +The following XML example shows an example excluding a particular exception: + +XML Configuration + +``` + + + + + + + + + + +``` + +The following Java example shows an example excluding a particular exception: + +Java Configuration + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(10) + .reader(flatFileItemReader()) + .writer(itemWriter()) + .faultTolerant() + .skipLimit(10) + .skip(Exception.class) + .noSkip(FileNotFoundException.class) + .build(); +} +``` + +By identifying `java.lang.Exception` as a skippable exception class, the configuration +indicates that all `Exceptions` are skippable. However, by 'excluding'`java.io.FileNotFoundException`, the configuration refines the list of skippable +exception classes to be all `Exceptions` *except* `FileNotFoundException`. Any excluded +exception classes is fatal if encountered (that is, they are not skipped). + +For any exception encountered, the skippability is determined by the nearest superclass +in the class hierarchy. Any unclassified exception is treated as 'fatal'. + +The order of the `` and `` elements does not matter. + +The order of the `skip` and `noSkip` method calls does not matter. + +#### Configuring Retry Logic + +In most cases, you want an exception to cause either a skip or a `Step` failure. However, +not all exceptions are deterministic. If a `FlatFileParseException` is encountered while +reading, it is always thrown for that record. Resetting the `ItemReader` does not help. +However, for other exceptions, such as a `DeadlockLoserDataAccessException`, which +indicates that the current process has attempted to update a record that another process +holds a lock on. Waiting and trying again might result in success. 
+ +In XML, retry should be configured as follows: + +``` + + + + + + + + + +``` + +In Java, retry should be configured as follows: + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(2) + .reader(itemReader()) + .writer(itemWriter()) + .faultTolerant() + .retryLimit(3) + .retry(DeadlockLoserDataAccessException.class) + .build(); +} +``` + +The `Step` allows a limit for the number of times an individual item can be retried and a +list of exceptions that are 'retryable'. More details on how retry works can be found in[retry](retry.html#retry). + +#### Controlling Rollback + +By default, regardless of retry or skip, any exceptions thrown from the `ItemWriter`cause the transaction controlled by the `Step` to rollback. If skip is configured as +described earlier, exceptions thrown from the `ItemReader` do not cause a rollback. +However, there are many scenarios in which exceptions thrown from the `ItemWriter` should +not cause a rollback, because no action has taken place to invalidate the transaction. +For this reason, the `Step` can be configured with a list of exceptions that should not +cause rollback. + +In XML, you can control rollback as follows: + +XML Configuration + +``` + + + + + + + + +``` + +In Java, you can control rollback as follows: + +Java Configuration + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(2) + .reader(itemReader()) + .writer(itemWriter()) + .faultTolerant() + .noRollback(ValidationException.class) + .build(); +} +``` + +##### Transactional Readers + +The basic contract of the `ItemReader` is that it is forward only. The step buffers +reader input, so that in the case of a rollback, the items do not need to be re-read +from the reader. However, there are certain scenarios in which the reader is built on +top of a transactional resource, such as a JMS queue. 
In this case, since the queue is +tied to the transaction that is rolled back, the messages that have been pulled from the +queue are put back on. For this reason, the step can be configured to not buffer the +items. + +The following example shows how to create reader that does not buffer items in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to create reader that does not buffer items in Java: + +Java Configuration + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(2) + .reader(itemReader()) + .writer(itemWriter()) + .readerIsTransactionalQueue() + .build(); +} +``` + +#### Transaction Attributes + +Transaction attributes can be used to control the `isolation`, `propagation`, and`timeout` settings. More information on setting transaction attributes can be found in +the[Spring +core documentation](https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#transaction). + +The following example sets the `isolation`, `propagation`, and `timeout` transaction +attributes in XML: + +XML Configuration + +``` + + + + + + +``` + +The following example sets the `isolation`, `propagation`, and `timeout` transaction +attributes in Java: + +Java Configuration + +``` +@Bean +public Step step1() { + DefaultTransactionAttribute attribute = new DefaultTransactionAttribute(); + attribute.setPropagationBehavior(Propagation.REQUIRED.value()); + attribute.setIsolationLevel(Isolation.DEFAULT.value()); + attribute.setTimeout(30); + + return this.stepBuilderFactory.get("step1") + .chunk(2) + .reader(itemReader()) + .writer(itemWriter()) + .transactionAttribute(attribute) + .build(); +} +``` + +#### Registering `ItemStream` with a `Step` + +The step has to take care of `ItemStream` callbacks at the necessary points in its +lifecycle (For more information on the `ItemStream` interface, see[ItemStream](readersAndWriters.html#itemStream)). 
This is vital if a step fails and might +need to be restarted, because the `ItemStream` interface is where the step gets the +information it needs about persistent state between executions. + +If the `ItemReader`, `ItemProcessor`, or `ItemWriter` itself implements the `ItemStream`interface, then these are registered automatically. Any other streams need to be +registered separately. This is often the case where indirect dependencies, such as +delegates, are injected into the reader and writer. A stream can be registered on the`step` through the 'stream' element. + +The following example shows how to register a `stream` on a `step` in XML: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + + + +``` + +The following example shows how to register a `stream` on a `step` in Java: + +Java Configuration + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(2) + .reader(itemReader()) + .writer(compositeItemWriter()) + .stream(fileItemWriter1()) + .stream(fileItemWriter2()) + .build(); +} + +/** + * In Spring Batch 4, the CompositeItemWriter implements ItemStream so this isn't + * necessary, but used for an example. + */ +@Bean +public CompositeItemWriter compositeItemWriter() { + List writers = new ArrayList<>(2); + writers.add(fileItemWriter1()); + writers.add(fileItemWriter2()); + + CompositeItemWriter itemWriter = new CompositeItemWriter(); + + itemWriter.setDelegates(writers); + + return itemWriter; +} +``` + +In the example above, the `CompositeItemWriter` is not an `ItemStream`, but both of its +delegates are. Therefore, both delegate writers must be explicitly registered as streams +in order for the framework to handle them correctly. The `ItemReader` does not need to be +explicitly registered as a stream because it is a direct property of the `Step`. The step +is now restartable, and the state of the reader and writer is correctly persisted in the +event of a failure. 
+ +#### Intercepting `Step` Execution + +Just as with the `Job`, there are many events during the execution of a `Step` where a +user may need to perform some functionality. For example, in order to write out to a flat +file that requires a footer, the `ItemWriter` needs to be notified when the `Step` has +been completed, so that the footer can be written. This can be accomplished with one of many`Step` scoped listeners. + +Any class that implements one of the extensions of `StepListener` (but not that interface +itself since it is empty) can be applied to a step through the `listeners` element. +The `listeners` element is valid inside a step, tasklet, or chunk declaration. It is +recommended that you declare the listeners at the level at which its function applies, +or, if it is multi-featured (such as `StepExecutionListener` and `ItemReadListener`), +then declare it at the most granular level where it applies. + +The following example shows a listener applied at the chunk level in XML: + +XML Configuration + +``` + + + + + + + + +``` + +The following example shows a listener applied at the chunk level in Java: + +Java Configuration + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(10) + .reader(reader()) + .writer(writer()) + .listener(chunkListener()) + .build(); +} +``` + +An `ItemReader`, `ItemWriter` or `ItemProcessor` that itself implements one of the`StepListener` interfaces is registered automatically with the `Step` if using the +namespace `` element or one of the `*StepFactoryBean` factories. This only +applies to components directly injected into the `Step`. If the listener is nested inside +another component, it needs to be explicitly registered (as described previously under[Registering `ItemStream` with a `Step`](#registeringItemStreams)). + +In addition to the `StepListener` interfaces, annotations are provided to address the +same concerns. 
Plain old Java objects can have methods with these annotations that are +then converted into the corresponding `StepListener` type. It is also common to annotate +custom implementations of chunk components such as `ItemReader` or `ItemWriter` or`Tasklet`. The annotations are analyzed by the XML parser for the `` elements +as well as registered with the `listener` methods in the builders, so all you need to do +is use the XML namespace or builders to register the listeners with a step. + +##### `StepExecutionListener` + +`StepExecutionListener` represents the most generic listener for `Step` execution. It +allows for notification before a `Step` is started and after it ends, whether it ended +normally or failed, as shown in the following example: + +``` +public interface StepExecutionListener extends StepListener { + + void beforeStep(StepExecution stepExecution); + + ExitStatus afterStep(StepExecution stepExecution); + +} +``` + +`ExitStatus` is the return type of `afterStep` in order to allow listeners the chance to +modify the exit code that is returned upon completion of a `Step`. + +The annotations corresponding to this interface are: + +* `@BeforeStep` + +* `@AfterStep` + +##### `ChunkListener` + +A chunk is defined as the items processed within the scope of a transaction. Committing a +transaction, at each commit interval, commits a 'chunk'. A `ChunkListener` can be used to +perform logic before a chunk begins processing or after a chunk has completed +successfully, as shown in the following interface definition: + +``` +public interface ChunkListener extends StepListener { + + void beforeChunk(ChunkContext context); + void afterChunk(ChunkContext context); + void afterChunkError(ChunkContext context); + +} +``` + +The beforeChunk method is called after the transaction is started but before read is +called on the `ItemReader`. Conversely, `afterChunk` is called after the chunk has been +committed (and not at all if there is a rollback). 
+ +The annotations corresponding to this interface are: + +* `@BeforeChunk` + +* `@AfterChunk` + +* `@AfterChunkError` + +A `ChunkListener` can be applied when there is no chunk declaration. The `TaskletStep` is +responsible for calling the `ChunkListener`, so it applies to a non-item-oriented tasklet +as well (it is called before and after the tasklet). + +##### `ItemReadListener` + +When discussing skip logic previously, it was mentioned that it may be beneficial to log +the skipped records, so that they can be dealt with later. In the case of read errors, +this can be done with an `ItemReaderListener`, as shown in the following interface +definition: + +``` +public interface ItemReadListener extends StepListener { + + void beforeRead(); + void afterRead(T item); + void onReadError(Exception ex); + +} +``` + +The `beforeRead` method is called before each call to read on the `ItemReader`. The`afterRead` method is called after each successful call to read and is passed the item +that was read. If there was an error while reading, the `onReadError` method is called. +The exception encountered is provided so that it can be logged. + +The annotations corresponding to this interface are: + +* `@BeforeRead` + +* `@AfterRead` + +* `@OnReadError` + +##### `ItemProcessListener` + +Just as with the `ItemReadListener`, the processing of an item can be 'listened' to, as +shown in the following interface definition: + +``` +public interface ItemProcessListener extends StepListener { + + void beforeProcess(T item); + void afterProcess(T item, S result); + void onProcessError(T item, Exception e); + +} +``` + +The `beforeProcess` method is called before `process` on the `ItemProcessor` and is +handed the item that is to be processed. The `afterProcess` method is called after the +item has been successfully processed. If there was an error while processing, the`onProcessError` method is called. 
The exception encountered and the item that was +attempted to be processed are provided, so that they can be logged. + +The annotations corresponding to this interface are: + +* `@BeforeProcess` + +* `@AfterProcess` + +* `@OnProcessError` + +##### `ItemWriteListener` + +The writing of an item can be 'listened' to with the `ItemWriteListener`, as shown in the +following interface definition: + +``` +public interface ItemWriteListener extends StepListener { + + void beforeWrite(List items); + void afterWrite(List items); + void onWriteError(Exception exception, List items); + +} +``` + +The `beforeWrite` method is called before `write` on the `ItemWriter` and is handed the +list of items that is written. The `afterWrite` method is called after the item has been +successfully written. If there was an error while writing, the `onWriteError` method is +called. The exception encountered and the item that was attempted to be written are +provided, so that they can be logged. + +The annotations corresponding to this interface are: + +* `@BeforeWrite` + +* `@AfterWrite` + +* `@OnWriteError` + +##### `SkipListener` + +`ItemReadListener`, `ItemProcessListener`, and `ItemWriteListener` all provide mechanisms +for being notified of errors, but none informs you that a record has actually been +skipped. `onWriteError`, for example, is called even if an item is retried and +successful. For this reason, there is a separate interface for tracking skipped items, as +shown in the following interface definition: + +``` +public interface SkipListener extends StepListener { + + void onSkipInRead(Throwable t); + void onSkipInProcess(T item, Throwable t); + void onSkipInWrite(S item, Throwable t); + +} +``` + +`onSkipInRead` is called whenever an item is skipped while reading. It should be noted +that rollbacks may cause the same item to be registered as skipped more than once.`onSkipInWrite` is called when an item is skipped while writing. 
Because the item has +been read successfully (and not skipped), it is also provided the item itself as an +argument. + +The annotations corresponding to this interface are: + +* `@OnSkipInRead` + +* `@OnSkipInWrite` + +* `@OnSkipInProcess` + +###### SkipListeners and Transactions + +One of the most common use cases for a `SkipListener` is to log out a skipped item, so +that another batch process or even human process can be used to evaluate and fix the +issue leading to the skip. Because there are many cases in which the original transaction +may be rolled back, Spring Batch makes two guarantees: + +1. The appropriate skip method (depending on when the error happened) is called only once + per item. + +2. The `SkipListener` is always called just before the transaction is committed. This is + to ensure that any transactional resources call by the listener are not rolled back by a + failure within the `ItemWriter`. + +### `TaskletStep` + +[Chunk-oriented processing](#chunkOrientedProcessing) is not the only way to process in a`Step`. What if a `Step` must consist of a simple stored procedure call? You could +implement the call as an `ItemReader` and return null after the procedure finishes. +However, doing so is a bit unnatural, since there would need to be a no-op `ItemWriter`. +Spring Batch provides the `TaskletStep` for this scenario. + +`Tasklet` is a simple interface that has one method, `execute`, which is called +repeatedly by the `TaskletStep` until it either returns `RepeatStatus.FINISHED` or throws +an exception to signal a failure. Each call to a `Tasklet` is wrapped in a transaction.`Tasklet` implementors might call a stored procedure, a script, or a simple SQL update +statement. + +To create a `TaskletStep` in XML, the 'ref' attribute of the `` element should +reference a bean that defines a `Tasklet` object. No `` element should be used +within the ``. 
The following example shows a simple tasklet: + +``` + + + +``` + +To create a `TaskletStep` in Java, the bean passed to the `tasklet` method of the builder +should implement the `Tasklet` interface. No call to `chunk` should be called when +building a `TaskletStep`. The following example shows a simple tasklet: + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .tasklet(myTasklet()) + .build(); +} +``` + +| |`TaskletStep` automatically registers the
tasklet as a `StepListener` if it implements the `StepListener`interface.| +|---|-----------------------------------------------------------------------------------------------------------------------| + +#### `TaskletAdapter` + +As with other adapters for the `ItemReader` and `ItemWriter` interfaces, the `Tasklet`interface contains an implementation that allows for adapting itself to any pre-existing +class: `TaskletAdapter`. An example where this may be useful is an existing DAO that is +used to update a flag on a set of records. The `TaskletAdapter` can be used to call this +class without having to write an adapter for the `Tasklet` interface. + +The following example shows how to define a `TaskletAdapter` in XML: + +XML Configuration + +``` + + + + + + +``` + +The following example shows how to define a `TaskletAdapter` in Java: + +Java Configuration + +``` +@Bean +public MethodInvokingTaskletAdapter myTasklet() { + MethodInvokingTaskletAdapter adapter = new MethodInvokingTaskletAdapter(); + + adapter.setTargetObject(fooDao()); + adapter.setTargetMethod("updateFoo"); + + return adapter; +} +``` + +#### Example `Tasklet` Implementation + +Many batch jobs contain steps that must be done before the main processing begins in +order to set up various resources or after processing has completed to cleanup those +resources. In the case of a job that works heavily with files, it is often necessary to +delete certain files locally after they have been uploaded successfully to another +location. 
The following example (taken from the[Spring +Batch samples project](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples)) is a `Tasklet` implementation with just such a responsibility: + +``` +public class FileDeletingTasklet implements Tasklet, InitializingBean { + + private Resource directory; + + public RepeatStatus execute(StepContribution contribution, + ChunkContext chunkContext) throws Exception { + File dir = directory.getFile(); + Assert.state(dir.isDirectory()); + + File[] files = dir.listFiles(); + for (int i = 0; i < files.length; i++) { + boolean deleted = files[i].delete(); + if (!deleted) { + throw new UnexpectedJobExecutionException("Could not delete file " + + files[i].getPath()); + } + } + return RepeatStatus.FINISHED; + } + + public void setDirectoryResource(Resource directory) { + this.directory = directory; + } + + public void afterPropertiesSet() throws Exception { + Assert.notNull(directory, "directory must be set"); + } +} +``` + +The preceding `tasklet` implementation deletes all files within a given directory. It +should be noted that the `execute` method is called only once. All that is left is to +reference the `tasklet` from the `step`. 
+ +The following example shows how to reference the `tasklet` from the `step` in XML: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +The following example shows how to reference the `tasklet` from the `step` in Java: + +Java Configuration + +``` +@Bean +public Job taskletJob() { + return this.jobBuilderFactory.get("taskletJob") + .start(deleteFilesInDir()) + .build(); +} + +@Bean +public Step deleteFilesInDir() { + return this.stepBuilderFactory.get("deleteFilesInDir") + .tasklet(fileDeletingTasklet()) + .build(); +} + +@Bean +public FileDeletingTasklet fileDeletingTasklet() { + FileDeletingTasklet tasklet = new FileDeletingTasklet(); + + tasklet.setDirectoryResource(new FileSystemResource("target/test-outputs/test-dir")); + + return tasklet; +} +``` + +### Controlling Step Flow + +With the ability to group steps together within an owning job comes the need to be able +to control how the job "flows" from one step to another. The failure of a `Step` does not +necessarily mean that the `Job` should fail. Furthermore, there may be more than one type +of 'success' that determines which `Step` should be executed next. Depending upon how a +group of `Steps` is configured, certain steps may not even be processed at all. + +#### Sequential Flow + +The simplest flow scenario is a job where all of the steps execute sequentially, as shown +in the following image: + +![Sequential Flow](https://docs.spring.io/spring-batch/docs/current/reference/html/images/sequential-flow.png) + +Figure 4. Sequential Flow + +This can be achieved by using the 'next' in a `step`. 
+ +The following example shows how to use the `next` attribute in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to use the `next()` method in Java: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(stepA()) + .next(stepB()) + .next(stepC()) + .build(); +} +``` + +In the scenario above, 'step A' runs first because it is the first `Step` listed. If +'step A' completes normally, then 'step B' runs, and so on. However, if 'step A' fails, +then the entire `Job` fails and 'step B' does not execute. + +| |With the Spring Batch XML namespace, the first step listed in the configuration is*always* the first step run by the `Job`. The order of the other step elements does not
matter, but the first step must always appear first in the xml.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Conditional Flow + +In the example above, there are only two possibilities: + +1. The `step` is successful and the next `step` should be executed. + +2. The `step` failed and, thus, the `job` should fail. + +In many cases, this may be sufficient. However, what about a scenario in which the +failure of a `step` should trigger a different `step`, rather than causing failure? The +following image shows such a flow: + +![Conditional Flow](https://docs.spring.io/spring-batch/docs/current/reference/html/images/conditional-flow.png) + +Figure 5. Conditional Flow + +In order to handle more complex scenarios, the Spring Batch XML namespace allows transitions +elements to be defined within the step element. One such transition is the `next`element. Like the `next` attribute, the `next` element tells the `Job` which `Step` to +execute next. However, unlike the attribute, any number of `next` elements are allowed on +a given `Step`, and there is no default behavior in the case of failure. This means that, if +transition elements are used, then all of the behavior for the `Step` transitions must be +defined explicitly. Note also that a single step cannot have both a `next` attribute and +a `transition` element. + +The `next` element specifies a pattern to match and the step to execute next, as shown in +the following example: + +XML Configuration + +``` + + + + + + + + +``` + +The Java API offers a fluent set of methods that let you specify the flow and what to do +when a step fails. 
The following example shows how to specify one step (`stepA`) and then +proceed to either of two different steps (`stepB` and `stepC`), depending on whether`stepA` succeeds: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(stepA()) + .on("*").to(stepB()) + .from(stepA()).on("FAILED").to(stepC()) + .end() + .build(); +} +``` + +When using XML configuration, the `on` attribute of a transition element uses a simple +pattern-matching scheme to match the `ExitStatus` that results from the execution of the`Step`. + +When using java configuration, the `on()` method uses a simple pattern-matching scheme to +match the `ExitStatus` that results from the execution of the `Step`. + +Only two special characters are allowed in the pattern: + +* "\*" matches zero or more characters + +* "?" matches exactly one character + +For example, "c\*t" matches "cat" and "count", while "c?t" matches "cat" but not "count". + +While there is no limit to the number of transition elements on a `Step`, if the `Step`execution results in an `ExitStatus` that is not covered by an element, then the +framework throws an exception and the `Job` fails. The framework automatically orders +transitions from most specific to least specific. This means that, even if the ordering +were swapped for "stepA" in the example above, an `ExitStatus` of "FAILED" would still go +to "stepC". + +##### Batch Status Versus Exit Status + +When configuring a `Job` for conditional flow, it is important to understand the +difference between `BatchStatus` and `ExitStatus`. `BatchStatus` is an enumeration that +is a property of both `JobExecution` and `StepExecution` and is used by the framework to +record the status of a `Job` or `Step`. It can be one of the following values:`COMPLETED`, `STARTING`, `STARTED`, `STOPPING`, `STOPPED`, `FAILED`, `ABANDONED`, or`UNKNOWN`. 
Most of them are self explanatory: `COMPLETED` is the status set when a step +or job has completed successfully, `FAILED` is set when it fails, and so on. + +The following example contains the 'next' element when using XML configuration: + +``` + +``` + +The following example contains the 'on' element when using Java Configuration: + +``` +... +.from(stepA()).on("FAILED").to(stepB()) +... +``` + +At first glance, it would appear that 'on' references the `BatchStatus` of the `Step` to +which it belongs. However, it actually references the `ExitStatus` of the `Step`. As the +name implies, `ExitStatus` represents the status of a `Step` after it finishes execution. + +More specifically, when using XML configuration, the 'next' element shown in the +preceding XML configuration example references the exit code of `ExitStatus`. + +When using Java configuration, the 'on()' method shown in the preceding +Java configuration example references the exit code of `ExitStatus`. + +In English, it says: "go to stepB if the exit code is `FAILED` ". By default, the exit +code is always the same as the `BatchStatus` for the `Step`, which is why the entry above +works. However, what if the exit code needs to be different? A good example comes from +the skip sample job within the samples project: + +The following example shows how to work with a different exit code in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to work with a different exit code in Java: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()).on("FAILED").end() + .from(step1()).on("COMPLETED WITH SKIPS").to(errorPrint1()) + .from(step1()).on("*").to(step2()) + .end() + .build(); +} +``` + +`step1` has three possibilities: + +1. The `Step` failed, in which case the job should fail. + +2. The `Step` completed successfully. + +3. The `Step` completed successfully but with an exit code of 'COMPLETED WITH SKIPS'. 
In + this case, a different step should be run to handle the errors. + +The preceding configuration works. However, something needs to change the exit code based on +the condition of the execution having skipped records, as shown in the following example: + +``` +public class SkipCheckingListener extends StepExecutionListenerSupport { + public ExitStatus afterStep(StepExecution stepExecution) { + String exitCode = stepExecution.getExitStatus().getExitCode(); + if (!exitCode.equals(ExitStatus.FAILED.getExitCode()) && + stepExecution.getSkipCount() > 0) { + return new ExitStatus("COMPLETED WITH SKIPS"); + } + else { + return null; + } + } +} +``` + +The above code is a `StepExecutionListener` that first checks to make sure the `Step` was +successful and then checks to see if the skip count on the `StepExecution` is higher than +0. If both conditions are met, a new `ExitStatus` with an exit code of`COMPLETED WITH SKIPS` is returned. + +#### Configuring for Stop + +After the discussion of [BatchStatus and ExitStatus](#batchStatusVsExitStatus), +one might wonder how the `BatchStatus` and `ExitStatus` are determined for the `Job`. +While these statuses are determined for the `Step` by the code that is executed, the +statuses for the `Job` are determined based on the configuration. + +So far, all of the job configurations discussed have had at least one final `Step` with +no transitions. + +In the following XML example, after the `step` executes, the `Job` ends: + +``` + +``` + +In the following Java example, after the `step` executes, the `Job` ends: + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .build(); +} +``` + +If no transitions are defined for a `Step`, then the status of the `Job` is defined as +follows: + +* If the `Step` ends with `ExitStatus` FAILED, then the `BatchStatus` and `ExitStatus` of + the `Job` are both `FAILED`. + +* Otherwise, the `BatchStatus` and `ExitStatus` of the `Job` are both `COMPLETED`. 
+ +While this method of terminating a batch job is sufficient for some batch jobs, such as a +simple sequential step job, custom defined job-stopping scenarios may be required. For +this purpose, Spring Batch provides three transition elements to stop a `Job` (in +addition to the [`next` element](#nextElement) that we discussed previously). +Each of these stopping elements stops a `Job` with a particular `BatchStatus`. It is +important to note that the stop transition elements have no effect on either the`BatchStatus` or `ExitStatus` of any `Steps` in the `Job`. These elements affect only the +final statuses of the `Job`. For example, it is possible for every step in a job to have +a status of `FAILED` but for the job to have a status of `COMPLETED`. + +##### Ending at a Step + +Configuring a step end instructs a `Job` to stop with a `BatchStatus` of `COMPLETED`. A`Job` that has finished with status `COMPLETED` cannot be restarted (the framework throws +a `JobInstanceAlreadyCompleteException`). + +When using XML configuration, the 'end' element is used for this task. The `end` element +also allows for an optional 'exit-code' attribute that can be used to customize the`ExitStatus` of the `Job`. If no 'exit-code' attribute is given, then the `ExitStatus` is`COMPLETED` by default, to match the `BatchStatus`. + +When using Java configuration, the 'end' method is used for this task. The `end` method +also allows for an optional 'exitStatus' parameter that can be used to customize the`ExitStatus` of the `Job`. If no 'exitStatus' value is provided, then the `ExitStatus` is`COMPLETED` by default, to match the `BatchStatus`. + +Consider the following scenario: if `step2` fails, then the `Job` stops with a`BatchStatus` of `COMPLETED` and an `ExitStatus` of `COMPLETED` and `step3` does not run. +Otherwise, execution moves to `step3`. Note that if `step2` fails, the `Job` is not +restartable (because the status is `COMPLETED`). 
+ +The following example shows the scenario in XML: + +``` + + + + + + + + +``` + +The following example shows the scenario in Java: + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .next(step2()) + .on("FAILED").end() + .from(step2()).on("*").to(step3()) + .end() + .build(); +} +``` + +##### Failing a Step + +Configuring a step to fail at a given point instructs a `Job` to stop with a`BatchStatus` of `FAILED`. Unlike end, the failure of a `Job` does not prevent the `Job`from being restarted. + +When using XML configuration, the 'fail' element also allows for an optional 'exit-code' +attribute that can be used to customize the `ExitStatus` of the `Job`. If no 'exit-code' +attribute is given, then the `ExitStatus` is `FAILED` by default, to match the`BatchStatus`. + +Consider the following scenario if `step2` fails, then the `Job` stops with a`BatchStatus` of `FAILED` and an `ExitStatus` of `EARLY TERMINATION` and `step3` does not +execute. Otherwise, execution moves to `step3`. Additionally, if `step2` fails and the`Job` is restarted, then execution begins again on `step2`. + +The following example shows the scenario in XML: + +XML Configuration + +``` + + + + + + + + +``` + +The following example shows the scenario in Java: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .next(step2()).on("FAILED").fail() + .from(step2()).on("*").to(step3()) + .end() + .build(); +} +``` + +##### Stopping a Job at a Given Step + +Configuring a job to stop at a particular step instructs a `Job` to stop with a`BatchStatus` of `STOPPED`. Stopping a `Job` can provide a temporary break in processing, +so that the operator can take some action before restarting the `Job`. + +When using XML configuration, a 'stop' element requires a 'restart' attribute that specifies +the step where execution should pick up when the Job is restarted. 
+ +When using Java configuration, the `stopAndRestart` method requires a 'restart' attribute +that specifies the step where execution should pick up when the Job is restarted. + +Consider the following scenario: if `step1` finishes with `COMPLETE`, then the job then +stops. Once it is restarted, execution begins on `step2`. + +The following listing shows the scenario in XML: + +``` + + + + + +``` + +The following example shows the scenario in Java: + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()).on("COMPLETED").stopAndRestart(step2()) + .end() + .build(); +} +``` + +#### Programmatic Flow Decisions + +In some situations, more information than the `ExitStatus` may be required to decide +which step to execute next. In this case, a `JobExecutionDecider` can be used to assist +in the decision, as shown in the following example: + +``` +public class MyDecider implements JobExecutionDecider { + public FlowExecutionStatus decide(JobExecution jobExecution, StepExecution stepExecution) { + String status; + if (someCondition()) { + status = "FAILED"; + } + else { + status = "COMPLETED"; + } + return new FlowExecutionStatus(status); + } +} +``` + +In the following sample job configuration, a `decision` specifies the decider to use as +well as all of the transitions: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +In the following example, a bean implementing the `JobExecutionDecider` is passed +directly to the `next` call when using Java configuration. + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .next(decider()).on("FAILED").to(step2()) + .from(decider()).on("COMPLETED").to(step3()) + .end() + .build(); +} +``` + +#### Split Flows + +Every scenario described so far has involved a `Job` that executes its steps one at a +time in a linear fashion. 
In addition to this typical style, Spring Batch also allows +for a job to be configured with parallel flows. + +The XML namespace allows you to use the 'split' element. As the following example shows, +the 'split' element contains one or more 'flow' elements, where entire separate flows can +be defined. A 'split' element may also contain any of the previously discussed transition +elements, such as the 'next' attribute or the 'next', 'end' or 'fail' elements. + +``` + + + + + + + + + + +``` + +Java based configuration lets you configure splits through the provided builders. As the +following example shows, the 'split' element contains one or more 'flow' elements, where +entire separate flows can be defined. A 'split' element may also contain any of the +previously discussed transition elements, such as the 'next' attribute or the 'next', +'end' or 'fail' elements. + +``` +@Bean +public Flow flow1() { + return new FlowBuilder("flow1") + .start(step1()) + .next(step2()) + .build(); +} + +@Bean +public Flow flow2() { + return new FlowBuilder("flow2") + .start(step3()) + .build(); +} + +@Bean +public Job job(Flow flow1, Flow flow2) { + return this.jobBuilderFactory.get("job") + .start(flow1) + .split(new SimpleAsyncTaskExecutor()) + .add(flow2) + .next(step4()) + .end() + .build(); +} +``` + +#### Externalizing Flow Definitions and Dependencies Between Jobs + +Part of the flow in a job can be externalized as a separate bean definition and then +re-used. There are two ways to do so. The first is to simply declare the flow as a +reference to one defined elsewhere. 
+ +The following example shows how to declare a flow as a reference to a flow defined +elsewhere in XML: + +XML Configuration + +``` + + + + + + + + + +``` + +The following example shows how to declare a flow as a reference to a flow defined +elsewhere in Java: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(flow1()) + .next(step3()) + .end() + .build(); +} + +@Bean +public Flow flow1() { + return new FlowBuilder("flow1") + .start(step1()) + .next(step2()) + .build(); +} +``` + +The effect of defining an external flow as shown in the preceding example is to insert +the steps from the external flow into the job as if they had been declared inline. In +this way, many jobs can refer to the same template flow and compose such templates into +different logical flows. This is also a good way to separate the integration testing of +the individual flows. + +The other form of an externalized flow is to use a `JobStep`. A `JobStep` is similar to a`FlowStep` but actually creates and launches a separate job execution for the steps in +the flow specified. + +The following example hows an example of a `JobStep` in XML: + +XML Configuration + +``` + + + + + + +... 
+ + + + +``` + +The following example shows an example of a `JobStep` in Java: + +Java Configuration + +``` +@Bean +public Job jobStepJob() { + return this.jobBuilderFactory.get("jobStepJob") + .start(jobStepJobStep1(null)) + .build(); +} + +@Bean +public Step jobStepJobStep1(JobLauncher jobLauncher) { + return this.stepBuilderFactory.get("jobStepJobStep1") + .job(job()) + .launcher(jobLauncher) + .parametersExtractor(jobParametersExtractor()) + .build(); +} + +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .build(); +} + +@Bean +public DefaultJobParametersExtractor jobParametersExtractor() { + DefaultJobParametersExtractor extractor = new DefaultJobParametersExtractor(); + + extractor.setKeys(new String[]{"input.file"}); + + return extractor; +} +``` + +The job parameters extractor is a strategy that determines how the `ExecutionContext` for +the `Step` is converted into `JobParameters` for the `Job` that is run. The `JobStep` is +useful when you want to have some more granular options for monitoring and reporting on +jobs and steps. Using `JobStep` is also often a good answer to the question: "How do I +create dependencies between jobs?" It is a good way to break up a large system into +smaller modules and control the flow of jobs. + +### Late Binding of `Job` and `Step` Attributes + +Both the XML and flat file examples shown earlier use the Spring `Resource` abstraction +to obtain a file. This works because `Resource` has a `getFile` method, which returns a`java.io.File`. 
Both XML and flat file resources can be configured using standard Spring +constructs: + +The following example shows late binding in XML: + +XML Configuration + +``` + + + +``` + +The following example shows late binding in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemReader flatFileItemReader() { + FlatFileItemReader reader = new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource("file://outputs/file.txt")) + ... +} +``` + +The preceding `Resource` loads the file from the specified file system location. Note +that absolute locations have to start with a double slash (`//`). In most Spring +applications, this solution is good enough, because the names of these resources are +known at compile time. However, in batch scenarios, the file name may need to be +determined at runtime as a parameter to the job. This can be solved using '-D' parameters +to read a system property. + +The following example shows how to read a file name from a property in XML: + +XML Configuration + +``` + + + +``` + +The following shows how to read a file name from a property in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemReader flatFileItemReader(@Value("${input.file.name}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +All that would be required for this solution to work would be a system argument (such as`-Dinput.file.name="file://outputs/file.txt"`). + +| |Although a `PropertyPlaceholderConfigurer` can be used here, it is not
necessary if the system property is always set because the `ResourceEditor` in Spring
already filters and does placeholder replacement on system properties.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Often, in a batch setting, it is preferable to parametrize the file name in the`JobParameters` of the job, instead of through system properties, and access them that +way. To accomplish this, Spring Batch allows for the late binding of various `Job` and`Step` attributes. + +The following example shows how to parameterize a file name in XML: + +XML Configuration + +``` + + + +``` + +The following example shows how to parameterize a file name in Java: + +Java Configuration + +``` +@StepScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters['input.file.name']}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Both the `JobExecution` and `StepExecution` level `ExecutionContext` can be accessed in +the same way. + +The following example shows how to access the `ExecutionContext` in XML: + +XML Configuration + +``` + + + +``` + +XML Configuration + +``` + + + +``` + +The following example shows how to access the `ExecutionContext` in Java: + +Java Configuration + +``` +@StepScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobExecutionContext['input.file.name']}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Java Configuration + +``` +@StepScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{stepExecutionContext['input.file.name']}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... 
+} +``` + +| |Any bean that uses late-binding must be declared with scope="step". See [Step Scope](#step-scope) for more information. It should be noted
that a `Step` bean should not be step-scoped. If late-binding is needed in a step
definition, the components of that step (i.e., tasklet, item reader/writer, etc.)
are the ones that should be scoped instead.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you are using Spring 3.0 (or above), the expressions in step-scoped beans are in the
Spring Expression Language, a powerful general-purpose language with many interesting
features. To provide backward compatibility, if Spring Batch detects the presence of
older versions of Spring, it uses a native expression language that is less powerful and
that has slightly different parsing rules. The main difference is that the map keys in
the example above do not need to be quoted with Spring 2.5, but the quotes are mandatory
in Spring 3.0.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Step Scope + +All of the late binding examples shown earlier have a scope of “step” declared on the +bean definition. + +The following example shows an example of binding to step scope in XML: + +XML Configuration + +``` + + + +``` + +The following example shows an example of binding to step scope in Java: + +Java Configuration + +``` +@StepScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters[input.file.name]}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Using a scope of `Step` is required in order to use late binding, because the bean cannot +actually be instantiated until the `Step` starts, to allow the attributes to be found. +Because it is not part of the Spring container by default, the scope must be added +explicitly, by using the `batch` namespace or by including a bean definition explicitly +for the `StepScope`, or by using the `@EnableBatchProcessing` annotation. Use only one of +those methods. The following example uses the `batch` namespace: + +``` + + +... 
+ +``` + +The following example includes the bean definition explicitly: + +``` + +``` + +#### Job Scope + +`Job` scope, introduced in Spring Batch 3.0, is similar to `Step` scope in configuration +but is a Scope for the `Job` context, so that there is only one instance of such a bean +per running job. Additionally, support is provided for late binding of references +accessible from the `JobContext` using `#{..}` placeholders. Using this feature, bean +properties can be pulled from the job or job execution context and the job parameters. + +The following example shows an example of binding to job scope in XML: + +XML Configuration + +``` + + + +``` + +XML Configuration + +``` + + + +``` + +The following example shows an example of binding to job scope in Java: + +Java Configurtation + +``` +@JobScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters[input]}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Java Configuration + +``` +@JobScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobExecutionContext['input.name']}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Because it is not part of the Spring container by default, the scope must be added +explicitly, by using the `batch` namespace, by including a bean definition explicitly for +the JobScope, or using the `@EnableBatchProcessing` annotation (but not all of them). +The following example uses the `batch` namespace: + +``` + + + +... + +``` + +The following example includes a bean that explicitly defines the `JobScope`: + +``` + +``` + +| |There are some practical limitations of using job-scoped beans in multi-threaded
or partitioned steps. Spring Batch does not control the threads spawned in these
use cases, so it is not possible to set them up correctly to use such beans. Hence,
it is not recommended to use job-scoped beans in multi-threaded or partitioned steps.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| \ No newline at end of file diff --git a/docs/en/spring-batch/testing.md b/docs/en/spring-batch/testing.md new file mode 100644 index 0000000000000000000000000000000000000000..9a563a9c40f9955268d9261dda8356737d65314a --- /dev/null +++ b/docs/en/spring-batch/testing.md @@ -0,0 +1,324 @@ +# Unit Testing + +## Unit Testing + +XMLJavaBoth + +As with other application styles, it is extremely important to unit test any code written +as part of a batch job. The Spring core documentation covers how to unit and integration +test with Spring in great detail, so it is not be repeated here. It is important, however, +to think about how to 'end to end' test a batch job, which is what this chapter covers. +The spring-batch-test project includes classes that facilitate this end-to-end test +approach. + +### Creating a Unit Test Class + +In order for the unit test to run a batch job, the framework must load the job’s +ApplicationContext. Two annotations are used to trigger this behavior: + +* `@RunWith(SpringJUnit4ClassRunner.class)`: Indicates that the class should use Spring’s + JUnit facilities + +* `@ContextConfiguration(…​)`: Indicates which resources to configure the`ApplicationContext` with. + +Starting from v4.1, it is also possible to inject Spring Batch test utilities +like the `JobLauncherTestUtils` and `JobRepositoryTestUtils` in the test context +using the `@SpringBatchTest` annotation. + +| |It should be noted that `JobLauncherTestUtils` requires a `Job` bean and that`JobRepositoryTestUtils` requires a `DataSource` bean. 
Since `@SpringBatchTest` registers a `JobLauncherTestUtils` and a `JobRepositoryTestUtils` in the test
context, it is expected that the test context contains a single autowire candidate
for a `Job` and a `DataSource` (either a single bean definition or one that is
annotated with `org.springframework.context.annotation.Primary`).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following Java example shows the annotations in use: + +Using Java Configuration + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration(classes=SkipSampleConfiguration.class) +public class SkipSampleFunctionalTests { ... } +``` + +The following XML example shows the annotations in use: + +Using XML Configuration + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration(locations = { "/simple-job-launcher-context.xml", + "/jobs/skipSampleJob.xml" }) +public class SkipSampleFunctionalTests { ... } +``` + +### End-To-End Testing of Batch Jobs + +'End To End' testing can be defined as testing the complete run of a batch job from +beginning to end. This allows for a test that sets up a test condition, executes the job, +and verifies the end result. + +Consider an example of a batch job that reads from the database and writes to a flat file. +The test method begins by setting up the database with test data. It clears the CUSTOMER +table and then inserts 10 new records. The test then launches the `Job` by using the`launchJob()` method. The `launchJob()` method is provided by the `JobLauncherTestUtils`class. The `JobLauncherTestUtils` class also provides the `launchJob(JobParameters)`method, which allows the test to give particular parameters. The `launchJob()` method +returns the `JobExecution` object, which is useful for asserting particular information +about the `Job` run. 
In the following case, the test verifies that the `Job` ended with +status "COMPLETED". + +The following listing shows the example in XML: + +XML Based Configuration + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration(locations = { "/simple-job-launcher-context.xml", + "/jobs/skipSampleJob.xml" }) +public class SkipSampleFunctionalTests { + + @Autowired + private JobLauncherTestUtils jobLauncherTestUtils; + + private SimpleJdbcTemplate simpleJdbcTemplate; + + @Autowired + public void setDataSource(DataSource dataSource) { + this.simpleJdbcTemplate = new SimpleJdbcTemplate(dataSource); + } + + @Test + public void testJob() throws Exception { + simpleJdbcTemplate.update("delete from CUSTOMER"); + for (int i = 1; i <= 10; i++) { + simpleJdbcTemplate.update("insert into CUSTOMER values (?, 0, ?, 100000)", + i, "customer" + i); + } + + JobExecution jobExecution = jobLauncherTestUtils.launchJob(); + + Assert.assertEquals("COMPLETED", jobExecution.getExitStatus().getExitCode()); + } +} +``` + +The following listing shows the example in Java: + +Java Based Configuration + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration(classes=SkipSampleConfiguration.class) +public class SkipSampleFunctionalTests { + + @Autowired + private JobLauncherTestUtils jobLauncherTestUtils; + + private SimpleJdbcTemplate simpleJdbcTemplate; + + @Autowired + public void setDataSource(DataSource dataSource) { + this.simpleJdbcTemplate = new SimpleJdbcTemplate(dataSource); + } + + @Test + public void testJob() throws Exception { + simpleJdbcTemplate.update("delete from CUSTOMER"); + for (int i = 1; i <= 10; i++) { + simpleJdbcTemplate.update("insert into CUSTOMER values (?, 0, ?, 100000)", + i, "customer" + i); + } + + JobExecution jobExecution = jobLauncherTestUtils.launchJob(); + + Assert.assertEquals("COMPLETED", jobExecution.getExitStatus().getExitCode()); + } +} +``` + +### Testing Individual Steps + +For complex batch jobs, test cases in the 
end-to-end testing approach may become +unmanageable. In these cases, it may be more useful to have test cases to test individual +steps on their own. The `AbstractJobTests` class contains a method called `launchStep`, +which takes a step name and runs just that particular `Step`. This approach allows for +more targeted tests letting the test set up data for only that step and to validate its +results directly. The following example shows how to use the `launchStep` method to load a`Step` by name: + +``` +JobExecution jobExecution = jobLauncherTestUtils.launchStep("loadFileStep"); +``` + +### Testing Step-Scoped Components + +Often, the components that are configured for your steps at runtime use step scope and +late binding to inject context from the step or job execution. These are tricky to test as +standalone components, unless you have a way to set the context as if they were in a step +execution. That is the goal of two components in Spring Batch:`StepScopeTestExecutionListener` and `StepScopeTestUtils`. + +The listener is declared at the class level, and its job is to create a step execution +context for each test method, as shown in the following example: + +``` +@ContextConfiguration +@TestExecutionListeners( { DependencyInjectionTestExecutionListener.class, + StepScopeTestExecutionListener.class }) +@RunWith(SpringRunner.class) +public class StepScopeTestExecutionListenerIntegrationTests { + + // This component is defined step-scoped, so it cannot be injected unless + // a step is active... + @Autowired + private ItemReader reader; + + public StepExecution getStepExecution() { + StepExecution execution = MetaDataInstanceFactory.createStepExecution(); + execution.getExecutionContext().putString("input.data", "foo,bar,spam"); + return execution; + } + + @Test + public void testReader() { + // The reader is initialized and bound to the input data + assertNotNull(reader.read()); + } + +} +``` + +There are two `TestExecutionListeners`. 
One is the regular Spring Test framework, which +handles dependency injection from the configured application context to inject the reader. +The other is the Spring Batch `StepScopeTestExecutionListener`. It works by looking for a +factory method in the test case for a `StepExecution`, using that as the context for the +test method, as if that execution were active in a `Step` at runtime. The factory method +is detected by its signature (it must return a `StepExecution`). If a factory method is +not provided, then a default `StepExecution` is created. + +Starting from v4.1, the `StepScopeTestExecutionListener` and`JobScopeTestExecutionListener` are imported as test execution listeners +if the test class is annotated with `@SpringBatchTest`. The preceding test +example can be configured as follows: + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration +public class StepScopeTestExecutionListenerIntegrationTests { + + // This component is defined step-scoped, so it cannot be injected unless + // a step is active... + @Autowired + private ItemReader reader; + + public StepExecution getStepExecution() { + StepExecution execution = MetaDataInstanceFactory.createStepExecution(); + execution.getExecutionContext().putString("input.data", "foo,bar,spam"); + return execution; + } + + @Test + public void testReader() { + // The reader is initialized and bound to the input data + assertNotNull(reader.read()); + } + +} +``` + +The listener approach is convenient if you want the duration of the step scope to be the +execution of the test method. For a more flexible but more invasive approach, you can use +the `StepScopeTestUtils`. 
The following example counts the number of items available in +the reader shown in the previous example: + +``` +int count = StepScopeTestUtils.doInStepScope(stepExecution, + new Callable() { + public Integer call() throws Exception { + + int count = 0; + + while (reader.read() != null) { + count++; + } + return count; + } +}); +``` + +### Validating Output Files + +When a batch job writes to the database, it is easy to query the database to verify that +the output is as expected. However, if the batch job writes to a file, it is equally +important that the output be verified. Spring Batch provides a class called `AssertFile`to facilitate the verification of output files. The method called `assertFileEquals` takes +two `File` objects (or two `Resource` objects) and asserts, line by line, that the two +files have the same content. Therefore, it is possible to create a file with the expected +output and to compare it to the actual result, as shown in the following example: + +``` +private static final String EXPECTED_FILE = "src/main/resources/data/input.txt"; +private static final String OUTPUT_FILE = "target/test-outputs/output.txt"; + +AssertFile.assertFileEquals(new FileSystemResource(EXPECTED_FILE), + new FileSystemResource(OUTPUT_FILE)); +``` + +### Mocking Domain Objects + +Another common issue encountered while writing unit and integration tests for Spring Batch +components is how to mock domain objects. A good example is a `StepExecutionListener`, as +illustrated in the following code snippet: + +``` +public class NoWorkFoundStepExecutionListener extends StepExecutionListenerSupport { + + public ExitStatus afterStep(StepExecution stepExecution) { + if (stepExecution.getReadCount() == 0) { + return ExitStatus.FAILED; + } + return null; + } +} +``` + +The preceding listener example is provided by the framework and checks a `StepExecution`for an empty read count, thus signifying that no work was done. 
While this example is +fairly simple, it serves to illustrate the types of problems that may be encountered when +attempting to unit test classes that implement interfaces requiring Spring Batch domain +objects. Consider the following unit test for the listener’s in the preceding example: + +``` +private NoWorkFoundStepExecutionListener tested = new NoWorkFoundStepExecutionListener(); + +@Test +public void noWork() { + StepExecution stepExecution = new StepExecution("NoProcessingStep", + new JobExecution(new JobInstance(1L, new JobParameters(), + "NoProcessingJob"))); + + stepExecution.setExitStatus(ExitStatus.COMPLETED); + stepExecution.setReadCount(0); + + ExitStatus exitStatus = tested.afterStep(stepExecution); + assertEquals(ExitStatus.FAILED.getExitCode(), exitStatus.getExitCode()); +} +``` + +Because the Spring Batch domain model follows good object-oriented principles, the`StepExecution` requires a `JobExecution`, which requires a `JobInstance` and`JobParameters`, to create a valid `StepExecution`. While this is good in a solid domain +model, it does make creating stub objects for unit testing verbose. To address this issue, +the Spring Batch test module includes a factory for creating domain objects:`MetaDataInstanceFactory`. Given this factory, the unit test can be updated to be more +concise, as shown in the following example: + +``` +private NoWorkFoundStepExecutionListener tested = new NoWorkFoundStepExecutionListener(); + +@Test +public void testAfterStep() { + StepExecution stepExecution = MetaDataInstanceFactory.createStepExecution(); + + stepExecution.setExitStatus(ExitStatus.COMPLETED); + stepExecution.setReadCount(0); + + ExitStatus exitStatus = tested.afterStep(stepExecution); + assertEquals(ExitStatus.FAILED.getExitCode(), exitStatus.getExitCode()); +} +``` + +The preceding method for creating a simple `StepExecution` is just one convenience method +available within the factory. 
A full method listing can be found in its[Javadoc](https://docs.spring.io/spring-batch/apidocs/org/springframework/batch/test/MetaDataInstanceFactory.html). \ No newline at end of file diff --git a/docs/en/spring-batch/transaction-appendix.md b/docs/en/spring-batch/transaction-appendix.md new file mode 100644 index 0000000000000000000000000000000000000000..17ecb1280f2d80f63ac59d1195e0faf32d1b41d6 --- /dev/null +++ b/docs/en/spring-batch/transaction-appendix.md @@ -0,0 +1,310 @@ +# Batch Processing and Transactions + +## Appendix A: Batch Processing and Transactions + +### Simple Batching with No Retry + +Consider the following simple example of a nested batch with no retries. It shows a +common scenario for batch processing: An input source is processed until exhausted, and +we commit periodically at the end of a "chunk" of processing. + +``` +1 | REPEAT(until=exhausted) { +| +2 | TX { +3 | REPEAT(size=5) { +3.1 | input; +3.2 | output; +| } +| } +| +| } +``` + +The input operation (3.1) could be a message-based receive (such as from JMS), or a +file-based read, but to recover and continue processing with a chance of completing the +whole job, it must be transactional. The same applies to the operation at 3.2. It must +be either transactional or idempotent. + +If the chunk at `REPEAT` (3) fails because of a database exception at 3.2, then `TX` (2) +must roll back the whole chunk. + +### Simple Stateless Retry + +It is also useful to use a retry for an operation which is not transactional, such as a +call to a web-service or other remote resource, as shown in the following example: + +``` +0 | TX { +1 | input; +1.1 | output; +2 | RETRY { +2.1 | remote access; +| } +| } +``` + +This is actually one of the most useful applications of a retry, since a remote call is +much more likely to fail and be retryable than a database update. As long as the remote +access (2.1) eventually succeeds, the transaction, `TX` (0), commits. 
If the remote +access (2.1) eventually fails, then the transaction, `TX` (0), is guaranteed to roll +back. + +### Typical Repeat-Retry Pattern + +The most typical batch processing pattern is to add a retry to the inner block of the +chunk, as shown in the following example: + +``` +1 | REPEAT(until=exhausted, exception=not critical) { +| +2 | TX { +3 | REPEAT(size=5) { +| +4 | RETRY(stateful, exception=deadlock loser) { +4.1 | input; +5 | } PROCESS { +5.1 | output; +6 | } SKIP and RECOVER { +| notify; +| } +| +| } +| } +| +| } +``` + +The inner `RETRY` (4) block is marked as "stateful". See [the +typical use case](#transactionsNoRetry) for a description of a stateful retry. This means that if the +retry `PROCESS` (5) block fails, the behavior of the `RETRY` (4) is as follows: + +1. Throw an exception, rolling back the transaction, `TX` (2), at the chunk level, and + allowing the item to be re-presented to the input queue. + +2. When the item re-appears, it might be retried depending on the retry policy in place, + executing `PROCESS` (5) again. The second and subsequent attempts might fail again and + re-throw the exception. + +3. Eventually, the item reappears for the final time. The retry policy disallows another + attempt, so `PROCESS` (5) is never executed. In this case, we follow the `RECOVER` (6) + path, effectively "skipping" the item that was received and is being processed. + +Note that the notation used for the `RETRY` (4) in the plan above explicitly shows that +the input step (4.1) is part of the retry. It also makes clear that there are two +alternate paths for processing: the normal case, as denoted by `PROCESS` (5), and the +recovery path, as denoted in a separate block by `RECOVER` (6). The two alternate paths +are completely distinct. Only one is ever taken in normal circumstances. 
+ +In special cases (such as a special `TransactionValidException` type), the retry policy +might be able to determine that the `RECOVER` (6) path can be taken on the last attempt +after `PROCESS` (5) has just failed, instead of waiting for the item to be re-presented. +This is not the default behavior, because it requires detailed knowledge of what has +happened inside the `PROCESS` (5) block, which is not usually available. For example, if +the output included write access before the failure, then the exception should be +re-thrown to ensure transactional integrity. + +The completion policy in the outer `REPEAT` (1) is crucial to the success of the above +plan. If the output (5.1) fails, it may throw an exception (it usually does, as +described), in which case the transaction, `TX` (2), fails, and the exception could +propagate up through the outer batch `REPEAT` (1). We do not want the whole batch to +stop, because the `RETRY` (4) might still be successful if we try again, so we add`exception=not critical` to the outer `REPEAT` (1). + +Note, however, that if the `TX` (2) fails and we *do* try again, by virtue of the outer +completion policy, the item that is next processed in the inner `REPEAT` (3) is not +guaranteed to be the one that just failed. It might be, but it depends on the +implementation of the input (4.1). Thus, the output (5.1) might fail again on either a +new item or the old one. The client of the batch should not assume that each `RETRY` (4) +attempt is going to process the same items as the last one that failed. For example, if +the termination policy for `REPEAT` (1) is to fail after 10 attempts, it fails after 10 +consecutive attempts but not necessarily at the same item. This is consistent with the +overall retry strategy. The inner `RETRY` (4) is aware of the history of each item and +can decide whether or not to have another attempt at it. 
+ +### Asynchronous Chunk Processing + +The inner batches or chunks in the [typical example](#repeatRetry) can be executed +concurrently by configuring the outer batch to use an `AsyncTaskExecutor`. The outer +batch waits for all the chunks to complete before completing. The following example shows +asynchronous chunk processing: + +``` +1 | REPEAT(until=exhausted, concurrent, exception=not critical) { +| +2 | TX { +3 | REPEAT(size=5) { +| +4 | RETRY(stateful, exception=deadlock loser) { +4.1 | input; +5 | } PROCESS { +| output; +6 | } RECOVER { +| recover; +| } +| +| } +| } +| +| } +``` + +### Asynchronous Item Processing + +The individual items in chunks in the [typical example](#repeatRetry) can also, in +principle, be processed concurrently. In this case, the transaction boundary has to move +to the level of the individual item, so that each transaction is on a single thread, as +shown in the following example: + +``` +1 | REPEAT(until=exhausted, exception=not critical) { +| +2 | REPEAT(size=5, concurrent) { +| +3 | TX { +4 | RETRY(stateful, exception=deadlock loser) { +4.1 | input; +5 | } PROCESS { +| output; +6 | } RECOVER { +| recover; +| } +| } +| +| } +| +| } +``` + +This plan sacrifices the optimization benefit, which the simple plan had, of having all +the transactional resources chunked together. It is only useful if the cost of the +processing (5) is much higher than the cost of transaction management (3). + +### Interactions Between Batching and Transaction Propagation + +There is a tighter coupling between batch-retry and transaction management than we would +ideally like. In particular, a stateless retry cannot be used to retry database +operations with a transaction manager that does not support NESTED propagation. 
+ +The following example uses retry without repeat: + +``` +1 | TX { +| +1.1 | input; +2.2 | database access; +2 | RETRY { +3 | TX { +3.1 | database access; +| } +| } +| +| } +``` + +Again, and for the same reason, the inner transaction, `TX` (3), can cause the outer +transaction, `TX` (1), to fail, even if the `RETRY` (2) is eventually successful. + +Unfortunately, the same effect percolates from the retry block up to the surrounding +repeat batch if there is one, as shown in the following example: + +``` +1 | TX { +| +2 | REPEAT(size=5) { +2.1 | input; +2.2 | database access; +3 | RETRY { +4 | TX { +4.1 | database access; +| } +| } +| } +| +| } +``` + +Now, if TX (3) rolls back, it can pollute the whole batch at TX (1) and force it to roll +back at the end. + +What about non-default propagation? + +* In the preceding example, `PROPAGATION_REQUIRES_NEW` at `TX` (3) prevents the outer`TX` (1) from being polluted if both transactions are eventually successful. But if `TX`(3) commits and `TX` (1) rolls back, then `TX` (3) stays committed, so we violate the + transaction contract for `TX` (1). If `TX` (3) rolls back, `TX` (1) does not necessarily + (but it probably does in practice, because the retry throws a roll back exception). + +* `PROPAGATION_NESTED` at `TX` (3) works as we require in the retry case (and for a + batch with skips): `TX` (3) can commit but subsequently be rolled back by the outer + transaction, `TX` (1). If `TX` (3) rolls back, `TX` (1) rolls back in practice. This + option is only available on some platforms, not including Hibernate or + JTA, but it is the only one that consistently works. + +Consequently, the `NESTED` pattern is best if the retry block contains any database +access. + +### Special Case: Transactions with Orthogonal Resources + +Default propagation is always OK for simple cases where there are no nested database +transactions. 
Consider the following example, where the `SESSION` and `TX` are not +global `XA` resources, so their resources are orthogonal: + +``` +0 | SESSION { +1 | input; +2 | RETRY { +3 | TX { +3.1 | database access; +| } +| } +| } +``` + +Here there is a transactional message `SESSION` (0), but it does not participate in other +transactions with `PlatformTransactionManager`, so it does not propagate when `TX` (3) +starts. There is no database access outside the `RETRY` (2) block. If `TX` (3) fails and +then eventually succeeds on a retry, `SESSION` (0) can commit (independently of a `TX`block). This is similar to the vanilla "best-efforts-one-phase-commit" scenario. The +worst that can happen is a duplicate message when the `RETRY` (2) succeeds and the`SESSION` (0) cannot commit (for example, because the message system is unavailable). + +### Stateless Retry Cannot Recover + +The distinction between a stateless and a stateful retry in the typical example above is +important. It is actually ultimately a transactional constraint that forces the +distinction, and this constraint also makes it obvious why the distinction exists. + +We start with the observation that there is no way to skip an item that failed and +successfully commit the rest of the chunk unless we wrap the item processing in a +transaction. Consequently, we simplify the typical batch execution plan to be as +follows: + +``` +0 | REPEAT(until=exhausted) { +| +1 | TX { +2 | REPEAT(size=5) { +| +3 | RETRY(stateless) { +4 | TX { +4.1 | input; +4.2 | database access; +| } +5 | } RECOVER { +5.1 | skip; +| } +| +| } +| } +| +| } +``` + +The preceding example shows a stateless `RETRY` (3) with a `RECOVER` (5) path that kicks +in after the final attempt fails. The `stateless` label means that the block is repeated +without re-throwing any exception up to some limit. This only works if the transaction,`TX` (4), has propagation NESTED. 
+ +If the inner `TX` (4) has default propagation properties and rolls back, it pollutes the +outer `TX` (1). The inner transaction is assumed by the transaction manager to have +corrupted the transactional resource, so it cannot be used again. + +Support for NESTED propagation is sufficiently rare that we choose not to support +recovery with stateless retries in the current versions of Spring Batch. The same effect +can always be achieved (at the expense of repeating more processing) by using the +typical pattern above. \ No newline at end of file diff --git a/docs/en/spring-batch/whatsnew.md b/docs/en/spring-batch/whatsnew.md new file mode 100644 index 0000000000000000000000000000000000000000..b0ce201f64ccfa192c53e21159bc27c5531911b5 --- /dev/null +++ b/docs/en/spring-batch/whatsnew.md @@ -0,0 +1,145 @@ +# What’s New in Spring Batch 4.3 + +## What’s New in Spring Batch 4.3 + +This release comes with a number of new features, performance improvements, +dependency updates and API deprecations. This section describes the most +important changes. For a complete list of changes, please refer to the [release notes](https://github.com/spring-projects/spring-batch/releases/tag/4.3.0). + +### New features + +#### New synchronized ItemStreamWriter + +Similar to the `SynchronizedItemStreamReader`, this release introduces a `SynchronizedItemStreamWriter`. This feature is useful in multi-threaded steps +where concurrent threads need to be synchronized to not override each other’s writes. 
+ +#### New JpaQueryProvider for named queries + +This release introduces a new `JpaNamedQueryProvider` next to the `JpaNativeQueryProvider` to ease the configuration of JPA named queries when +using the `JpaPagingItemReader`: + +``` +JpaPagingItemReader reader = new JpaPagingItemReaderBuilder() + .name("fooReader") + .queryProvider(new JpaNamedQueryProvider("allFoos", Foo.class)) + // set other properties on the reader + .build(); +``` + +#### New JpaCursorItemReader Implementation + +JPA 2.2 added the ability to stream results as a cursor instead of only paging. +This release introduces a new JPA item reader that uses this feature to +stream results in a cursor-based fashion similar to the `JdbcCursorItemReader` and `HibernateCursorItemReader`. + +#### New JobParametersIncrementer implementation + +Similar to the `RunIdIncrementer`, this release adds a new `JobParametersIncrementer` that is based on a `DataFieldMaxValueIncrementer` from Spring Framework. + +#### GraalVM Support + +This release adds initial support to run Spring Batch applications on GraalVM. +The support is still experimental and will be improved in future releases. + +#### Java records Support + +This release adds support to use Java records as items in chunk-oriented steps. +The newly added `RecordFieldSetMapper` supports data mapping from flat files to +Java records, as shown in the following example: + +``` +@Bean +public FlatFileItemReader itemReader() { + return new FlatFileItemReaderBuilder() + .name("personReader") + .resource(new FileSystemResource("persons.csv")) + .delimited() + .names("id", "name") + .fieldSetMapper(new RecordFieldSetMapper<>(Person.class)) + .build(); +} +``` + +In this example, the `Person` type is a Java record defined as follows: + +``` +public record Person(int id, String name) { } +``` + +The `FlatFileItemReader` uses the new `RecordFieldSetMapper` to map data from +the `persons.csv` file to records of type `Person`. 
+ +### Performance improvements + +#### Use bulk writes in RepositoryItemWriter + +Up to version 4.2, in order to use `CrudRepository#saveAll` in `RepositoryItemWriter`, +it was required to extend the writer and override `write(List)`. + +In this release, the `RepositoryItemWriter` has been updated to use `CrudRepository#saveAll` by default. + +#### Use bulk writes in MongoItemWriter + +The `MongoItemWriter` used `MongoOperations#save()` in a for loop +to save items to the database. In this release, this writer has been +updated to use `org.springframework.data.mongodb.core.BulkOperations` instead. + +#### Job start/restart time improvement + +The implementation of `JobRepository#getStepExecutionCount()` used to load +all job executions and step executions in-memory to do the count on the framework +side. In this release, the implementation has been changed to do a single call to +the database with a SQL count query in order to count step executions. + +### Dependency updates + +This release updates dependent Spring projects to the following versions: + +* Spring Framework 5.3 + +* Spring Data 2020.0 + +* Spring Integration 5.4 + +* Spring AMQP 2.3 + +* Spring for Apache Kafka 2.6 + +* Micrometer 1.5 + +### Deprecations + +#### API deprecation + +The following is a list of APIs that have been deprecated in this release: + +* `org.springframework.batch.core.repository.support.MapJobRepositoryFactoryBean` + +* `org.springframework.batch.core.explore.support.MapJobExplorerFactoryBean` + +* `org.springframework.batch.core.repository.dao.MapJobInstanceDao` + +* `org.springframework.batch.core.repository.dao.MapJobExecutionDao` + +* `org.springframework.batch.core.repository.dao.MapStepExecutionDao` + +* `org.springframework.batch.core.repository.dao.MapExecutionContextDao` + +* `org.springframework.batch.item.data.AbstractNeo4jItemReader` + +* `org.springframework.batch.item.file.transform.Alignment` + +* `org.springframework.batch.item.xml.StaxUtils` + +* 
`org.springframework.batch.core.launch.support.ScheduledJobParametersFactory` + +* `org.springframework.batch.item.file.MultiResourceItemReader#getCurrentResource()` + +* `org.springframework.batch.core.JobExecution#stop()` + +Suggested replacements can be found in the Javadoc of each deprecated API. + +#### SQLFire support deprecation + +SQLFire has been in [EOL](https://www.vmware.com/latam/products/pivotal-sqlfire.html) since November 1st, 2014. This release deprecates the support of using SQLFire +as a job repository and schedules it for removal in version 5.0. \ No newline at end of file diff --git a/docs/en/spring-boot/README.md b/docs/en/spring-boot/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-boot/actuator.md b/docs/en/spring-boot/actuator.md new file mode 100644 index 0000000000000000000000000000000000000000..6f7ba832d48de7150337e88831bbbcf1333b9a86 --- /dev/null +++ b/docs/en/spring-boot/actuator.md @@ -0,0 +1,3195 @@ +# Production-ready Features + +Spring Boot includes a number of additional features to help you monitor and manage your application when you push it to production. +You can choose to manage and monitor your application by using HTTP endpoints or with JMX. +Auditing, health, and metrics gathering can also be automatically applied to your application. + +## 1. Enabling Production-ready Features + +The [`spring-boot-actuator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator) module provides all of Spring Boot’s production-ready features. +The recommended way to enable the features is to add a dependency on the `spring-boot-starter-actuator` “Starter”. + +Definition of Actuator + +An actuator is a manufacturing term that refers to a mechanical device for moving or controlling something. +Actuators can generate a large amount of motion from a small change. 
+ +To add the actuator to a Maven-based project, add the following ‘Starter’ dependency: + +``` + + + org.springframework.boot + spring-boot-starter-actuator + + +``` + +For Gradle, use the following declaration: + +``` +dependencies { + implementation 'org.springframework.boot:spring-boot-starter-actuator' +} +``` + +## 2. Endpoints + +Actuator endpoints let you monitor and interact with your application. +Spring Boot includes a number of built-in endpoints and lets you add your own. +For example, the `health` endpoint provides basic application health information. + +You can [enable or disable](#actuator.endpoints.enabling) each individual endpoint and [expose them (make them remotely accessible) over HTTP or JMX](#actuator.endpoints.exposing). +An endpoint is considered to be available when it is both enabled and exposed. +The built-in endpoints are auto-configured only when they are available. +Most applications choose exposure over HTTP, where the ID of the endpoint and a prefix of `/actuator` is mapped to a URL. +For example, by default, the `health` endpoint is mapped to `/actuator/health`. 
+ +| |To learn more about the Actuator’s endpoints and their request and response formats, see the separate API documentation ([HTML](https://docs.spring.io/spring-boot/docs/2.6.4/actuator-api/htmlsingle) or [PDF](https://docs.spring.io/spring-boot/docs/2.6.4/actuator-api/pdf/spring-boot-actuator-web-api.pdf)).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following technology-agnostic endpoints are available: + +| ID | Description | +|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `auditevents` | Exposes audit events information for the current application.
Requires an `AuditEventRepository` bean. | +| `beans` | Displays a complete list of all the Spring beans in your application. | +| `caches` | Exposes available caches. | +| `conditions` | Shows the conditions that were evaluated on configuration and auto-configuration classes and the reasons why they did or did not match. | +| `configprops` | Displays a collated list of all `@ConfigurationProperties`. | +| `env` | Exposes properties from Spring’s `ConfigurableEnvironment`. | +| `flyway` | Shows any Flyway database migrations that have been applied.
Requires one or more `Flyway` beans. | +| `health` | Shows application health information. | +| `httptrace` | Displays HTTP trace information (by default, the last 100 HTTP request-response exchanges).
Requires an `HttpTraceRepository` bean. | +| `info` | Displays arbitrary application info. | +|`integrationgraph`| Shows the Spring Integration graph.
Requires a dependency on `spring-integration-core`. | +| `loggers` | Shows and modifies the configuration of loggers in the application. | +| `liquibase` | Shows any Liquibase database migrations that have been applied.
Requires one or more `Liquibase` beans. | +| `metrics` | Shows “metrics” information for the current application. | +| `mappings` | Displays a collated list of all `@RequestMapping` paths. | +| `quartz` | Shows information about Quartz Scheduler jobs. | +| `scheduledtasks` | Displays the scheduled tasks in your application. | +| `sessions` | Allows retrieval and deletion of user sessions from a Spring Session-backed session store.
Requires a servlet-based web application that uses Spring Session. | +| `shutdown` | Lets the application be gracefully shutdown.
Disabled by default. | +| `startup` |Shows the [startup steps data](features.html#features.spring-application.startup-tracking) collected by the `ApplicationStartup`.
Requires the `SpringApplication` to be configured with a `BufferingApplicationStartup`.| +| `threaddump` | Performs a thread dump. | + +If your application is a web application (Spring MVC, Spring WebFlux, or Jersey), you can use the following additional endpoints: + +| ID | Description | +|------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `heapdump` | Returns a heap dump file.
On a HotSpot JVM, an `HPROF`-format file is returned.
On an OpenJ9 JVM, a `PHD`-format file is returned. | +| `jolokia` | Exposes JMX beans over HTTP when Jolokia is on the classpath (not available for WebFlux).
Requires a dependency on `jolokia-core`. | +| `logfile` |Returns the contents of the logfile (if the `logging.file.name` or the `logging.file.path` property has been set).
Supports the use of the HTTP `Range` header to retrieve part of the log file’s content.| +|`prometheus`| Exposes metrics in a format that can be scraped by a Prometheus server.
Requires a dependency on `micrometer-registry-prometheus`. | + +### 2.1. Enabling Endpoints + +By default, all endpoints except for `shutdown` are enabled. +To configure the enablement of an endpoint, use its `management.endpoint..enabled` property. +The following example enables the `shutdown` endpoint: + +Properties + +``` +management.endpoint.shutdown.enabled=true +``` + +Yaml + +``` +management: + endpoint: + shutdown: + enabled: true +``` + +If you prefer endpoint enablement to be opt-in rather than opt-out, set the `management.endpoints.enabled-by-default` property to `false` and use individual endpoint `enabled` properties to opt back in. +The following example enables the `info` endpoint and disables all other endpoints: + +Properties + +``` +management.endpoints.enabled-by-default=false +management.endpoint.info.enabled=true +``` + +Yaml + +``` +management: + endpoints: + enabled-by-default: false + endpoint: + info: + enabled: true +``` + +| |Disabled endpoints are removed entirely from the application context.
If you want to change only the technologies over which an endpoint is exposed, use the [`include` and `exclude` properties](#actuator.endpoints.exposing) instead.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.2. Exposing Endpoints + +Since Endpoints may contain sensitive information, you should carefully consider when to expose them. +The following table shows the default exposure for the built-in endpoints: + +| ID |JMX|Web| +|------------------|---|---| +| `auditevents` |Yes|No | +| `beans` |Yes|No | +| `caches` |Yes|No | +| `conditions` |Yes|No | +| `configprops` |Yes|No | +| `env` |Yes|No | +| `flyway` |Yes|No | +| `health` |Yes|Yes| +| `heapdump` |N/A|No | +| `httptrace` |Yes|No | +| `info` |Yes|No | +|`integrationgraph`|Yes|No | +| `jolokia` |N/A|No | +| `logfile` |N/A|No | +| `loggers` |Yes|No | +| `liquibase` |Yes|No | +| `metrics` |Yes|No | +| `mappings` |Yes|No | +| `prometheus` |N/A|No | +| `quartz` |Yes|No | +| `scheduledtasks` |Yes|No | +| `sessions` |Yes|No | +| `shutdown` |Yes|No | +| `startup` |Yes|No | +| `threaddump` |Yes|No | + +To change which endpoints are exposed, use the following technology-specific `include` and `exclude` properties: + +| Property |Default | +|-------------------------------------------|--------| +|`management.endpoints.jmx.exposure.exclude`| | +|`management.endpoints.jmx.exposure.include`| `*` | +|`management.endpoints.web.exposure.exclude`| | +|`management.endpoints.web.exposure.include`|`health`| + +The `include` property lists the IDs of the endpoints that are exposed. +The `exclude` property lists the IDs of the endpoints that should not be exposed. +The `exclude` property takes precedence over the `include` property. +You can configure both the `include` and the `exclude` properties with a list of endpoint IDs. 
+ +For example, to stop exposing all endpoints over JMX and only expose the `health` and `info` endpoints, use the following property: + +Properties + +``` +management.endpoints.jmx.exposure.include=health,info +``` + +Yaml + +``` +management: + endpoints: + jmx: + exposure: + include: "health,info" +``` + +`*` can be used to select all endpoints. +For example, to expose everything over HTTP except the `env` and `beans` endpoints, use the following properties: + +Properties + +``` +management.endpoints.web.exposure.include=* +management.endpoints.web.exposure.exclude=env,beans +``` + +Yaml + +``` +management: + endpoints: + web: + exposure: + include: "*" + exclude: "env,beans" +``` + +| |`*` has a special meaning in YAML, so be sure to add quotation marks if you want to include (or exclude) all endpoints.| +|---|-----------------------------------------------------------------------------------------------------------------------| + +| |If your application is exposed publicly, we strongly recommend that you also [secure your endpoints](#actuator.endpoints.security).| +|---|-----------------------------------------------------------------------------------------------------------------------------------| + +| |If you want to implement your own strategy for when endpoints are exposed, you can register an `EndpointFilter` bean.| +|---|---------------------------------------------------------------------------------------------------------------------| + +### 2.3. Security + +For security purposes, all actuators other than `/health` are disabled by default. +You can use the `management.endpoints.web.exposure.include` property to enable the actuators. 
+ +| |Before setting the `management.endpoints.web.exposure.include`, ensure that the exposed actuators do not contain sensitive information, are secured by placing them behind a firewall, or are secured by something like Spring Security.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If Spring Security is on the classpath and no other `WebSecurityConfigurerAdapter` or `SecurityFilterChain` bean is present, all actuators other than `/health` are secured by Spring Boot auto-configuration. +If you define a custom `WebSecurityConfigurerAdapter` or `SecurityFilterChain` bean, Spring Boot auto-configuration backs off and lets you fully control the actuator access rules. + +If you wish to configure custom security for HTTP endpoints (for example, to allow only users with a certain role to access them), Spring Boot provides some convenient `RequestMatcher` objects that you can use in combination with Spring Security. 
+ +A typical Spring Security configuration might look something like the following example: + +``` +import org.springframework.boot.actuate.autoconfigure.security.servlet.EndpointRequest; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.security.config.annotation.web.builders.HttpSecurity; +import org.springframework.security.web.SecurityFilterChain; + +@Configuration(proxyBeanMethods = false) +public class MySecurityConfiguration { + + @Bean + public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception { + http.requestMatcher(EndpointRequest.toAnyEndpoint()) + .authorizeRequests((requests) -> requests.anyRequest().hasRole("ENDPOINT_ADMIN")); + http.httpBasic(); + return http.build(); + } + +} + +``` + +The preceding example uses `EndpointRequest.toAnyEndpoint()` to match a request to any endpoint and then ensures that all have the `ENDPOINT_ADMIN` role. +Several other matcher methods are also available on `EndpointRequest`. +See the API documentation ([HTML](https://docs.spring.io/spring-boot/docs/2.6.4/actuator-api/htmlsingle) or [PDF](https://docs.spring.io/spring-boot/docs/2.6.4/actuator-api/pdf/spring-boot-actuator-web-api.pdf)) for details. + +If you deploy applications behind a firewall, you may prefer that all your actuator endpoints can be accessed without requiring authentication. 
+You can do so by changing the `management.endpoints.web.exposure.include` property, as follows: + +Properties + +``` +management.endpoints.web.exposure.include=* +``` + +Yaml + +``` +management: + endpoints: + web: + exposure: + include: "*" +``` + +Additionally, if Spring Security is present, you would need to add custom security configuration that allows unauthenticated access to the endpoints, as the following example shows: + +``` +import org.springframework.boot.actuate.autoconfigure.security.servlet.EndpointRequest; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.security.config.annotation.web.builders.HttpSecurity; +import org.springframework.security.web.SecurityFilterChain; + +@Configuration(proxyBeanMethods = false) +public class MySecurityConfiguration { + + @Bean + public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception { + http.requestMatcher(EndpointRequest.toAnyEndpoint()) + .authorizeRequests((requests) -> requests.anyRequest().permitAll()); + return http.build(); + } + +} + +``` + +| |In both of the preceding examples, the configuration applies only to the actuator endpoints.
Since Spring Boot’s security configuration backs off completely in the presence of any `SecurityFilterChain` bean, you need to configure an additional `SecurityFilterChain` bean with rules that apply to the rest of the application.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.3.1. Cross Site Request Forgery Protection + +Since Spring Boot relies on Spring Security’s defaults, CSRF protection is turned on by default. +This means that the actuator endpoints that require a `POST` (shutdown and loggers endpoints), a `PUT`, or a `DELETE` get a 403 (forbidden) error when the default security configuration is in use. + +| |We recommend disabling CSRF protection completely only if you are creating a service that is used by non-browser clients.| +|---|-------------------------------------------------------------------------------------------------------------------------| + +You can find additional information about CSRF protection in the [Spring Security Reference Guide](https://docs.spring.io/spring-security/reference/5.6.2/features/exploits/csrf.html). + +### 2.4. Configuring Endpoints + +Endpoints automatically cache responses to read operations that do not take any parameters. +To configure the amount of time for which an endpoint caches a response, use its `cache.time-to-live` property. 
+The following example sets the time-to-live of the `beans` endpoint’s cache to 10 seconds: + +Properties + +``` +management.endpoint.beans.cache.time-to-live=10s +``` + +Yaml + +``` +management: + endpoint: + beans: + cache: + time-to-live: "10s" +``` + +| |The `management.endpoint.` prefix uniquely identifies the endpoint that is being configured.| +|---|--------------------------------------------------------------------------------------------------| + +### 2.5. Hypermedia for Actuator Web Endpoints + +A “discovery page” is added with links to all the endpoints. +The “discovery page” is available on `/actuator` by default. + +To disable the “discovery page”, add the following property to your application properties: + +Properties + +``` +management.endpoints.web.discovery.enabled=false +``` + +Yaml + +``` +management: + endpoints: + web: + discovery: + enabled: false +``` + +When a custom management context path is configured, the “discovery page” automatically moves from `/actuator` to the root of the management context. +For example, if the management context path is `/management`, the discovery page is available from `/management`. +When the management context path is set to `/`, the discovery page is disabled to prevent the possibility of a clash with other mappings. + +### 2.6. CORS Support + +[Cross-origin resource sharing](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) (CORS) is a [W3C specification](https://www.w3.org/TR/cors/) that lets you specify in a flexible way what kind of cross-domain requests are authorized. +If you use Spring MVC or Spring WebFlux, you can configure Actuator’s web endpoints to support such scenarios. + +CORS support is disabled by default and is only enabled once you have set the `management.endpoints.web.cors.allowed-origins` property. 
+The following configuration permits `GET` and `POST` calls from the `example.com` domain: + +Properties + +``` +management.endpoints.web.cors.allowed-origins=https://example.com +management.endpoints.web.cors.allowed-methods=GET,POST +``` + +Yaml + +``` +management: + endpoints: + web: + cors: + allowed-origins: "https://example.com" + allowed-methods: "GET,POST" +``` + +| |See [`CorsEndpointProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/endpoint/web/CorsEndpointProperties.java) for a complete list of options.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.7. Implementing Custom Endpoints + +If you add a `@Bean` annotated with `@Endpoint`, any methods annotated with `@ReadOperation`, `@WriteOperation`, or `@DeleteOperation` are automatically exposed over JMX and, in a web application, over HTTP as well. +Endpoints can be exposed over HTTP by using Jersey, Spring MVC, or Spring WebFlux. +If both Jersey and Spring MVC are available, Spring MVC is used. + +The following example exposes a read operation that returns a custom object: + +``` +@ReadOperation +public CustomData getData() { + return new CustomData("test", 5); +} + +``` + +You can also write technology-specific endpoints by using `@JmxEndpoint` or `@WebEndpoint`. +These endpoints are restricted to their respective technologies. +For example, `@WebEndpoint` is exposed only over HTTP and not over JMX. + +You can write technology-specific extensions by using `@EndpointWebExtension` and `@EndpointJmxExtension`. +These annotations let you provide technology-specific operations to augment an existing endpoint. 
+ +Finally, if you need access to web-framework-specific functionality, you can implement servlet or Spring `@Controller` and `@RestController` endpoints at the cost of them not being available over JMX or when using a different web framework. + +#### 2.7.1. Receiving Input + +Operations on an endpoint receive input through their parameters. +When exposed over the web, the values for these parameters are taken from the URL’s query parameters and from the JSON request body. +When exposed over JMX, the parameters are mapped to the parameters of the MBean’s operations. +Parameters are required by default. +They can be made optional by annotating them with either `@javax.annotation.Nullable` or `@org.springframework.lang.Nullable`. + +You can map each root property in the JSON request body to a parameter of the endpoint. +Consider the following JSON request body: + +``` +{ + "name": "test", + "counter": 42 +} +``` + +You can use this to invoke a write operation that takes `String name` and `int counter` parameters, as the following example shows: + +``` +@WriteOperation +public void updateData(String name, int counter) { + // injects "test" and 42 +} + +``` + +| |Because endpoints are technology agnostic, only simple types can be specified in the method signature.
In particular, declaring a single parameter with a `CustomData` type that defines a `name` and `counter` properties is not supported.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |To let the input be mapped to the operation method’s parameters, Java code that implements an endpoint should be compiled with `-parameters`, and Kotlin code that implements an endpoint should be compiled with `-java-parameters`.
This will happen automatically if you use Spring Boot’s Gradle plugin or if you use Maven and `spring-boot-starter-parent`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Input Type Conversion + +The parameters passed to endpoint operation methods are, if necessary, automatically converted to the required type. +Before calling an operation method, the input received over JMX or HTTP is converted to the required types by using an instance of `ApplicationConversionService` as well as any `Converter` or `GenericConverter` beans qualified with `@EndpointConverter`. + +#### 2.7.2. Custom Web Endpoints + +Operations on an `@Endpoint`, `@WebEndpoint`, or `@EndpointWebExtension` are automatically exposed over HTTP using Jersey, Spring MVC, or Spring WebFlux. +If both Jersey and Spring MVC are available, Spring MVC is used. + +##### Web Endpoint Request Predicates + +A request predicate is automatically generated for each operation on a web-exposed endpoint. + +##### Path + +The path of the predicate is determined by the ID of the endpoint and the base path of the web-exposed endpoints. +The default base path is `/actuator`. +For example, an endpoint with an ID of `sessions` uses `/actuator/sessions` as its path in the predicate. + +You can further customize the path by annotating one or more parameters of the operation method with `@Selector`. +Such a parameter is added to the path predicate as a path variable. +The variable’s value is passed into the operation method when the endpoint operation is invoked. 
+If you want to capture all remaining path elements, you can add `@Selector(Match=ALL_REMAINING)` to the last parameter and make it a type that is conversion-compatible with a `String[]`. + +##### HTTP method + +The HTTP method of the predicate is determined by the operation type, as shown in the following table: + +| Operation |HTTP method| +|------------------|-----------| +| `@ReadOperation` | `GET` | +|`@WriteOperation` | `POST` | +|`@DeleteOperation`| `DELETE` | + +##### Consumes + +For a `@WriteOperation` (HTTP `POST`) that uses the request body, the `consumes` clause of the predicate is `application/vnd.spring-boot.actuator.v2+json, application/json`. +For all other operations, the `consumes` clause is empty. + +##### Produces + +The `produces` clause of the predicate can be determined by the `produces` attribute of the `@DeleteOperation`, `@ReadOperation`, and `@WriteOperation` annotations. +The attribute is optional. +If it is not used, the `produces` clause is determined automatically. + +If the operation method returns `void` or `Void`, the `produces` clause is empty. +If the operation method returns a `org.springframework.core.io.Resource`, the `produces` clause is `application/octet-stream`. +For all other operations, the `produces` clause is `application/vnd.spring-boot.actuator.v2+json, application/json`. + +##### Web Endpoint Response Status + +The default response status for an endpoint operation depends on the operation type (read, write, or delete) and what, if anything, the operation returns. + +If a `@ReadOperation` returns a value, the response status will be 200 (OK). +If it does not return a value, the response status will be 404 (Not Found). + +If a `@WriteOperation` or `@DeleteOperation` returns a value, the response status will be 200 (OK). +If it does not return a value, the response status will be 204 (No Content). 
+ +If an operation is invoked without a required parameter or with a parameter that cannot be converted to the required type, the operation method is not called, and the response status will be 400 (Bad Request). + +##### Web Endpoint Range Requests + +You can use an HTTP range request to request part of an HTTP resource. +When using Spring MVC or Spring Web Flux, operations that return a `org.springframework.core.io.Resource` automatically support range requests. + +| |Range requests are not supported when using Jersey.| +|---|---------------------------------------------------| + +##### Web Endpoint Security + +An operation on a web endpoint or a web-specific endpoint extension can receive the current `java.security.Principal` or `org.springframework.boot.actuate.endpoint.SecurityContext` as a method parameter. +The former is typically used in conjunction with `@Nullable` to provide different behavior for authenticated and unauthenticated users. +The latter is typically used to perform authorization checks by using its `isUserInRole(String)` method. + +#### 2.7.3. Servlet Endpoints + +A servlet can be exposed as an endpoint by implementing a class annotated with `@ServletEndpoint` that also implements `Supplier<EndpointServlet>`. +Servlet endpoints provide deeper integration with the servlet container but at the expense of portability. +They are intended to be used to expose an existing servlet as an endpoint. +For new endpoints, the `@Endpoint` and `@WebEndpoint` annotations should be preferred whenever possible. + +#### 2.7.4. Controller Endpoints + +You can use `@ControllerEndpoint` and `@RestControllerEndpoint` to implement an endpoint that is exposed only by Spring MVC or Spring WebFlux. +Methods are mapped by using the standard annotations for Spring MVC and Spring WebFlux, such as `@RequestMapping` and `@GetMapping`, with the endpoint’s ID being used as a prefix for the path. 
+Controller endpoints provide deeper integration with Spring’s web frameworks but at the expense of portability. +The `@Endpoint` and `@WebEndpoint` annotations should be preferred whenever possible. + +### 2.8. Health Information + +You can use health information to check the status of your running application. +It is often used by monitoring software to alert someone when a production system goes down. +The information exposed by the `health` endpoint depends on the `management.endpoint.health.show-details` and `management.endpoint.health.show-components` properties, which can be configured with one of the following values: + +| Name | Description | +|-----------------|-------------------------------------------------------------------------------------------------------------------------------| +| `never` | Details are never shown. | +|`when-authorized`|Details are shown only to authorized users.
Authorized roles can be configured by using `management.endpoint.health.roles`.| +| `always` | Details are shown to all users. | + +The default value is `never`. +A user is considered to be authorized when they are in one or more of the endpoint’s roles. +If the endpoint has no configured roles (the default), all authenticated users are considered to be authorized. +You can configure the roles by using the `management.endpoint.health.roles` property. + +| |If you have secured your application and wish to use `always`, your security configuration must permit access to the health endpoint for both authenticated and unauthenticated users.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Health information is collected from the content of a [`HealthContributorRegistry`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/HealthContributorRegistry.java) (by default, all [`HealthContributor`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/HealthContributor.java) instances defined in your `ApplicationContext`). +Spring Boot includes a number of auto-configured `HealthContributors`, and you can also write your own. + +A `HealthContributor` can be either a `HealthIndicator` or a `CompositeHealthContributor`. +A `HealthIndicator` provides actual health information, including a `Status`. +A `CompositeHealthContributor` provides a composite of other `HealthContributors`. +Taken together, contributors form a tree structure to represent the overall system health. + +By default, the final system health is derived by a `StatusAggregator`, which sorts the statuses from each `HealthIndicator` based on an ordered list of statuses. 
+The first status in the sorted list is used as the overall health status. +If no `HealthIndicator` returns a status that is known to the `StatusAggregator`, an `UNKNOWN` status is used. + +| |You can use the `HealthContributorRegistry` to register and unregister health indicators at runtime.| +|---|----------------------------------------------------------------------------------------------------| + +#### 2.8.1. Auto-configured HealthIndicators + +When appropriate, Spring Boot auto-configures the `HealthIndicators` listed in the following table. +You can also enable or disable selected indicators by configuring `management.health.key.enabled`, +with the `key` listed in the following table: + +| Key | Name | Description | +|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `cassandra` | [`CassandraDriverHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/cassandra/CassandraDriverHealthIndicator.java) | Checks that a Cassandra database is up. | +| `couchbase` | [`CouchbaseHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/couchbase/CouchbaseHealthIndicator.java) | Checks that a Couchbase cluster is up. 
| +| `db` | [`DataSourceHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/jdbc/DataSourceHealthIndicator.java) |Checks that a connection to `DataSource` can be obtained.| +| `diskspace` | [`DiskSpaceHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/system/DiskSpaceHealthIndicator.java) | Checks for low disk space. | +|`elasticsearch`|[`ElasticsearchRestHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/elasticsearch/ElasticsearchRestHealthIndicator.java)| Checks that an Elasticsearch cluster is up. | +| `hazelcast` | [`HazelcastHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/hazelcast/HazelcastHealthIndicator.java) | Checks that a Hazelcast server is up. | +| `influxdb` | [`InfluxDbHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/influx/InfluxDbHealthIndicator.java) | Checks that an InfluxDB server is up. | +| `jms` | [`JmsHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/jms/JmsHealthIndicator.java) | Checks that a JMS broker is up. | +| `ldap` | [`LdapHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/ldap/LdapHealthIndicator.java) | Checks that an LDAP server is up. 
| +| `mail` | [`MailHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/mail/MailHealthIndicator.java) | Checks that a mail server is up. | +| `mongo` | [`MongoHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/mongo/MongoHealthIndicator.java) | Checks that a Mongo database is up. | +| `neo4j` | [`Neo4jHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/neo4j/Neo4jHealthIndicator.java) | Checks that a Neo4j database is up. | +| `ping` | [`PingHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/PingHealthIndicator.java) | Always responds with `UP`. | +| `rabbit` | [`RabbitHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/amqp/RabbitHealthIndicator.java) | Checks that a Rabbit server is up. | +| `redis` | [`RedisHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/redis/RedisHealthIndicator.java) | Checks that a Redis server is up. | +| `solr` | [`SolrHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/solr/SolrHealthIndicator.java) | Checks that a Solr server is up. 
| + +| |You can disable them all by setting the `management.health.defaults.enabled` property.| +|---|--------------------------------------------------------------------------------------| + +Additional `HealthIndicators` are available but are not enabled by default: + +| Key | Name | Description | +|----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------| +|`livenessstate` | [`LivenessStateHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/availability/LivenessStateHealthIndicator.java) |Exposes the “Liveness” application availability state. | +|`readinessstate`|[`ReadinessStateHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/availability/ReadinessStateHealthIndicator.java)|Exposes the “Readiness” application availability state.| + +#### 2.8.2. Writing Custom HealthIndicators + +To provide custom health information, you can register Spring beans that implement the [`HealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/HealthIndicator.java) interface. +You need to provide an implementation of the `health()` method and return a `Health` response. +The `Health` response should include a status and can optionally include additional details to be displayed. 
+The following code shows a sample `HealthIndicator` implementation: + +``` +import org.springframework.boot.actuate.health.Health; +import org.springframework.boot.actuate.health.HealthIndicator; +import org.springframework.stereotype.Component; + +@Component +public class MyHealthIndicator implements HealthIndicator { + + @Override + public Health health() { + int errorCode = check(); + if (errorCode != 0) { + return Health.down().withDetail("Error Code", errorCode).build(); + } + return Health.up().build(); + } + + private int check() { + // perform some specific health check + return ... + } + +} + +``` + +| |The identifier for a given `HealthIndicator` is the name of the bean without the `HealthIndicator` suffix, if it exists.
In the preceding example, the health information is available in an entry named `my`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In addition to Spring Boot’s predefined [`Status`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/Status.java) types, `Health` can return a custom `Status` that represents a new system state. +In such cases, you also need to provide a custom implementation of the [`StatusAggregator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/StatusAggregator.java) interface, or you must configure the default implementation by using the `management.endpoint.health.status.order` configuration property. + +For example, assume a new `Status` with a code of `FATAL` is being used in one of your `HealthIndicator` implementations. +To configure the severity order, add the following property to your application properties: + +Properties + +``` +management.endpoint.health.status.order=fatal,down,out-of-service,unknown,up +``` + +Yaml + +``` +management: + endpoint: + health: + status: + order: "fatal,down,out-of-service,unknown,up" +``` + +The HTTP status code in the response reflects the overall health status. +By default, `OUT_OF_SERVICE` and `DOWN` map to 503. +Any unmapped health statuses, including `UP`, map to 200. +You might also want to register custom status mappings if you access the health endpoint over HTTP. +Configuring a custom mapping disables the defaults mappings for `DOWN` and `OUT_OF_SERVICE`. +If you want to retain the default mappings, you must explicitly configure them, alongside any custom mappings. 
+For example, the following property maps `FATAL` to 503 (service unavailable) and retains the default mappings for `DOWN` and `OUT_OF_SERVICE`: + +Properties + +``` +management.endpoint.health.status.http-mapping.down=503 +management.endpoint.health.status.http-mapping.fatal=503 +management.endpoint.health.status.http-mapping.out-of-service=503 +``` + +Yaml + +``` +management: + endpoint: + health: + status: + http-mapping: + down: 503 + fatal: 503 + out-of-service: 503 +``` + +| |If you need more control, you can define your own `HttpCodeStatusMapper` bean.| +|---|------------------------------------------------------------------------------| + +The following table shows the default status mappings for the built-in statuses: + +| Status | Mapping | +|----------------|----------------------------------------------| +| `DOWN` | `SERVICE_UNAVAILABLE` (`503`) | +|`OUT_OF_SERVICE`| `SERVICE_UNAVAILABLE` (`503`) | +| `UP` |No mapping by default, so HTTP status is `200`| +| `UNKNOWN` |No mapping by default, so HTTP status is `200`| + +#### 2.8.3. Reactive Health Indicators + +For reactive applications, such as those that use Spring WebFlux, `ReactiveHealthContributor` provides a non-blocking contract for getting application health. 
+Similar to a traditional `HealthContributor`, health information is collected from the content of a [`ReactiveHealthContributorRegistry`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/ReactiveHealthContributorRegistry.java) (by default, all [`HealthContributor`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/HealthContributor.java) and [`ReactiveHealthContributor`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/ReactiveHealthContributor.java) instances defined in your `ApplicationContext`). +Regular `HealthContributors` that do not check against a reactive API are executed on the elastic scheduler. + +| |In a reactive application, you should use the `ReactiveHealthContributorRegistry` to register and unregister health indicators at runtime.
If you need to register a regular `HealthContributor`, you should wrap it with `ReactiveHealthContributor#adapt`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To provide custom health information from a reactive API, you can register Spring beans that implement the [`ReactiveHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/ReactiveHealthIndicator.java) interface. +The following code shows a sample `ReactiveHealthIndicator` implementation: + +``` +import reactor.core.publisher.Mono; + +import org.springframework.boot.actuate.health.Health; +import org.springframework.boot.actuate.health.ReactiveHealthIndicator; +import org.springframework.stereotype.Component; + +@Component +public class MyReactiveHealthIndicator implements ReactiveHealthIndicator { + +    @Override +    public Mono<Health> health() { +        return doHealthCheck().onErrorResume((exception) -> +            Mono.just(new Health.Builder().down(exception).build())); +    } + +    private Mono<Health> doHealthCheck() { +        // perform some specific health check +        return ... +    } + +} + +``` + +| |To handle the error automatically, consider extending from `AbstractReactiveHealthIndicator`.| +|---|---------------------------------------------------------------------------------------------| + +#### 2.8.4. 
Auto-configured ReactiveHealthIndicators #### + +When appropriate, Spring Boot auto-configures the following `ReactiveHealthIndicators`: + +| Key | Name | Description | +|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------| +| `cassandra` |[`CassandraDriverReactiveHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/cassandra/CassandraDriverReactiveHealthIndicator.java)| Checks that a Cassandra database is up. | +| `couchbase` | [`CouchbaseReactiveHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/couchbase/CouchbaseReactiveHealthIndicator.java) | Checks that a Couchbase cluster is up. | +|`elasticsearch`|[`ElasticsearchReactiveHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/elasticsearch/ElasticsearchReactiveHealthIndicator.java)|Checks that an Elasticsearch cluster is up.| +| `mongo` | [`MongoReactiveHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/mongo/MongoReactiveHealthIndicator.java) | Checks that a Mongo database is up. | +| `neo4j` | [`Neo4jReactiveHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/neo4j/Neo4jReactiveHealthIndicator.java) | Checks that a Neo4j database is up. 
| +| `redis` | [`RedisReactiveHealthIndicator`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/redis/RedisReactiveHealthIndicator.java) | Checks that a Redis server is up. | + +| |If necessary, reactive indicators replace the regular ones.
Also, any `HealthIndicator` that is not handled explicitly is wrapped automatically.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.8.5. Health Groups + +It is sometimes useful to organize health indicators into groups that you can use for different purposes. + +To create a health indicator group, you can use the `management.endpoint.health.group.<name>` property and specify a list of health indicator IDs to `include` or `exclude`. +For example, to create a group that includes only database indicators you can define the following: + +Properties + +``` +management.endpoint.health.group.custom.include=db +``` + +Yaml + +``` +management: +  endpoint: +    health: +      group: +        custom: +          include: "db" +``` + +You can then check the result by hitting `[localhost:8080/actuator/health/custom](http://localhost:8080/actuator/health/custom)`. + +Similarly, to create a group that excludes the database indicators from the group and includes all the other indicators, you can define the following: + +Properties + +``` +management.endpoint.health.group.custom.exclude=db +``` + +Yaml + +``` +management: +  endpoint: +    health: +      group: +        custom: +          exclude: "db" +``` + +By default, groups inherit the same `StatusAggregator` and `HttpCodeStatusMapper` settings as the system health. +However, you can also define these on a per-group basis. 
+You can also override the `show-details` and `roles` properties if required: + +Properties + +``` +management.endpoint.health.group.custom.show-details=when-authorized +management.endpoint.health.group.custom.roles=admin +management.endpoint.health.group.custom.status.order=fatal,up +management.endpoint.health.group.custom.status.http-mapping.fatal=500 +management.endpoint.health.group.custom.status.http-mapping.out-of-service=500 +``` + +Yaml + +``` +management: + endpoint: + health: + group: + custom: + show-details: "when-authorized" + roles: "admin" + status: + order: "fatal,up" + http-mapping: + fatal: 500 + out-of-service: 500 +``` + +| |You can use `@Qualifier("groupname")` if you need to register custom `StatusAggregator` or `HttpCodeStatusMapper` beans for use with the group.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------| + +A health group can also include/exclude a `CompositeHealthContributor`. +You can also include/exclude only a certain component of a `CompositeHealthContributor`. +This can be done using the fully qualified name of the component as follows: + +``` +management.endpoint.health.group.custom.include="test/primary" +management.endpoint.health.group.custom.exclude="test/primary/b" +``` + +In the example above, the `custom` group will include the `HealthContributor` with the name `primary` which is a component of the composite `test`. +Here, `primary` itself is a composite and the `HealthContributor` with the name `b` will be excluded from the `custom` group. + +Health groups can be made available at an additional path on either the main or management port. +This is useful in cloud environments such as Kubernetes, where it is quite common to use a separate management port for the actuator endpoints for security purposes. 
+Having a separate port could lead to unreliable health checks because the main application might not work properly even if the health check is successful. +The health group can be configured with an additional path as follows: + +``` +management.endpoint.health.group.live.additional-path="server:/healthz" +``` + +This would make the `live` health group available on the main server port at `/healthz`. +The prefix is mandatory and must be either `server:` (represents the main server port) or `management:` (represents the management port, if configured.) +The path must be a single path segment. + +#### 2.8.6. DataSource Health + +The `DataSource` health indicator shows the health of both standard data sources and routing data source beans. +The health of a routing data source includes the health of each of its target data sources. +In the health endpoint’s response, each of a routing data source’s targets is named by using its routing key. +If you prefer not to include routing data sources in the indicator’s output, set `management.health.db.ignore-routing-data-sources` to `true`. + +### 2.9. Kubernetes Probes + +Applications deployed on Kubernetes can provide information about their internal state with [Container Probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). +Depending on [your Kubernetes configuration](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/), the kubelet calls those probes and reacts to the result. + +By default, Spring Boot manages your [Application Availability State](features.html#features.spring-application.application-availability). 
+If deployed in a Kubernetes environment, actuator gathers the “Liveness” and “Readiness” information from the `ApplicationAvailability` interface and uses that information in dedicated [health indicators](#actuator.endpoints.health.auto-configured-health-indicators): `LivenessStateHealthIndicator` and `ReadinessStateHealthIndicator`. +These indicators are shown on the global health endpoint (`"/actuator/health"`). +They are also exposed as separate HTTP Probes by using [health groups](#actuator.endpoints.health.groups): `"/actuator/health/liveness"` and `"/actuator/health/readiness"`. + +You can then configure your Kubernetes infrastructure with the following endpoint information: + +``` +livenessProbe: +  httpGet: +    path: "/actuator/health/liveness" +    port: <actuator-port> +  failureThreshold: ... +  periodSeconds: ... + +readinessProbe: +  httpGet: +    path: "/actuator/health/readiness" +    port: <actuator-port> +  failureThreshold: ... +  periodSeconds: ... +``` + +| |`<actuator-port>` should be set to the port that the actuator endpoints are available on. 
It could be the main web server port or a separate management port if the `"management.server.port"` property has been set.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +These health groups are automatically enabled only if the application [runs in a Kubernetes environment](deployment.html#deployment.cloud.kubernetes). +You can enable them in any environment by using the `management.endpoint.health.probes.enabled` configuration property. + +| |If an application takes longer to start than the configured liveness period, Kubernetes mentions the `"startupProbe"` as a possible solution.
The `"startupProbe"` is not necessarily needed here, as the `"readinessProbe"` fails until all startup tasks are done. See the section that describes [how probes behave during the application lifecycle](#actuator.endpoints.kubernetes-probes.lifecycle).| +|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If your Actuator endpoints are deployed on a separate management context, the endpoints do not use the same web infrastructure (port, connection pools, framework components) as the main application. +In this case, a probe check could be successful even if the main application does not work properly (for example, it cannot accept new connections). +For this reason, it is a good idea to make the `liveness` and `readiness` health groups available on the main server port. +This can be done by setting the following property: + +``` +management.endpoint.health.probes.add-additional-paths=true +``` + +This would make `liveness` available at `/livez` and `readiness` at `/readyz` on the main server port. + +#### 2.9.1. Checking External State with Kubernetes Probes #### + +Actuator configures the “liveness” and “readiness” probes as Health Groups. +This means that all the [health groups features](#actuator.endpoints.health.groups) are available for them. +You can, for example, configure additional Health Indicators: + +Properties + +``` +management.endpoint.health.group.readiness.include=readinessState,customCheck +``` + +Yaml + +``` +management: +  endpoint: +    health: +      group: +        readiness: +          include: "readinessState,customCheck" +``` + +By default, Spring Boot does not add other health indicators to these groups. 
+ +The “liveness” probe should not depend on health checks for external systems. +If the [liveness state of an application](features.html#features.spring-application.application-availability.liveness) is broken, Kubernetes tries to solve that problem by restarting the application instance. +This means that if an external system (such as a database, a Web API, or an external cache) fails, Kubernetes might restart all application instances and create cascading failures. + +As for the “readiness” probe, the choice of checking external systems must be made carefully by the application developers. +For this reason, Spring Boot does not include any additional health checks in the readiness probe. +If the [readiness state of an application instance](features.html#features.spring-application.application-availability.readiness) is unready, Kubernetes does not route traffic to that instance. +Some external systems might not be shared by application instances, in which case they could be included in a readiness probe. +Other external systems might not be essential to the application (the application could have circuit breakers and fallbacks), in which case they definitely should not be included. +Unfortunately, an external system that is shared by all application instances is common, and you have to make a judgement call: Include it in the readiness probe and expect that the application is taken out of service when the external service is down or leave it out and deal with failures higher up the stack, perhaps by using a circuit breaker in the caller. + +| |If all instances of an application are unready, a Kubernetes Service with `type=ClusterIP` or `NodePort` does not accept any incoming connections.
There is no HTTP error response (503 and so on), since there is no connection.
A service with `type=LoadBalancer` might or might not accept connections, depending on the provider.
A service that has an explicit [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) also responds in a way that depends on the implementation — the ingress service itself has to decide how to handle the “connection refused” from downstream.
HTTP 503 is quite likely in the case of both load balancer and ingress.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Also, if an application uses Kubernetes [autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), it may react differently to applications being taken out of the load-balancer, depending on its autoscaler configuration. + +#### 2.9.2. Application Lifecycle and Probe States + +An important aspect of the Kubernetes Probes support is its consistency with the application lifecycle. +There is a significant difference between the `AvailabilityState` (which is the in-memory, internal state of the application) +and the actual probe (which exposes that state). +Depending on the phase of application lifecycle, the probe might not be available. + +Spring Boot publishes [application events during startup and shutdown](features.html#features.spring-application.application-events-and-listeners), +and probes can listen to such events and expose the `AvailabilityState` information. + +The following tables show the `AvailabilityState` and the state of HTTP connectors at different stages. 
+ +When a Spring Boot application starts: + +|Startup phase|LivenessState| ReadinessState | HTTP server | Notes | +|-------------|-------------|-------------------|----------------|--------------------------------------------------------------------------------------------------------------| +| Starting | `BROKEN` |`REFUSING_TRAFFIC` | Not started | Kubernetes checks the "liveness" Probe and restarts the application if it takes too long. | +| Started | `CORRECT` |`REFUSING_TRAFFIC` |Refuses requests|The application context is refreshed. The application performs startup tasks and does not receive traffic yet.| +| Ready | `CORRECT` |`ACCEPTING_TRAFFIC`|Accepts requests| Startup tasks are finished. The application is receiving traffic. | + +When a Spring Boot application shuts down: + +| Shutdown phase |Liveness State| Readiness State | HTTP server | Notes | +|-----------------|--------------|-------------------|-------------------------|---------------------------------------------------------------------------------------------| +| Running | `CORRECT` |`ACCEPTING_TRAFFIC`| Accepts requests | Shutdown has been requested. | +|Graceful shutdown| `CORRECT` |`REFUSING_TRAFFIC` |New requests are rejected|If enabled, [graceful shutdown processes in-flight requests](web.html#web.graceful-shutdown).| +|Shutdown complete| N/A | N/A | Server is shut down | The application context is closed and the application is shut down. | + +| |See [Kubernetes container lifecycle section](deployment.html#deployment.cloud.kubernetes.container-lifecycle) for more information about Kubernetes deployment.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.10. 
Application Information + +Application information exposes various information collected from all [`InfoContributor`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/info/InfoContributor.java) beans defined in your `ApplicationContext`. +Spring Boot includes a number of auto-configured `InfoContributor` beans, and you can write your own. + +#### 2.10.1. Auto-configured InfoContributors + +When appropriate, Spring auto-configures the following `InfoContributor` beans: + +| ID | Name | Description | Prerequisites | +|-------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------|--------------------------------------------| +|`build`| [`BuildInfoContributor`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/info/BuildInfoContributor.java) | Exposes build information. |A `META-INF/build-info.properties` resource.| +| `env` |[`EnvironmentInfoContributor`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/info/EnvironmentInfoContributor.java)|Exposes any property from the `Environment` whose name starts with `info.`.| None. | +| `git` | [`GitInfoContributor`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/info/GitInfoContributor.java) | Exposes git information. | A `git.properties` resource. 
| +|`java` | [`JavaInfoContributor`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/info/JavaInfoContributor.java) | Exposes Java runtime information. | None. | + +Whether or not an individual contributor is enabled is controlled by its `management.info..enabled` property. +Different contributors have different defaults for this property, depending on their prerequisites and the nature of the information that they expose. + +With no prerequisites to indicate that they should be enabled, the `env` and `java` contributors are disabled by default. +You can enable them by setting the `management.info.env.enabled` or `management.info.java.enabled` properties to `true`. + +The `build` and `git` info contributors are enabled by default. +Each can be disabled by setting its `management.info..enabled` property to `false`. +Alternatively, to disable every contributor that is usually enabled by default, set the `management.info.defaults.enabled` property to `false`. + +#### 2.10.2. Custom Application Information + +When the `env` contributor is enabled, you can customize the data exposed by the `info` endpoint by setting `info.*` Spring properties. +All `Environment` properties under the `info` key are automatically exposed. +For example, you could add the following settings to your `application.properties` file: + +Properties + +``` +info.app.encoding=UTF-8 +info.app.java.source=11 +info.app.java.target=11 +``` + +Yaml + +``` +info: + app: + encoding: "UTF-8" + java: + source: "11" + target: "11" +``` + +| |Rather than hardcoding those values, you could also [expand info properties at build time](howto.html#howto.properties-and-configuration.expand-properties).

Assuming you use Maven, you could rewrite the preceding example as follows:

Properties

```
info.app.encoding=@project.build.sourceEncoding@
info.app.java.source=@java.version@
info.app.java.target=@java.version@
```

Yaml

```
info:
  app:
    encoding: "@project.build.sourceEncoding@"
    java:
      source: "@java.version@"
      target: "@java.version@"
```| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.10.3. Git Commit Information + +Another useful feature of the `info` endpoint is its ability to publish information about the state of your `git` source code repository when the project was built. +If a `GitProperties` bean is available, you can use the `info` endpoint to expose these properties. + +| |A `GitProperties` bean is auto-configured if a `git.properties` file is available at the root of the classpath.
See "[how to generate git information](howto.html#howto.build.generate-git-info)" for more detail.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, the endpoint exposes `git.branch`, `git.commit.id`, and `git.commit.time` properties, if present. +If you do not want any of these properties in the endpoint response, they need to be excluded from the `git.properties` file. +If you want to display the full git information (that is, the full content of `git.properties`), use the `management.info.git.mode` property, as follows: + +Properties + +``` +management.info.git.mode=full +``` + +Yaml + +``` +management: + info: + git: + mode: "full" +``` + +To disable the git commit information from the `info` endpoint completely, set the `management.info.git.enabled` property to `false`, as follows: + +Properties + +``` +management.info.git.enabled=false +``` + +Yaml + +``` +management: + info: + git: + enabled: false +``` + +#### 2.10.4. Build Information + +If a `BuildProperties` bean is available, the `info` endpoint can also publish information about your build. +This happens if a `META-INF/build-info.properties` file is available in the classpath. + +| |The Maven and Gradle plugins can both generate that file.
See "[how to generate build information](howto.html#howto.build.generate-info)" for more details.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.10.5. Java Information + +The `info` endpoint publishes information about your Java runtime environment, see [`JavaInfo`](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/info/JavaInfo.html) for more details. + +#### 2.10.6. Writing Custom InfoContributors + +To provide custom application information, you can register Spring beans that implement the [`InfoContributor`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/info/InfoContributor.java) interface. + +The following example contributes an `example` entry with a single value: + +``` +import java.util.Collections; + +import org.springframework.boot.actuate.info.Info; +import org.springframework.boot.actuate.info.InfoContributor; +import org.springframework.stereotype.Component; + +@Component +public class MyInfoContributor implements InfoContributor { + + @Override + public void contribute(Info.Builder builder) { + builder.withDetail("example", Collections.singletonMap("key", "value")); + } + +} + +``` + +If you reach the `info` endpoint, you should see a response that contains the following additional entry: + +``` +{ + "example": { + "key" : "value" + } +} +``` + +## 3. Monitoring and Management over HTTP + +If you are developing a web application, Spring Boot Actuator auto-configures all enabled endpoints to be exposed over HTTP. +The default convention is to use the `id` of the endpoint with a prefix of `/actuator` as the URL path. +For example, `health` is exposed as `/actuator/health`. + +| |Actuator is supported natively with Spring MVC, Spring WebFlux, and Jersey.
If both Jersey and Spring MVC are available, Spring MVC is used.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Jackson is a required dependency in order to get the correct JSON responses as documented in the API documentation ([HTML](https://docs.spring.io/spring-boot/docs/2.6.4/actuator-api/htmlsingle) or [PDF](https://docs.spring.io/spring-boot/docs/2.6.4/actuator-api/pdf/spring-boot-actuator-web-api.pdf)).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.1. Customizing the Management Endpoint Paths ### + +Sometimes, it is useful to customize the prefix for the management endpoints. +For example, your application might already use `/actuator` for another purpose. +You can use the `management.endpoints.web.base-path` property to change the prefix for your management endpoint, as the following example shows: + +Properties + +``` +management.endpoints.web.base-path=/manage +``` + +Yaml + +``` +management: + endpoints: + web: + base-path: "/manage" +``` + +The preceding `application.properties` example changes the endpoint from `/actuator/{id}` to `/manage/{id}` (for example, `/manage/info`). + +| |Unless the management port has been configured to [expose endpoints by using a different HTTP port](#actuator.monitoring.customizing-management-server-port), `management.endpoints.web.base-path` is relative to `server.servlet.context-path` (for servlet web applications) or `spring.webflux.base-path` (for reactive web applications).
If `management.server.port` is configured, `management.endpoints.web.base-path` is relative to `management.server.base-path`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you want to map endpoints to a different path, you can use the `management.endpoints.web.path-mapping` property. + +The following example remaps `/actuator/health` to `/healthcheck`: + +Properties + +``` +management.endpoints.web.base-path=/ +management.endpoints.web.path-mapping.health=healthcheck +``` + +Yaml + +``` +management: + endpoints: + web: + base-path: "/" + path-mapping: + health: "healthcheck" +``` + +### 3.2. Customizing the Management Server Port + +Exposing management endpoints by using the default HTTP port is a sensible choice for cloud-based deployments. +If, however, your application runs inside your own data center, you may prefer to expose endpoints by using a different HTTP port. + +You can set the `management.server.port` property to change the HTTP port, as the following example shows: + +Properties + +``` +management.server.port=8081 +``` + +Yaml + +``` +management: + server: + port: 8081 +``` + +| |On Cloud Foundry, by default, applications receive requests only on port 8080 for both HTTP and TCP routing.
If you want to use a custom management port on Cloud Foundry, you need to explicitly set up the application’s routes to forward traffic to the custom port.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.3. Configuring Management-specific SSL + +When configured to use a custom port, you can also configure the management server with its own SSL by using the various `management.server.ssl.*` properties. +For example, doing so lets a management server be available over HTTP while the main application uses HTTPS, as the following property settings show: + +Properties + +``` +server.port=8443 +server.ssl.enabled=true +server.ssl.key-store=classpath:store.jks +server.ssl.key-password=secret +management.server.port=8080 +management.server.ssl.enabled=false +``` + +Yaml + +``` +server: + port: 8443 + ssl: + enabled: true + key-store: "classpath:store.jks" + key-password: "secret" +management: + server: + port: 8080 + ssl: + enabled: false +``` + +Alternatively, both the main server and the management server can use SSL but with different key stores, as follows: + +Properties + +``` +server.port=8443 +server.ssl.enabled=true +server.ssl.key-store=classpath:main.jks +server.ssl.key-password=secret +management.server.port=8080 +management.server.ssl.enabled=true +management.server.ssl.key-store=classpath:management.jks +management.server.ssl.key-password=secret +``` + +Yaml + +``` +server: + port: 8443 + ssl: + enabled: true + key-store: "classpath:main.jks" + key-password: "secret" +management: + server: + port: 8080 + ssl: + enabled: true + key-store: "classpath:management.jks" + key-password: "secret" +``` + +### 3.4. 
Customizing the Management Server Address + +You can customize the address on which the management endpoints are available by setting the `management.server.address` property. +Doing so can be useful if you want to listen only on an internal or ops-facing network or to listen only for connections from `localhost`. + +| |You can listen on a different address only when the port differs from the main server port.| +|---|-------------------------------------------------------------------------------------------| + +The following example `application.properties` does not allow remote management connections: + +Properties + +``` +management.server.port=8081 +management.server.address=127.0.0.1 +``` + +Yaml + +``` +management: + server: + port: 8081 + address: "127.0.0.1" +``` + +### 3.5. Disabling HTTP Endpoints + +If you do not want to expose endpoints over HTTP, you can set the management port to `-1`, as the following example shows: + +Properties + +``` +management.server.port=-1 +``` + +Yaml + +``` +management: + server: + port: -1 +``` + +You can also achieve this by using the `management.endpoints.web.exposure.exclude` property, as the following example shows: + +Properties + +``` +management.endpoints.web.exposure.exclude=* +``` + +Yaml + +``` +management: + endpoints: + web: + exposure: + exclude: "*" +``` + +## 4. Monitoring and Management over JMX + +Java Management Extensions (JMX) provide a standard mechanism to monitor and manage applications. +By default, this feature is not enabled. +You can turn it on by setting the `spring.jmx.enabled` configuration property to `true`. +Spring Boot exposes the most suitable `MBeanServer` as a bean with an ID of `mbeanServer`. +Any of your beans that are annotated with Spring JMX annotations (`@ManagedResource`, `@ManagedAttribute`, or `@ManagedOperation`) are exposed to it. + +If your platform provides a standard `MBeanServer`, Spring Boot uses that and defaults to the VM `MBeanServer`, if necessary. 
+If all that fails, a new `MBeanServer` is created. + +See the [`JmxAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/jmx/JmxAutoConfiguration.java) class for more details. + +By default, Spring Boot also exposes management endpoints as JMX MBeans under the `org.springframework.boot` domain. +To take full control over endpoint registration in the JMX domain, consider registering your own `EndpointObjectNameFactory` implementation. + +### 4.1. Customizing MBean Names + +The name of the MBean is usually generated from the `id` of the endpoint. +For example, the `health` endpoint is exposed as `org.springframework.boot:type=Endpoint,name=Health`. + +If your application contains more than one Spring `ApplicationContext`, you may find that names clash. +To solve this problem, you can set the `spring.jmx.unique-names` property to `true` so that MBean names are always unique. + +You can also customize the JMX domain under which endpoints are exposed. +The following settings show an example of doing so in `application.properties`: + +Properties + +``` +spring.jmx.unique-names=true +management.endpoints.jmx.domain=com.example.myapp +``` + +Yaml + +``` +spring: + jmx: + unique-names: true +management: + endpoints: + jmx: + domain: "com.example.myapp" +``` + +### 4.2. Disabling JMX Endpoints + +If you do not want to expose endpoints over JMX, you can set the `management.endpoints.jmx.exposure.exclude` property to `*`, as the following example shows: + +Properties + +``` +management.endpoints.jmx.exposure.exclude=* +``` + +Yaml + +``` +management: + endpoints: + jmx: + exposure: + exclude: "*" +``` + +### 4.3. Using Jolokia for JMX over HTTP + +Jolokia is a JMX-HTTP bridge that provides an alternative method of accessing JMX beans. +To use Jolokia, include a dependency to `org.jolokia:jolokia-core`. 
+For example, with Maven, you would add the following dependency: + +``` + + org.jolokia + jolokia-core + +``` + +You can then expose the Jolokia endpoint by adding `jolokia` or `*` to the `management.endpoints.web.exposure.include` property. +You can then access it by using `/actuator/jolokia` on your management HTTP server. + +| |The Jolokia endpoint exposes Jolokia’s servlet as an actuator endpoint.
As a result, it is specific to servlet environments, such as Spring MVC and Jersey.
The endpoint is not available in a WebFlux application.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.3.1. Customizing Jolokia + +Jolokia has a number of settings that you would traditionally configure by setting servlet parameters. +With Spring Boot, you can use your `application.properties` file. +To do so, prefix the parameter with `management.endpoint.jolokia.config.`, as the following example shows: + +Properties + +``` +management.endpoint.jolokia.config.debug=true +``` + +Yaml + +``` +management: + endpoint: + jolokia: + config: + debug: true +``` + +#### 4.3.2. Disabling Jolokia + +If you use Jolokia but do not want Spring Boot to configure it, set the `management.endpoint.jolokia.enabled` property to `false`, as follows: + +Properties + +``` +management.endpoint.jolokia.enabled=false +``` + +Yaml + +``` +management: + endpoint: + jolokia: + enabled: false +``` + +## 5. Loggers + +Spring Boot Actuator includes the ability to view and configure the log levels of your application at runtime. +You can view either the entire list or an individual logger’s configuration, which is made up of both the explicitly configured logging level as well as the effective logging level given to it by the logging framework. +These levels can be one of: + +* `TRACE` + +* `DEBUG` + +* `INFO` + +* `WARN` + +* `ERROR` + +* `FATAL` + +* `OFF` + +* `null` + +`null` indicates that there is no explicit configuration. + +### 5.1. 
Configure a Logger + +To configure a given logger, `POST` a partial entity to the resource’s URI, as the following example shows: + +``` +{ + "configuredLevel": "DEBUG" +} +``` + +| |To “reset” the specific level of the logger (and use the default configuration instead), you can pass a value of `null` as the `configuredLevel`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------| + +## 6. Metrics + +Spring Boot Actuator provides dependency management and auto-configuration for [Micrometer](https://micrometer.io), an application metrics facade that supports [numerous monitoring systems](https://micrometer.io/docs), including: + +* [AppOptics](#actuator.metrics.export.appoptics) + +* [Atlas](#actuator.metrics.export.atlas) + +* [Datadog](#actuator.metrics.export.datadog) + +* [Dynatrace](#actuator.metrics.export.dynatrace) + +* [Elastic](#actuator.metrics.export.elastic) + +* [Ganglia](#actuator.metrics.export.ganglia) + +* [Graphite](#actuator.metrics.export.graphite) + +* [Humio](#actuator.metrics.export.humio) + +* [Influx](#actuator.metrics.export.influx) + +* [JMX](#actuator.metrics.export.jmx) + +* [KairosDB](#actuator.metrics.export.kairos) + +* [New Relic](#actuator.metrics.export.newrelic) + +* [Prometheus](#actuator.metrics.export.prometheus) + +* [SignalFx](#actuator.metrics.export.signalfx) + +* [Simple (in-memory)](#actuator.metrics.export.simple) + +* [Stackdriver](#actuator.metrics.export.stackdriver) + +* [StatsD](#actuator.metrics.export.statsd) + +* [Wavefront](#actuator.metrics.export.wavefront) + +| |To learn more about Micrometer’s capabilities, see its [reference documentation](https://micrometer.io/docs), in particular the [concepts section](https://micrometer.io/docs/concepts).| 
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 6.1. Getting started + +Spring Boot auto-configures a composite `MeterRegistry` and adds a registry to the composite for each of the supported implementations that it finds on the classpath. +Having a dependency on `micrometer-registry-{system}` in your runtime classpath is enough for Spring Boot to configure the registry. + +Most registries share common features. +For instance, you can disable a particular registry even if the Micrometer registry implementation is on the classpath. +The following example disables Datadog: + +Properties + +``` +management.metrics.export.datadog.enabled=false +``` + +Yaml + +``` +management: + metrics: + export: + datadog: + enabled: false +``` + +You can also disable all registries unless stated otherwise by the registry-specific property, as the following example shows: + +Properties + +``` +management.metrics.export.defaults.enabled=false +``` + +Yaml + +``` +management: + metrics: + export: + defaults: + enabled: false +``` + +Spring Boot also adds any auto-configured registries to the global static composite registry on the `Metrics` class, unless you explicitly tell it not to: + +Properties + +``` +management.metrics.use-global-registry=false +``` + +Yaml + +``` +management: + metrics: + use-global-registry: false +``` + +You can register any number of `MeterRegistryCustomizer` beans to further configure the registry, such as applying common tags, before any meters are registered with the registry: + +``` +import io.micrometer.core.instrument.MeterRegistry; + +import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class 
MyMeterRegistryConfiguration { + + @Bean + public MeterRegistryCustomizer metricsCommonTags() { + return (registry) -> registry.config().commonTags("region", "us-east-1"); + } + +} + +``` + +You can apply customizations to particular registry implementations by being more specific about the generic type: + +``` +import io.micrometer.core.instrument.Meter; +import io.micrometer.core.instrument.config.NamingConvention; +import io.micrometer.graphite.GraphiteMeterRegistry; + +import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyMeterRegistryConfiguration { + + @Bean + public MeterRegistryCustomizer graphiteMetricsNamingConvention() { + return (registry) -> registry.config().namingConvention(this::name); + } + + private String name(String name, Meter.Type type, String baseUnit) { + return ... + } + +} + +``` + +Spring Boot also [configures built-in instrumentation](#actuator.metrics.supported) that you can control through configuration or dedicated annotation markers. + +### 6.2. Supported Monitoring Systems + +This section briefly describes each of the supported monitoring systems. + +#### 6.2.1. AppOptics + +By default, the AppOptics registry periodically pushes metrics to `[api.appoptics.com/v1/measurements](https://api.appoptics.com/v1/measurements)`. +To export metrics to SaaS [AppOptics](https://micrometer.io/docs/registry/appOptics), your API token must be provided: + +Properties + +``` +management.metrics.export.appoptics.api-token=YOUR_TOKEN +``` + +Yaml + +``` +management: + metrics: + export: + appoptics: + api-token: "YOUR_TOKEN" +``` + +#### 6.2.2. Atlas + +By default, metrics are exported to [Atlas](https://micrometer.io/docs/registry/atlas) running on your local machine. 
+You can provide the location of the [Atlas server](https://github.com/Netflix/atlas): + +Properties + +``` +management.metrics.export.atlas.uri=https://atlas.example.com:7101/api/v1/publish +``` + +Yaml + +``` +management: + metrics: + export: + atlas: + uri: "https://atlas.example.com:7101/api/v1/publish" +``` + +#### 6.2.3. Datadog + +A Datadog registry periodically pushes metrics to [datadoghq](https://www.datadoghq.com). +To export metrics to [Datadog](https://micrometer.io/docs/registry/datadog), you must provide your API key: + +Properties + +``` +management.metrics.export.datadog.api-key=YOUR_KEY +``` + +Yaml + +``` +management: + metrics: + export: + datadog: + api-key: "YOUR_KEY" +``` + +You can also change the interval at which metrics are sent to Datadog: + +Properties + +``` +management.metrics.export.datadog.step=30s +``` + +Yaml + +``` +management: + metrics: + export: + datadog: + step: "30s" +``` + +#### 6.2.4. Dynatrace + +Dynatrace offers two metrics ingest APIs, both of which are implemented for [Micrometer](https://micrometer.io/docs/registry/dynatrace). +Configuration properties in the `v1` namespace apply only when exporting to the [Timeseries v1 API](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v1/). +Configuration properties in the `v2` namespace apply only when exporting to the [Metrics v2 API](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/post-ingest-metrics/). +Note that this integration can export only to either the `v1` or `v2` version of the API at a time. +If the `device-id` (required for v1 but not used in v2) is set in the `v1` namespace, metrics are exported to the `v1` endpoint. +Otherwise, `v2` is assumed. + +##### v2 API + +You can use the v2 API in two ways. 
+ +If a local OneAgent is running on the host, metrics are automatically exported to the [local OneAgent ingest endpoint](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/local-api/). +The ingest endpoint forwards the metrics to the Dynatrace backend. +This is the default behavior and requires no special setup beyond a dependency on `io.micrometer:micrometer-registry-dynatrace`. + +If no local OneAgent is running, the endpoint of the [Metrics v2 API](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/post-ingest-metrics/) and an API token are required. +The [API token](https://www.dynatrace.com/support/help/dynatrace-api/basics/dynatrace-api-authentication/) must have the “Ingest metrics” (`metrics.ingest`) permission set. +We recommend limiting the scope of the token to this one permission. +You must ensure that the endpoint URI contains the path (for example, `/api/v2/metrics/ingest`): + +The URL of the Metrics API v2 ingest endpoint is different according to your deployment option: + +* SaaS: `https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest` + +* Managed deployments: `https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest` + +The example below configures metrics export using the `example` environment id: + +Properties + +``` +management.metrics.export.dynatrace.uri=https://example.live.dynatrace.com/api/v2/metrics/ingest +management.metrics.export.dynatrace.api-token=YOUR_TOKEN +``` + +Yaml + +``` +management: + metrics: + export: + dynatrace: + uri: "https://example.live.dynatrace.com/api/v2/metrics/ingest" + api-token: "YOUR_TOKEN" +``` + +When using the Dynatrace v2 API, the following optional features are available: + +* Metric key prefix: Sets a prefix that is prepended to all exported metric keys. 
+ +* Enrich with Dynatrace metadata: If a OneAgent or Dynatrace operator is running, enrich metrics with additional metadata (for example, about the host, process, or pod). + +* Default dimensions: Specify key-value pairs that are added to all exported metrics. + If tags with the same key are specified with Micrometer, they overwrite the default dimensions. + +It is possible to not specify a URI and API token, as shown in the following example. +In this scenario, the local OneAgent endpoint is used: + +Properties + +``` +management.metrics.export.dynatrace.v2.metric-key-prefix=your.key.prefix +management.metrics.export.dynatrace.v2.enrich-with-dynatrace-metadata=true +management.metrics.export.dynatrace.v2.default-dimensions.key1=value1 +management.metrics.export.dynatrace.v2.default-dimensions.key2=value2 +``` + +Yaml + +``` +management: + metrics: + export: + dynatrace: + # Specify uri and api-token here if not using the local OneAgent endpoint. + v2: + metric-key-prefix: "your.key.prefix" + enrich-with-dynatrace-metadata: true + default-dimensions: + key1: "value1" + key2: "value2" +``` + +##### v1 API (Legacy) + +The Dynatrace v1 API metrics registry pushes metrics to the configured URI periodically by using the [Timeseries v1 API](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v1/). +For backwards-compatibility with existing setups, when `device-id` is set (required for v1, but not used in v2), metrics are exported to the Timeseries v1 endpoint. 
+To export metrics to [Dynatrace](https://micrometer.io/docs/registry/dynatrace), your API token, device ID, and URI must be provided: + +Properties + +``` +management.metrics.export.dynatrace.uri=https://{your-environment-id}.live.dynatrace.com +management.metrics.export.dynatrace.api-token=YOUR_TOKEN +management.metrics.export.dynatrace.v1.device-id=YOUR_DEVICE_ID +``` + +Yaml + +``` +management: + metrics: + export: + dynatrace: + uri: "https://{your-environment-id}.live.dynatrace.com" + api-token: "YOUR_TOKEN" + v1: + device-id: "YOUR_DEVICE_ID" +``` + +For the v1 API, you must specify the base environment URI without a path, as the v1 endpoint path is added automatically. + +##### Version-independent Settings + +In addition to the API endpoint and token, you can also change the interval at which metrics are sent to Dynatrace. +The default export interval is `60s`. +The following example sets the export interval to 30 seconds: + +Properties + +``` +management.metrics.export.dynatrace.step=30s +``` + +Yaml + +``` +management: + metrics: + export: + dynatrace: + step: "30s" +``` + +You can find more information on how to set up the Dynatrace exporter for Micrometer in [the Micrometer documentation](https://micrometer.io/docs/registry/dynatrace). + +#### 6.2.5. Elastic + +By default, metrics are exported to [Elastic](https://micrometer.io/docs/registry/elastic) running on your local machine. +You can provide the location of the Elastic server to use by using the following property: + +Properties + +``` +management.metrics.export.elastic.host=https://elastic.example.com:8086 +``` + +Yaml + +``` +management: + metrics: + export: + elastic: + host: "https://elastic.example.com:8086" +``` + +#### 6.2.6. Ganglia + +By default, metrics are exported to [Ganglia](https://micrometer.io/docs/registry/ganglia) running on your local machine. 
+You can provide the [Ganglia server](http://ganglia.sourceforge.net) host and port, as the following example shows: + +Properties + +``` +management.metrics.export.ganglia.host=ganglia.example.com +management.metrics.export.ganglia.port=9649 +``` + +Yaml + +``` +management: + metrics: + export: + ganglia: + host: "ganglia.example.com" + port: 9649 +``` + +#### 6.2.7. Graphite + +By default, metrics are exported to [Graphite](https://micrometer.io/docs/registry/graphite) running on your local machine. +You can provide the [Graphite server](https://graphiteapp.org) host and port, as the following example shows: + +Properties + +``` +management.metrics.export.graphite.host=graphite.example.com +management.metrics.export.graphite.port=9004 +``` + +Yaml + +``` +management: + metrics: + export: + graphite: + host: "graphite.example.com" + port: 9004 +``` + +Micrometer provides a default `HierarchicalNameMapper` that governs how a dimensional meter ID is [mapped to flat hierarchical names](https://micrometer.io/docs/registry/graphite#_hierarchical_name_mapping). + +| |To take control over this behavior, define your `GraphiteMeterRegistry` and supply your own `HierarchicalNameMapper`.
Auto-configured `GraphiteConfig` and `Clock` beans are provided unless you define your own:

```
import io.micrometer.core.instrument.Clock;
import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.config.NamingConvention;
import io.micrometer.core.instrument.util.HierarchicalNameMapper;
import io.micrometer.graphite.GraphiteConfig;
import io.micrometer.graphite.GraphiteMeterRegistry;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration(proxyBeanMethods = false)
public class MyGraphiteConfiguration {

@Bean
public GraphiteMeterRegistry graphiteMeterRegistry(GraphiteConfig config, Clock clock) {
return new GraphiteMeterRegistry(config, clock, this::toHierarchicalName);
}

private String toHierarchicalName(Meter.Id id, NamingConvention convention) {
return ...
}

}

```| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.2.8. Humio + +By default, the Humio registry periodically pushes metrics to [cloud.humio.com](https://cloud.humio.com). +To export metrics to SaaS [Humio](https://micrometer.io/docs/registry/humio), you must provide your API token: + +Properties + +``` +management.metrics.export.humio.api-token=YOUR_TOKEN +``` + +Yaml + +``` +management: + metrics: + export: + humio: + api-token: "YOUR_TOKEN" +``` + +You should also configure one or more tags to identify the data source to which metrics are pushed: + +Properties + +``` +management.metrics.export.humio.tags.alpha=a +management.metrics.export.humio.tags.bravo=b +``` + +Yaml + +``` +management: + metrics: + export: + humio: + tags: + alpha: "a" + bravo: "b" +``` + +#### 6.2.9. 
Influx + +By default, metrics are exported to an [Influx](https://micrometer.io/docs/registry/influx) v1 instance running on your local machine with the default configuration. +To export metrics to InfluxDB v2, configure the `org`, `bucket`, and authentication `token` for writing metrics. +You can provide the location of the [Influx server](https://www.influxdata.com) to use by using: + +Properties + +``` +management.metrics.export.influx.uri=https://influx.example.com:8086 +``` + +Yaml + +``` +management: + metrics: + export: + influx: + uri: "https://influx.example.com:8086" +``` + +#### 6.2.10. JMX + +Micrometer provides a hierarchical mapping to [JMX](https://micrometer.io/docs/registry/jmx), primarily as a cheap and portable way to view metrics locally. +By default, metrics are exported to the `metrics` JMX domain. +You can provide the domain to use by using: + +Properties + +``` +management.metrics.export.jmx.domain=com.example.app.metrics +``` + +Yaml + +``` +management: + metrics: + export: + jmx: + domain: "com.example.app.metrics" +``` + +Micrometer provides a default `HierarchicalNameMapper` that governs how a dimensional meter ID is [mapped to flat hierarchical names](https://micrometer.io/docs/registry/jmx#_hierarchical_name_mapping). + +| |To take control over this behavior, define your `JmxMeterRegistry` and supply your own `HierarchicalNameMapper`.
Auto-configured `JmxConfig` and `Clock` beans are provided unless you define your own:

```
import io.micrometer.core.instrument.Clock;
import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.config.NamingConvention;
import io.micrometer.core.instrument.util.HierarchicalNameMapper;
import io.micrometer.jmx.JmxConfig;
import io.micrometer.jmx.JmxMeterRegistry;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration(proxyBeanMethods = false)
public class MyJmxConfiguration {

@Bean
public JmxMeterRegistry jmxMeterRegistry(JmxConfig config, Clock clock) {
return new JmxMeterRegistry(config, clock, this::toHierarchicalName);
}

private String toHierarchicalName(Meter.Id id, NamingConvention convention) {
return ...
}

}

```| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.2.11. KairosDB + +By default, metrics are exported to [KairosDB](https://micrometer.io/docs/registry/kairos) running on your local machine. +You can provide the location of the [KairosDB server](https://kairosdb.github.io/) to use by using: + +Properties + +``` +management.metrics.export.kairos.uri=https://kairosdb.example.com:8080/api/v1/datapoints +``` + +Yaml + +``` +management: + metrics: + export: + kairos: + uri: "https://kairosdb.example.com:8080/api/v1/datapoints" +``` + +#### 6.2.12. New Relic + +A New Relic registry periodically pushes metrics to [New Relic](https://micrometer.io/docs/registry/new-relic). 
+To export metrics to [New Relic](https://newrelic.com), you must provide your API key and account ID: + +Properties + +``` +management.metrics.export.newrelic.api-key=YOUR_KEY +management.metrics.export.newrelic.account-id=YOUR_ACCOUNT_ID +``` + +Yaml + +``` +management: + metrics: + export: + newrelic: + api-key: "YOUR_KEY" + account-id: "YOUR_ACCOUNT_ID" +``` + +You can also change the interval at which metrics are sent to New Relic: + +Properties + +``` +management.metrics.export.newrelic.step=30s +``` + +Yaml + +``` +management: + metrics: + export: + newrelic: + step: "30s" +``` + +By default, metrics are published through REST calls, but you can also use the Java Agent API if you have it on the classpath: + +Properties + +``` +management.metrics.export.newrelic.client-provider-type=insights-agent +``` + +Yaml + +``` +management: + metrics: + export: + newrelic: + client-provider-type: "insights-agent" +``` + +Finally, you can take full control by defining your own `NewRelicClientProvider` bean. + +#### 6.2.13. Prometheus + +[Prometheus](https://micrometer.io/docs/registry/prometheus) expects to scrape or poll individual application instances for metrics. +Spring Boot provides an actuator endpoint at `/actuator/prometheus` to present a [Prometheus scrape](https://prometheus.io) with the appropriate format. + +| |By default, the endpoint is not available and must be exposed. 
See [exposing endpoints](#actuator.endpoints.exposing) for more details.| +|---|---------------------------------------------------------------------------------------------------------------------------------------| + +The following example `scrape_config` adds to `prometheus.yml`: + +``` +scrape_configs: + - job_name: "spring" + metrics_path: "/actuator/prometheus" + static_configs: + - targets: ["HOST:PORT"] +``` + +For ephemeral or batch jobs that may not exist long enough to be scraped, you can use [Prometheus Pushgateway](https://github.com/prometheus/pushgateway) support to expose the metrics to Prometheus. +To enable Prometheus Pushgateway support, add the following dependency to your project: + +``` + + io.prometheus + simpleclient_pushgateway + +``` + +When the Prometheus Pushgateway dependency is present on the classpath and the `management.metrics.export.prometheus.pushgateway.enabled` property is set to `true`, a `PrometheusPushGatewayManager` bean is auto-configured. +This manages the pushing of metrics to a Prometheus Pushgateway. + +You can tune the `PrometheusPushGatewayManager` by using properties under `management.metrics.export.prometheus.pushgateway`. +For advanced configuration, you can also provide your own `PrometheusPushGatewayManager` bean. + +#### 6.2.14. SignalFx + +SignalFx registry periodically pushes metrics to [SignalFx](https://micrometer.io/docs/registry/signalFx). +To export metrics to [SignalFx](https://www.signalfx.com), you must provide your access token: + +Properties + +``` +management.metrics.export.signalfx.access-token=YOUR_ACCESS_TOKEN +``` + +Yaml + +``` +management: + metrics: + export: + signalfx: + access-token: "YOUR_ACCESS_TOKEN" +``` + +You can also change the interval at which metrics are sent to SignalFx: + +Properties + +``` +management.metrics.export.signalfx.step=30s +``` + +Yaml + +``` +management: + metrics: + export: + signalfx: + step: "30s" +``` + +#### 6.2.15. 
Simple + +Micrometer ships with a simple, in-memory backend that is automatically used as a fallback if no other registry is configured. +This lets you see what metrics are collected in the [metrics endpoint](#actuator.metrics.endpoint). + +The in-memory backend disables itself as soon as you use any other available backend. +You can also disable it explicitly: + +Properties + +``` +management.metrics.export.simple.enabled=false +``` + +Yaml + +``` +management: + metrics: + export: + simple: + enabled: false +``` + +#### 6.2.16. Stackdriver + +The Stackdriver registry periodically pushes metrics to [Stackdriver](https://cloud.google.com/stackdriver/). +To export metrics to SaaS [Stackdriver](https://micrometer.io/docs/registry/stackdriver), you must provide your Google Cloud project ID: + +Properties + +``` +management.metrics.export.stackdriver.project-id=my-project +``` + +Yaml + +``` +management: + metrics: + export: + stackdriver: + project-id: "my-project" +``` + +You can also change the interval at which metrics are sent to Stackdriver: + +Properties + +``` +management.metrics.export.stackdriver.step=30s +``` + +Yaml + +``` +management: + metrics: + export: + stackdriver: + step: "30s" +``` + +#### 6.2.17. StatsD + +The StatsD registry eagerly pushes metrics over UDP to a StatsD agent. +By default, metrics are exported to a [StatsD](https://micrometer.io/docs/registry/statsD) agent running on your local machine. 
+You can provide the StatsD agent host, port, and protocol to use by using: + +Properties + +``` +management.metrics.export.statsd.host=statsd.example.com +management.metrics.export.statsd.port=9125 +management.metrics.export.statsd.protocol=udp +``` + +Yaml + +``` +management: + metrics: + export: + statsd: + host: "statsd.example.com" + port: 9125 + protocol: "udp" +``` + +You can also change the StatsD line protocol to use (it defaults to Datadog): + +Properties + +``` +management.metrics.export.statsd.flavor=etsy +``` + +Yaml + +``` +management: + metrics: + export: + statsd: + flavor: "etsy" +``` + +#### 6.2.18. Wavefront + +The Wavefront registry periodically pushes metrics to [Wavefront](https://micrometer.io/docs/registry/wavefront). +If you are exporting metrics to [Wavefront](https://www.wavefront.com/) directly, you must provide your API token: + +Properties + +``` +management.metrics.export.wavefront.api-token=YOUR_API_TOKEN +``` + +Yaml + +``` +management: + metrics: + export: + wavefront: + api-token: "YOUR_API_TOKEN" +``` + +Alternatively, you can use a Wavefront sidecar or an internal proxy in your environment to forward metrics data to the Wavefront API host: + +Properties + +``` +management.metrics.export.wavefront.uri=proxy://localhost:2878 +``` + +Yaml + +``` +management: + metrics: + export: + wavefront: + uri: "proxy://localhost:2878" +``` + +| |If you publish metrics to a Wavefront proxy (as described in [the Wavefront documentation](https://docs.wavefront.com/proxies_installing.html)), the host must be in the `proxy://HOST:PORT` format.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also change the interval at which metrics are sent to Wavefront: + +Properties + +``` +management.metrics.export.wavefront.step=30s +``` + +Yaml + +``` +management: + metrics: + export: + 
wavefront: + step: "30s" +``` + +### 6.3. Supported Metrics and Meters + +Spring Boot provides automatic meter registration for a wide variety of technologies. +In most situations, the defaults provide sensible metrics that can be published to any of the supported monitoring systems. + +#### 6.3.1. JVM Metrics + +Auto-configuration enables JVM Metrics by using core Micrometer classes. +JVM metrics are published under the `jvm.` meter name. + +The following JVM metrics are provided: + +* Various memory and buffer pool details + +* Statistics related to garbage collection + +* Thread utilization + +* The number of classes loaded and unloaded + +#### 6.3.2. System Metrics + +Auto-configuration enables system metrics by using core Micrometer classes. +System metrics are published under the `system.`, `process.`, and `disk.` meter names. + +The following system metrics are provided: + +* CPU metrics + +* File descriptor metrics + +* Uptime metrics (both the amount of time the application has been running and a fixed gauge of the absolute start time) + +* Disk space available + +#### 6.3.3. Application Startup Metrics + +Auto-configuration exposes application startup time metrics: + +* `application.started.time`: time taken to start the application. + +* `application.ready.time`: time taken for the application to be ready to service requests. + +Metrics are tagged by the fully qualified name of the application class. + +#### 6.3.4. Logger Metrics + +Auto-configuration enables the event metrics for both Logback and Log4J2. +The details are published under the `log4j2.events.` or `logback.events.` meter names. + +#### 6.3.5. Task Execution and Scheduling Metrics + +Auto-configuration enables the instrumentation of all available `ThreadPoolTaskExecutor` and `ThreadPoolTaskScheduler` beans, as long as the underling `ThreadPoolExecutor` is available. +Metrics are tagged by the name of the executor, which is derived from the bean name. + +#### 6.3.6. 
Spring MVC Metrics + +Auto-configuration enables the instrumentation of all requests handled by Spring MVC controllers and functional handlers. +By default, metrics are generated with the name, `http.server.requests`. +You can customized the name by setting the `management.metrics.web.server.request.metric-name` property. + +`@Timed` annotations are supported on `@Controller` classes and `@RequestMapping` methods (see [@Timed Annotation Support](#actuator.metrics.supported.timed-annotation) for details). +If you do not want to record metrics for all Spring MVC requests, you can set `management.metrics.web.server.request.autotime.enabled` to `false` and exclusively use `@Timed` annotations instead. + +By default, Spring MVC related metrics are tagged with the following information: + +| Tag | Description | +|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`exception`| The simple class name of any exception that was thrown while handling the request. | +| `method` | The request’s method (for example, `GET` or `POST`) | +| `outcome` |The request’s outcome, based on the status code of the response.
1xx is `INFORMATIONAL`, 2xx is `SUCCESS`, 3xx is `REDIRECTION`, 4xx is `CLIENT_ERROR`, and 5xx is `SERVER_ERROR`| +| `status` | The response’s HTTP status code (for example, `200` or `500`) | +| `uri` | The request’s URI template prior to variable substitution, if possible (for example, `/api/person/{id}`) | + +To add to the default tags, provide one or more `@Bean`s that implement `WebMvcTagsContributor`. +To replace the default tags, provide a `@Bean` that implements `WebMvcTagsProvider`. + +| |In some cases, exceptions handled in web controllers are not recorded as request metrics tags.
Applications can opt in and record exceptions by [setting handled exceptions as request attributes](web.html#web.servlet.spring-mvc.error-handling).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.3.7. Spring WebFlux Metrics + +Auto-configuration enables the instrumentation of all requests handled by Spring WebFlux controllers and functional handlers. +By default, metrics are generated with the name, `http.server.requests`. +You can customize the name by setting the `management.metrics.web.server.request.metric-name` property. + +`@Timed` annotations are supported on `@Controller` classes and `@RequestMapping` methods (see [@Timed Annotation Support](#actuator.metrics.supported.timed-annotation) for details). +If you do not want to record metrics for all Spring WebFlux requests, you can set `management.metrics.web.server.request.autotime.enabled` to `false` and exclusively use `@Timed` annotations instead. + +By default, WebFlux related metrics are tagged with the following information: + +| Tag | Description | +|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`exception`| The simple class name of any exception that was thrown while handling the request. | +| `method` | The request’s method (for example, `GET` or `POST`) | +| `outcome` |The request’s outcome, based on the status code of the response.
1xx is `INFORMATIONAL`, 2xx is `SUCCESS`, 3xx is `REDIRECTION`, 4xx is `CLIENT_ERROR`, and 5xx is `SERVER_ERROR`| +| `status` | The response’s HTTP status code (for example, `200` or `500`) | +| `uri` | The request’s URI template prior to variable substitution, if possible (for example, `/api/person/{id}`) | + +To add to the default tags, provide one or more beans that implement `WebFluxTagsContributor`. +To replace the default tags, provide a bean that implements `WebFluxTagsProvider`. + +| |In some cases, exceptions handled in controllers and handler functions are not recorded as request metrics tags.
Applications can opt in and record exceptions by [setting handled exceptions as request attributes](web.html#web.reactive.webflux.error-handling).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.3.8. Jersey Server Metrics + +Auto-configuration enables the instrumentation of all requests handled by the Jersey JAX-RS implementation. +By default, metrics are generated with the name, `http.server.requests`. +You can customize the name by setting the `management.metrics.web.server.request.metric-name` property. + +`@Timed` annotations are supported on request-handling classes and methods (see [@Timed Annotation Support](#actuator.metrics.supported.timed-annotation) for details). +If you do not want to record metrics for all Jersey requests, you can set `management.metrics.web.server.request.autotime.enabled` to `false` and exclusively use `@Timed` annotations instead. + +By default, Jersey server metrics are tagged with the following information: + +| Tag | Description | +|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`exception`| The simple class name of any exception that was thrown while handling the request. | +| `method` | The request’s method (for example, `GET` or `POST`) | +| `outcome` |The request’s outcome, based on the status code of the response.
1xx is `INFORMATIONAL`, 2xx is `SUCCESS`, 3xx is `REDIRECTION`, 4xx is `CLIENT_ERROR`, and 5xx is `SERVER_ERROR`| +| `status` | The response’s HTTP status code (for example, `200` or `500`) | +| `uri` | The request’s URI template prior to variable substitution, if possible (for example, `/api/person/{id}`) | + +To customize the tags, provide a `@Bean` that implements `JerseyTagsProvider`. + +#### 6.3.9. HTTP Client Metrics + +Spring Boot Actuator manages the instrumentation of both `RestTemplate` and `WebClient`. +For that, you have to inject the auto-configured builder and use it to create instances: + +* `RestTemplateBuilder` for `RestTemplate` + +* `WebClient.Builder` for `WebClient` + +You can also manually apply the customizers responsible for this instrumentation, namely `MetricsRestTemplateCustomizer` and `MetricsWebClientCustomizer`. + +By default, metrics are generated with the name, `http.client.requests`. +You can customize the name by setting the `management.metrics.web.client.request.metric-name` property. + +By default, metrics generated by an instrumented client are tagged with the following information: + +| Tag | Description | +|------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`clientName`| The host portion of the URI | +| `method` | The request’s method (for example, `GET` or `POST`) | +| `outcome` |The request’s outcome, based on the status code of the response.
1xx is `INFORMATIONAL`, 2xx is `SUCCESS`, 3xx is `REDIRECTION`, 4xx is `CLIENT_ERROR`, and 5xx is `SERVER_ERROR`. Otherwise, it is `UNKNOWN`.| +| `status` | The response’s HTTP status code if available (for example, `200` or `500`) or `IO_ERROR` in case of I/O issues. Otherwise, it is `CLIENT_ERROR`. | +| `uri` | The request’s URI template prior to variable substitution, if possible (for example, `/api/person/{id}`) | + +To customize the tags, and depending on your choice of client, you can provide a `@Bean` that implements `RestTemplateExchangeTagsProvider` or `WebClientExchangeTagsProvider`. +There are convenience static functions in `RestTemplateExchangeTags` and `WebClientExchangeTags`. + +#### 6.3.10. Tomcat Metrics + +Auto-configuration enables the instrumentation of Tomcat only when an `MBeanRegistry` is enabled. +By default, the `MBeanRegistry` is disabled, but you can enable it by setting `server.tomcat.mbeanregistry.enabled` to `true`. + +Tomcat metrics are published under the `tomcat.` meter name. + +#### 6.3.11. Cache Metrics + +Auto-configuration enables the instrumentation of all available `Cache` instances on startup, with metrics prefixed with `cache`. +Cache instrumentation is standardized for a basic set of metrics. +Additional, cache-specific metrics are also available. + +The following cache libraries are supported: + +* Caffeine + +* EhCache 2 + +* Hazelcast + +* Any compliant JCache (JSR-107) implementation + +* Redis + +Metrics are tagged by the name of the cache and by the name of the `CacheManager`, which is derived from the bean name. + +| |Only caches that are configured on startup are bound to the registry.
For caches not defined in the cache’s configuration, such as caches created on the fly or programmatically after the startup phase, an explicit registration is required.
A `CacheMetricsRegistrar` bean is made available to make that process easier.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.3.12. DataSource Metrics + +Auto-configuration enables the instrumentation of all available `DataSource` objects with metrics prefixed with `jdbc.connections`. +Data source instrumentation results in gauges that represent the currently active, idle, maximum allowed, and minimum allowed connections in the pool. + +Metrics are also tagged by the name of the `DataSource` computed based on the bean name. + +| |By default, Spring Boot provides metadata for all supported data sources.
You can add additional `DataSourcePoolMetadataProvider` beans if your favorite data source is not supported.
See `DataSourcePoolMetadataProvidersConfiguration` for examples.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Also, Hikari-specific metrics are exposed with a `hikaricp` prefix. +Each metric is tagged by the name of the pool (you can control it with `spring.datasource.name`). + +#### 6.3.13. Hibernate Metrics + +If `org.hibernate:hibernate-micrometer` is on the classpath, all available Hibernate `EntityManagerFactory` instances that have statistics enabled are instrumented with a metric named `hibernate`. + +Metrics are also tagged by the name of the `EntityManagerFactory`, which is derived from the bean name. + +To enable statistics, the standard JPA property `hibernate.generate_statistics` must be set to `true`. +You can enable that on the auto-configured `EntityManagerFactory`: + +Properties + +``` +spring.jpa.properties[hibernate.generate_statistics]=true +``` + +Yaml + +``` +spring: + jpa: + properties: + "[hibernate.generate_statistics]": true +``` + +#### 6.3.14. Spring Data Repository Metrics + +Auto-configuration enables the instrumentation of all Spring Data `Repository` method invocations. +By default, metrics are generated with the name, `spring.data.repository.invocations`. +You can customize the name by setting the `management.metrics.data.repository.metric-name` property. + +`@Timed` annotations are supported on `Repository` classes and methods (see [@Timed Annotation Support](#actuator.metrics.supported.timed-annotation) for details). +If you do not want to record metrics for all `Repository` invocations, you can set `management.metrics.data.repository.autotime.enabled` to `false` and exclusively use `@Timed` annotations instead. 
+ +By default, repository invocation related metrics are tagged with the following information: + +| Tag | Description | +|------------|---------------------------------------------------------------------------| +|`repository`| The simple class name of the source `Repository`. | +| `method` | The name of the `Repository` method that was invoked. | +| `state` | The result state (`SUCCESS`, `ERROR`, `CANCELED`, or `RUNNING`). | +|`exception` |The simple class name of any exception that was thrown from the invocation.| + +To replace the default tags, provide a `@Bean` that implements `RepositoryTagsProvider`. + +#### 6.3.15. RabbitMQ Metrics + +Auto-configuration enables the instrumentation of all available RabbitMQ connection factories with a metric named `rabbitmq`. + +#### 6.3.16. Spring Integration Metrics + +Spring Integration automatically provides [Micrometer support](https://docs.spring.io/spring-integration/docs/5.5.9/reference/html/system-management.html#micrometer-integration) whenever a `MeterRegistry` bean is available. +Metrics are published under the `spring.integration.` meter name. + +#### 6.3.17. Kafka Metrics + +Auto-configuration registers a `MicrometerConsumerListener` and `MicrometerProducerListener` for the auto-configured consumer factory and producer factory, respectively. +It also registers a `KafkaStreamsMicrometerListener` for `StreamsBuilderFactoryBean`. +For more detail, see the [Micrometer Native Metrics](https://docs.spring.io/spring-kafka/docs/2.8.3/reference/html/#micrometer-native) section of the Spring Kafka documentation. + +#### 6.3.18. MongoDB Metrics + +This section briefly describes the available metrics for MongoDB. + +##### MongoDB Command Metrics + +Auto-configuration registers a `MongoMetricsCommandListener` with the auto-configured `MongoClient`. + +A timer metric named `mongodb.driver.commands` is created for each command issued to the underlying MongoDB driver. 
+Each metric is tagged with the following information by default: + +| Tag | Description | +|----------------|------------------------------------------------------------| +| `command` | The name of the command issued. | +| `cluster.id` |The identifier of the cluster to which the command was sent.| +|`server.address`| The address of the server to which the command was sent. | +| `status` | The outcome of the command (`SUCCESS` or `FAILED`). | + +To replace the default metric tags, define a `MongoCommandTagsProvider` bean, as the following example shows: + +``` +import io.micrometer.core.instrument.binder.mongodb.MongoCommandTagsProvider; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyCommandTagsProviderConfiguration { + + @Bean + public MongoCommandTagsProvider customCommandTagsProvider() { + return new CustomCommandTagsProvider(); + } + +} + +``` + +To disable the auto-configured command metrics, set the following property: + +Properties + +``` +management.metrics.mongo.command.enabled=false +``` + +Yaml + +``` +management: + metrics: + mongo: + command: + enabled: false +``` + +##### MongoDB Connection Pool Metrics + +Auto-configuration registers a `MongoMetricsConnectionPoolListener` with the auto-configured `MongoClient`. + +The following gauge metrics are created for the connection pool: + +* `mongodb.driver.pool.size` reports the current size of the connection pool, including idle and and in-use members. + +* `mongodb.driver.pool.checkedout` reports the count of connections that are currently in use. + +* `mongodb.driver.pool.waitqueuesize` reports the current size of the wait queue for a connection from the pool. 
+ +Each metric is tagged with the following information by default: + +| Tag | Description | +|----------------|-----------------------------------------------------------------------| +| `cluster.id` |The identifier of the cluster to which the connection pool corresponds.| +|`server.address`| The address of the server to which the connection pool corresponds. | + +To replace the default metric tags, define a `MongoConnectionPoolTagsProvider` bean: + +``` +import io.micrometer.core.instrument.binder.mongodb.MongoConnectionPoolTagsProvider; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyConnectionPoolTagsProviderConfiguration { + + @Bean + public MongoConnectionPoolTagsProvider customConnectionPoolTagsProvider() { + return new CustomConnectionPoolTagsProvider(); + } + +} + +``` + +To disable the auto-configured connection pool metrics, set the following property: + +Properties + +``` +management.metrics.mongo.connectionpool.enabled=false +``` + +Yaml + +``` +management: + metrics: + mongo: + connectionpool: + enabled: false +``` + +#### 6.3.19. Jetty Metrics + +Auto-configuration binds metrics for Jetty’s `ThreadPool` by using Micrometer’s `JettyServerThreadPoolMetrics`. +Metrics for Jetty’s `Connector` instances are bound by using Micrometer’s `JettyConnectionMetrics` and, when `server.ssl.enabled` is set to `true`, Micrometer’s `JettySslHandshakeMetrics`. + +#### 6.3.20. @Timed Annotation Support + +You can use the `@Timed` annotation from the `io.micrometer.core.annotation` package with several of the supported technologies described earlier. +If supported, you can use the annotation at either the class level or the method level. 
+ +For example, the following code shows how you can use the annotation to instrument all request mappings in a `@RestController`: + +``` +import java.util.List; + +import io.micrometer.core.annotation.Timed; + +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@Timed +public class MyController { + + @GetMapping("/api/addresses") + public List<Address> listAddress() { + return ... + } + + @GetMapping("/api/people") + public List<Person> listPeople() { + return ... + } + +} + +``` + +If you want only to instrument a single mapping, you can use the annotation on the method instead of the class: + +``` +import java.util.List; + +import io.micrometer.core.annotation.Timed; + +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class MyController { + + @GetMapping("/api/addresses") + public List<Address> listAddress() { + return ... + } + + @GetMapping("/api/people") + @Timed + public List<Person> listPeople() { + return ... + } + +} + +``` + +You can also combine class-level and method-level annotations if you want to change the timing details for a specific method: + +``` +import java.util.List; + +import io.micrometer.core.annotation.Timed; + +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@Timed +public class MyController { + + @GetMapping("/api/addresses") + public List<Address> listAddress() { + return ... + } + + @GetMapping("/api/people") + @Timed(extraTags = { "region", "us-east-1" }) + @Timed(value = "all.people", longTask = true) + public List<Person> listPeople() { + return ... + } + +} + +``` + +| |A `@Timed` annotation with `longTask = true` enables a long task timer for the method.
Long task timers require a separate metric name and can be stacked with a short task timer.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.3.21. Redis Metrics + +Auto-configuration registers a `MicrometerCommandLatencyRecorder` for the auto-configured `LettuceConnectionFactory`. +For more detail, see the [Micrometer Metrics section](https://lettuce.io/core/6.1.6.RELEASE/reference/index.html#command.latency.metrics.micrometer) of the Lettuce documentation. + +### 6.4. Registering Custom Metrics + +To register custom metrics, inject `MeterRegistry` into your component: + +``` +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.Tags; + +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final Dictionary dictionary; + + public MyBean(MeterRegistry registry) { + this.dictionary = Dictionary.load(); + registry.gauge("dictionary.size", Tags.empty(), this.dictionary.getWords().size()); + } + +} + +``` + +If your metrics depend on other beans, we recommend that you use a `MeterBinder` to register them: + +``` +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.binder.MeterBinder; + +import org.springframework.context.annotation.Bean; + +public class MyMeterBinderConfiguration { + + @Bean + public MeterBinder queueSize(Queue queue) { + return (registry) -> Gauge.builder("queueSize", queue::size).register(registry); + } + +} + +``` + +Using a `MeterBinder` ensures that the correct dependency relationships are set up and that the bean is available when the metric’s value is retrieved. +A `MeterBinder` implementation can also be useful if you find that you repeatedly instrument a suite of metrics across components or applications. 
+ +| |By default, metrics from all `MeterBinder` beans are automatically bound to the Spring-managed `MeterRegistry`.| +|---|---------------------------------------------------------------------------------------------------------------| + +### 6.5. Customizing Individual Metrics + +If you need to apply customizations to specific `Meter` instances, you can use the `io.micrometer.core.instrument.config.MeterFilter` interface. + +For example, if you want to rename the `mytag.region` tag to `mytag.area` for all meter IDs beginning with `com.example`, you can do the following: + +``` +import io.micrometer.core.instrument.config.MeterFilter; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyMetricsFilterConfiguration { + + @Bean + public MeterFilter renameRegionTagMeterFilter() { + return MeterFilter.renameTag("com.example", "mytag.region", "mytag.area"); + } + +} + +``` + +| |By default, all `MeterFilter` beans are automatically bound to the Spring-managed `MeterRegistry`.
Make sure to register your metrics by using the Spring-managed `MeterRegistry` and not any of the static methods on `Metrics`.
These use the global registry that is not Spring-managed.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.5.1. Common Tags + +Common tags are generally used for dimensional drill-down on the operating environment, such as host, instance, region, stack, and others. +Common tags are applied to all meters and can be configured, as the following example shows: + +Properties + +``` +management.metrics.tags.region=us-east-1 +management.metrics.tags.stack=prod +``` + +Yaml + +``` +management: + metrics: + tags: + region: "us-east-1" + stack: "prod" +``` + +The preceding example adds `region` and `stack` tags to all meters with a value of `us-east-1` and `prod`, respectively. + +| |The order of common tags is important if you use Graphite. 
As the order of common tags cannot be guaranteed by using this approach, Graphite users are advised to define a custom `MeterFilter` instead.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.5.2. Per-meter Properties + +In addition to `MeterFilter` beans, you can apply a limited set of customization on a per-meter basis by using properties. +Per-meter customizations apply to any meter IDs that start with the given name. +The following example disables any meters that have an ID starting with `example.remote` + +Properties + +``` +management.metrics.enable.example.remote=false +``` + +Yaml + +``` +management: + metrics: + enable: + example: + remote: false +``` + +The following properties allow per-meter customization: + +| Property | Description | +|------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `management.metrics.enable` | Whether to prevent meters from emitting any metrics. | +| `management.metrics.distribution.percentiles-histogram` | Whether to publish a histogram suitable for computing aggregable (across dimension) percentile approximations. | +|`management.metrics.distribution.minimum-expected-value`, `management.metrics.distribution.maximum-expected-value`| Publish fewer histogram buckets by clamping the range of expected values. | +| `management.metrics.distribution.percentiles` | Publish percentile values computed in your application | +| `management.metrics.distribution.expiry`, `management.metrics.distribution.buffer-length` |Give greater weight to recent samples by accumulating them in ring buffers which rotate after a configurable expiry, with a
configurable buffer length.| +| `management.metrics.distribution.slo` | Publish a cumulative histogram with buckets defined by your service-level objectives. | + +For more details on the concepts behind `percentiles-histogram`, `percentiles`, and `slo`, see the [“Histograms and percentiles” section](https://micrometer.io/docs/concepts#_histograms_and_percentiles) of the Micrometer documentation. + +### 6.6. Metrics Endpoint + +Spring Boot provides a `metrics` endpoint that you can use diagnostically to examine the metrics collected by an application. +The endpoint is not available by default and must be exposed. See [exposing endpoints](#actuator.endpoints.exposing) for more details. + +Navigating to `/actuator/metrics` displays a list of available meter names. +You can drill down to view information about a particular meter by providing its name as a selector — for example, `/actuator/metrics/jvm.memory.max`. + +| |The name you use here should match the name used in the code, not the name after it has been naming-convention normalized for a monitoring system to which it is shipped.
In other words, if `jvm.memory.max` appears as `jvm_memory_max` in Prometheus because of its snake case naming convention, you should still use `jvm.memory.max` as the selector when inspecting the meter in the `metrics` endpoint.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also add any number of `tag=KEY:VALUE` query parameters to the end of the URL to dimensionally drill down on a meter — for example, `/actuator/metrics/jvm.memory.max?tag=area:nonheap`. + +| |The reported measurements are the *sum* of the statistics of all meters that match the meter name and any tags that have been applied.
In the preceding example, the returned `Value` statistic is the sum of the maximum memory footprints of the “Code Cache”, “Compressed Class Space”, and “Metaspace” areas of the heap.
If you wanted to see only the maximum size for the “Metaspace”, you could add an additional `tag=id:Metaspace` — that is, `/actuator/metrics/jvm.memory.max?tag=area:nonheap&tag=id:Metaspace`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 7. Auditing + +Once Spring Security is in play, Spring Boot Actuator has a flexible audit framework that publishes events (by default, “authentication success”, “failure” and “access denied” exceptions). +This feature can be very useful for reporting and for implementing a lock-out policy based on authentication failures. + +You can enable auditing by providing a bean of type `AuditEventRepository` in your application’s configuration. +For convenience, Spring Boot offers an `InMemoryAuditEventRepository`.`InMemoryAuditEventRepository` has limited capabilities, and we recommend using it only for development environments. +For production environments, consider creating your own alternative `AuditEventRepository` implementation. + +### 7.1. Custom Auditing + +To customize published security events, you can provide your own implementations of `AbstractAuthenticationAuditListener` and `AbstractAuthorizationAuditListener`. + +You can also use the audit services for your own business events. +To do so, either inject the `AuditEventRepository` bean into your own components and use that directly or publish an `AuditApplicationEvent` with the Spring `ApplicationEventPublisher` (by implementing `ApplicationEventPublisherAware`). + +## 8. 
HTTP Tracing + +You can enable HTTP Tracing by providing a bean of type `HttpTraceRepository` in your application’s configuration. +For convenience, Spring Boot offers `InMemoryHttpTraceRepository`, which stores traces for the last 100 (the default) request-response exchanges.`InMemoryHttpTraceRepository` is limited compared to other tracing solutions, and we recommend using it only for development environments. +For production environments, we recommend using a production-ready tracing or observability solution, such as Zipkin or Spring Cloud Sleuth. +Alternatively, you can create your own `HttpTraceRepository`. + +You can use the `httptrace` endpoint to obtain information about the request-response exchanges that are stored in the `HttpTraceRepository`. + +### 8.1. Custom HTTP tracing + +To customize the items that are included in each trace, use the `management.trace.http.include` configuration property. +For advanced customization, consider registering your own `HttpExchangeTracer` implementation. + +## 9. Process Monitoring + +In the `spring-boot` module, you can find two classes to create files that are often useful for process monitoring: + +* `ApplicationPidFileWriter` creates a file that contains the application PID (by default, in the application directory with a file name of `application.pid`). + +* `WebServerPortFileWriter` creates a file (or files) that contain the ports of the running web server (by default, in the application directory with a file name of `application.port`). + +By default, these writers are not activated, but you can enable them: + +* [By Extending Configuration](#actuator.process-monitoring.configuration) + +* [Programmatically Enabling Process Monitoring](#actuator.process-monitoring.programmatically) + +### 9.1. 
Extending Configuration + +In the `META-INF/spring.factories` file, you can activate the listener (or listeners) that writes a PID file: + +``` +org.springframework.context.ApplicationListener=\ +org.springframework.boot.context.ApplicationPidFileWriter,\ +org.springframework.boot.web.context.WebServerPortFileWriter +``` + +### 9.2. Programmatically Enabling Process Monitoring + +You can also activate a listener by invoking the `SpringApplication.addListeners(…​)` method and passing the appropriate `Writer` object. +This method also lets you customize the file name and path in the `Writer` constructor. + +## 10. Cloud Foundry Support + +Spring Boot’s actuator module includes additional support that is activated when you deploy to a compatible Cloud Foundry instance. +The `/cloudfoundryapplication` path provides an alternative secured route to all `@Endpoint` beans. + +The extended support lets Cloud Foundry management UIs (such as the web application that you can use to view deployed applications) be augmented with Spring Boot actuator information. +For example, an application status page can include full health information instead of the typical “running” or “stopped” status. + +| |The `/cloudfoundryapplication` path is not directly accessible to regular users.
To use the endpoint, you must pass a valid UAA token with the request.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 10.1. Disabling Extended Cloud Foundry Actuator Support + +If you want to fully disable the `/cloudfoundryapplication` endpoints, you can add the following setting to your `application.properties` file: + +Properties + +``` +management.cloudfoundry.enabled=false +``` + +Yaml + +``` +management: + cloudfoundry: + enabled: false +``` + +### 10.2. Cloud Foundry Self-signed Certificates + +By default, the security verification for `/cloudfoundryapplication` endpoints makes SSL calls to various Cloud Foundry services. +If your Cloud Foundry UAA or Cloud Controller services use self-signed certificates, you need to set the following property: + +Properties + +``` +management.cloudfoundry.skip-ssl-validation=true +``` + +Yaml + +``` +management: + cloudfoundry: + skip-ssl-validation: true +``` + +### 10.3. Custom Context Path + +If the server’s context-path has been configured to anything other than `/`, the Cloud Foundry endpoints are not available at the root of the application. +For example, if `server.servlet.context-path=/app`, Cloud Foundry endpoints are available at `/app/cloudfoundryapplication/*`. + +If you expect the Cloud Foundry endpoints to always be available at `/cloudfoundryapplication/*`, regardless of the server’s context-path, you need to explicitly configure that in your application. +The configuration differs, depending on the web server in use. 
+For Tomcat, you can add the following configuration: + +``` +import java.io.IOException; +import java.util.Collections; + +import javax.servlet.GenericServlet; +import javax.servlet.Servlet; +import javax.servlet.ServletContainerInitializer; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; + +import org.apache.catalina.Host; +import org.apache.catalina.core.StandardContext; +import org.apache.catalina.startup.Tomcat; + +import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory; +import org.springframework.boot.web.servlet.ServletContextInitializer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyCloudFoundryConfiguration { + + @Bean + public TomcatServletWebServerFactory servletWebServerFactory() { + return new TomcatServletWebServerFactory() { + + @Override + protected void prepareContext(Host host, ServletContextInitializer[] initializers) { + super.prepareContext(host, initializers); + StandardContext child = new StandardContext(); + child.addLifecycleListener(new Tomcat.FixContextListener()); + child.setPath("/cloudfoundryapplication"); + ServletContainerInitializer initializer = getServletContextInitializer(getContextPath()); + child.addServletContainerInitializer(initializer, Collections.emptySet()); + child.setCrossContext(true); + host.addChild(child); + } + + }; + } + + private ServletContainerInitializer getServletContextInitializer(String contextPath) { + return (classes, context) -> { + Servlet servlet = new GenericServlet() { + + @Override + public void service(ServletRequest req, ServletResponse res) throws ServletException, IOException { + ServletContext context = req.getServletContext().getContext(contextPath); + context.getRequestDispatcher("/cloudfoundryapplication").forward(req, res); + } + + 
}; + context.addServlet("cloudfoundry", servlet).addMapping("/*"); + }; + } + +} + +``` + +## 11. What to Read Next + +You might want to read about graphing tools such as [Graphite](https://graphiteapp.org). + +Otherwise, you can continue on to read about [“deployment options”](deployment.html#deployment) or jump ahead for some in-depth information about Spring Boot’s [build tool plugins](build-tool-plugins.html#build-tool-plugins). diff --git a/docs/en/spring-boot/build-tool-plugins.md b/docs/en/spring-boot/build-tool-plugins.md new file mode 100644 index 0000000000000000000000000000000000000000..abeb91f78aad0d0ddb5d6a876a4986579094c4bf --- /dev/null +++ b/docs/en/spring-boot/build-tool-plugins.md @@ -0,0 +1,211 @@ +# Build Tool Plugins + +Spring Boot provides build tool plugins for Maven and Gradle. +The plugins offer a variety of features, including the packaging of executable jars. +This section provides more details on both plugins as well as some help should you need to extend an unsupported build system. +If you are just getting started, you might want to read “[using.html](using.html#using.build-systems)” from the “[using.html](using.html#using)” section first. + +## 1.1 Spring Boot Maven Plugin + +The Spring Boot Maven Plugin provides Spring Boot support in Maven, letting you package executable jar or war archives and run an application “in-place”. +To use it, you must use Maven 3.2 (or later). + +See the plugin’s documentation to learn more: + +* Reference ([HTML](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/) and [PDF](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/pdf/spring-boot-maven-plugin-reference.pdf)) + +* [API](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/api/) + +## 1.2. 
Spring Boot Gradle Plugin + +The Spring Boot Gradle Plugin provides Spring Boot support in Gradle, letting you package executable jar or war archives, run Spring Boot applications, and use the dependency management provided by `spring-boot-dependencies`. +It requires Gradle 6.8, 6.9, or 7.x. +See the plugin’s documentation to learn more: + +* Reference ([HTML](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/) and [PDF](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/pdf/spring-boot-gradle-plugin-reference.pdf)) + +* [API](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/api/) + +## 3. Spring Boot AntLib Module + +The Spring Boot AntLib module provides basic Spring Boot support for Apache Ant. +You can use the module to create executable jars. +To use the module, you need to declare an additional `spring-boot` namespace in your `build.xml`, as shown in the following example: + +``` + + ... + +``` + +You need to remember to start Ant using the `-lib` option, as shown in the following example: + +``` +$ ant -lib +``` + +> The “Using Spring Boot” section includes a more complete example of [using Apache Ant with `spring-boot-antlib`](using.html#using.build-systems.ant). + +### 3.1. Spring Boot Ant Tasks + +Once the `spring-boot-antlib` namespace has been declared, the following additional tasks are available: + +* [Using the “exejar” Task](#build-tool-plugins.antlib.tasks.exejar) + +* [Using the “findmainclass” Task](#build-tool-plugins.antlib.findmainclass) + +#### 3.1.1. Using the “exejar” Task + +You can use the `exejar` task to create a Spring Boot executable jar. 
+The following attributes are supported by the task: + +| Attribute | Description | Required | +|-------------|--------------------------------------|-------------------------------------------------------------------------| +| `destfile` | The destination jar file to create | Yes | +| `classes` |The root directory of Java class files| Yes | +|`start-class`| The main application class to run |No *(the default is the first class found that declares a `main` method)*| + +The following nested elements can be used with the task: + +| Element | Description | +|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`resources`|One or more [Resource Collections](https://ant.apache.org/manual/Types/resources.html#collection) describing a set of [Resources](https://ant.apache.org/manual/Types/resources.html) that should be added to the content of the created jar file.| +| `lib` | One or more [Resource Collections](https://ant.apache.org/manual/Types/resources.html#collection) that should be added to the set of jar libraries that make up the runtime dependency classpath of the application. | + +#### 3.1.2. Examples + +This section shows two examples of Ant tasks. + +Specify start-class + +``` + + + + + + + + +``` + +Detect start-class + +``` + + + + + +``` + +### 3.2. Using the “findmainclass” Task + +The `findmainclass` task is used internally by `exejar` to locate a class declaring a `main`. +If necessary, you can also use this task directly in your build. 
+The following attributes are supported: + +| Attribute | Description | Required | +|-------------|----------------------------------------------------|-------------------------------------------| +|`classesroot`| The root directory of Java class files | Yes *(unless `mainclass` is specified)* | +| `mainclass` |Can be used to short-circuit the `main` class search| No | +| `property` |The Ant property that should be set with the result |No *(result will be logged if unspecified)*| + +#### 3.2.1. Examples + +This section contains three examples of using `findmainclass`. + +Find and log + +``` + +``` + +Find and set + +``` + +``` + +Override and set + +``` + +``` + +## 4. Supporting Other Build Systems + +If you want to use a build tool other than Maven, Gradle, or Ant, you likely need to develop your own plugin. +Executable jars need to follow a specific format and certain entries need to be written in an uncompressed form (see the “[executable jar format](executable-jar.html#appendix.executable-jar)” section in the appendix for details). + +The Spring Boot Maven and Gradle plugins both make use of `spring-boot-loader-tools` to actually generate jars. +If you need to, you may use this library directly. + +### 4.1. Repackaging Archives + +To repackage an existing archive so that it becomes a self-contained executable archive, use `org.springframework.boot.loader.tools.Repackager`. +The `Repackager` class takes a single constructor argument that refers to an existing jar or war archive. +Use one of the two available `repackage()` methods to either replace the original file or write to a new destination. +Various settings can also be configured on the repackager before it is run. + +### 4.2. Nested Libraries + +When repackaging an archive, you can include references to dependency files by using the `org.springframework.boot.loader.tools.Libraries` interface. +We do not provide any concrete implementations of `Libraries` here as they are usually build-system-specific. 
+ +If your archive already includes libraries, you can use `Libraries.NONE`. + +### 4.3. Finding a Main Class + +If you do not use `Repackager.setMainClass()` to specify a main class, the repackager uses [ASM](https://asm.ow2.io/) to read class files and tries to find a suitable class with a `public static void main(String[] args)` method. +An exception is thrown if more than one candidate is found. + +### 4.4. Example Repackage Implementation + +The following example shows a typical repackage implementation: + +``` +import java.io.File; +import java.io.IOException; +import java.util.List; + +import org.springframework.boot.loader.tools.Library; +import org.springframework.boot.loader.tools.LibraryCallback; +import org.springframework.boot.loader.tools.LibraryScope; +import org.springframework.boot.loader.tools.Repackager; + +public class MyBuildTool { + + public void build() throws IOException { + File sourceJarFile = ... + Repackager repackager = new Repackager(sourceJarFile); + repackager.setBackupSource(false); + repackager.repackage(this::getLibraries); + } + + private void getLibraries(LibraryCallback callback) throws IOException { + // Build system specific implementation, callback for each dependency + for (File nestedJar : getCompileScopeJars()) { + callback.library(new Library(nestedJar, LibraryScope.COMPILE)); + } + // ... + } + + private List getCompileScopeJars() { + return ... + } + +} + +``` + +## 5. What to Read Next + +If you are interested in how the build tool plugins work, you can look at the [`spring-boot-tools`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-tools) module on GitHub. +More technical details of the executable jar format are covered in [the appendix](executable-jar.html#appendix.executable-jar). + +If you have specific build-related questions, see the “[how-to](howto.html#howto)” guides. 
+ diff --git a/docs/en/spring-boot/cli.md b/docs/en/spring-boot/cli.md new file mode 100644 index 0000000000000000000000000000000000000000..793151dc468fdb5107765fcf82d0de4181ea1694 --- /dev/null +++ b/docs/en/spring-boot/cli.md @@ -0,0 +1,369 @@ +# Spring Boot CLI + +The Spring Boot CLI is a command line tool that you can use if you want to quickly develop a Spring application. +It lets you run Groovy scripts, which means that you have a familiar Java-like syntax without so much boilerplate code. +You can also bootstrap a new project or write your own command for it. + +## 1. Installing the CLI + +The Spring Boot CLI (Command-Line Interface) can be installed manually by using SDKMAN! (the SDK Manager) or by using Homebrew or MacPorts if you are an OSX user. +See *[getting-started.html](getting-started.html#getting-started.installing.cli)* in the “Getting started” section for comprehensive installation instructions. + +## 2. Using the CLI + +Once you have installed the CLI, you can run it by typing `spring` and pressing Enter at the command line. +If you run `spring` without any arguments, a help screen is displayed, as follows: + +``` +$ spring +usage: spring [--help] [--version] + [] + +Available commands are: + + run [options] [--] [args] + Run a spring groovy script + + _... 
more command help is shown here_ +``` + +You can type `spring help` to get more details about any of the supported commands, as shown in the following example: + +``` +$ spring help run +spring run - Run a spring groovy script + +usage: spring run [options] [--] [args] + +Option Description +------ ----------- +--autoconfigure [Boolean] Add autoconfigure compiler + transformations (default: true) +--classpath, -cp Additional classpath entries +--no-guess-dependencies Do not attempt to guess dependencies +--no-guess-imports Do not attempt to guess imports +-q, --quiet Quiet logging +-v, --verbose Verbose logging of dependency + resolution +--watch Watch the specified file for changes +``` + +The `version` command provides a quick way to check which version of Spring Boot you are using, as follows: + +``` +$ spring version +Spring CLI v2.6.4 +``` + +### 2.1. Running Applications with the CLI + +You can compile and run Groovy source code by using the `run` command. +The Spring Boot CLI is completely self-contained, so you do not need any external Groovy installation. + +The following example shows a “hello world” web application written in Groovy: + +hello.groovy + +``` +@RestController +class WebApplication { + + @RequestMapping("/") + String home() { + "Hello World!" + } + +} + +``` + +To compile and run the application, type the following command: + +``` +$ spring run hello.groovy +``` + +To pass command-line arguments to the application, use `--` to separate the commands from the “spring” command arguments, as shown in the following example: + +``` +$ spring run hello.groovy -- --server.port=9000 +``` + +To set JVM command line arguments, you can use the `JAVA_OPTS` environment variable, as shown in the following example: + +``` +$ JAVA_OPTS=-Xmx1024m spring run hello.groovy +``` + +| |When setting `JAVA_OPTS` on Microsoft Windows, make sure to quote the entire instruction, such as `set "JAVA_OPTS=-Xms256m -Xmx2048m"`.
Doing so ensures the values are properly passed to the process.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.1. Deduced “grab” Dependencies + +Standard Groovy includes a `@Grab` annotation, which lets you declare dependencies on third-party libraries. +This useful technique lets Groovy download jars in the same way as Maven or Gradle would but without requiring you to use a build tool. + +Spring Boot extends this technique further and tries to deduce which libraries to “grab” based on your code. +For example, since the `WebApplication` code shown previously uses `@RestController` annotations, Spring Boot grabs "Tomcat" and "Spring MVC". + +The following items are used as “grab hints”: + +| Items | Grabs | +|----------------------------------------------------------|------------------------------| +|`JdbcTemplate`, `NamedParameterJdbcTemplate`, `DataSource`| JDBC Application. | +| `@EnableJms` | JMS Application. | +| `@EnableCaching` | Caching abstraction. | +| `@Test` | JUnit. | +| `@EnableRabbit` | RabbitMQ. | +| extends `Specification` | Spock test. | +| `@EnableBatchProcessing` | Spring Batch. | +| `@MessageEndpoint` `@EnableIntegration` | Spring Integration. | +| `@Controller` `@RestController` `@EnableWebMvc` |Spring MVC + Embedded Tomcat. | +| `@EnableWebSecurity` | Spring Security. 
| +| `@EnableTransactionManagement` |Spring Transaction Management.| + +| |See subclasses of [`CompilerAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-cli/src/main/java/org/springframework/boot/cli/compiler/CompilerAutoConfiguration.java) in the Spring Boot CLI source code to understand exactly how customizations are applied.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.2. Deduced “grab” Coordinates + +Spring Boot extends Groovy’s standard `@Grab` support by letting you specify a dependency without a group or version (for example, `@Grab('freemarker')`). +Doing so consults Spring Boot’s default dependency metadata to deduce the artifact’s group and version. + +| |The default metadata is tied to the version of the CLI that you use.
It changes only when you move to a new version of the CLI, putting you in control of when the versions of your dependencies may change.
A table showing the dependencies and their versions that are included in the default metadata can be found in the [appendix](dependency-versions.html#appendix.dependency-versions).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.3. Default Import Statements + +To help reduce the size of your Groovy code, several `import` statements are automatically included. +Notice how the preceding example refers to `@Component`, `@RestController`, and `@RequestMapping` without needing to use fully-qualified names or `import` statements. + +| |Many Spring annotations work without using `import` statements.
Try running your application to see what fails before adding imports.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.4. Automatic Main Method + +Unlike the equivalent Java application, you do not need to include a `public static void main(String[] args)` method with your `Groovy` scripts. +A `SpringApplication` is automatically created, with your compiled code acting as the `source`. + +#### 2.1.5. Custom Dependency Management + +By default, the CLI uses the dependency management declared in `spring-boot-dependencies` when resolving `@Grab` dependencies. +Additional dependency management, which overrides the default dependency management, can be configured by using the `@DependencyManagementBom` annotation. +The annotation’s value should specify the coordinates (`groupId:artifactId:version`) of one or more Maven BOMs. + +For example, consider the following declaration: + +``` +@DependencyManagementBom("com.example.custom-bom:1.0.0") + +``` + +The preceding declaration picks up `custom-bom-1.0.0.pom` in a Maven repository under `com/example/custom-versions/1.0.0/`. + +When you specify multiple BOMs, they are applied in the order in which you declare them, as shown in the following example: + +``` +@DependencyManagementBom([ + "com.example.custom-bom:1.0.0", + "com.example.another-bom:1.0.0"]) + +``` + +The preceding example indicates that the dependency management in `another-bom` overrides the dependency management in `custom-bom`. + +You can use `@DependencyManagementBom` anywhere that you can use `@Grab`. +However, to ensure consistent ordering of the dependency management, you can use `@DependencyManagementBom` at most once in your application. + +### 2.2. Applications with Multiple Source Files + +You can use “shell globbing” with all commands that accept file input. 
+Doing so lets you use multiple files from a single directory, as shown in the following example: + +``` +$ spring run *.groovy +``` + +### 2.3. Packaging Your Application + +You can use the `jar` command to package your application into a self-contained executable jar file, as shown in the following example: + +``` +$ spring jar my-app.jar *.groovy +``` + +The resulting jar contains the classes produced by compiling the application and all of the application’s dependencies so that it can then be run by using `java -jar`. +The jar file also contains entries from the application’s classpath. +You can add and remove explicit paths to the jar by using `--include` and `--exclude`. +Both are comma-separated, and both accept prefixes, in the form of “+” and “-”, to signify that they should be removed from the defaults. +The default includes are as follows: + +``` +public/**, resources/**, static/**, templates/**, META-INF/**, * +``` + +The default excludes are as follows: + +``` +.*, repository/**, build/**, target/**, **/*.jar, **/*.groovy +``` + +Type `spring help jar` on the command line for more information. + +### 2.4. Initialize a New Project + +The `init` command lets you create a new project by using [start.spring.io](https://start.spring.io) without leaving the shell, as shown in the following example: + +``` +$ spring init --dependencies=web,data-jpa my-project +Using service at https://start.spring.io +Project extracted to '/Users/developer/example/my-project' +``` + +The preceding example creates a `my-project` directory with a Maven-based project that uses `spring-boot-starter-web` and `spring-boot-starter-data-jpa`. 
+You can list the capabilities of the service by using the `--list` flag, as shown in the following example:
+
+```
+$ spring init --list
+=======================================
+Capabilities of https://start.spring.io
+=======================================
+
+Available dependencies:
+-----------------------
+actuator - Actuator: Production ready features to help you monitor and manage your application
+...
+web - Web: Support for full-stack web development, including Tomcat and spring-webmvc
+websocket - Websocket: Support for WebSocket development
+ws - WS: Support for Spring Web Services
+
+Available project types:
+------------------------
+gradle-build - Gradle Config [format:build, build:gradle]
+gradle-project - Gradle Project [format:project, build:gradle]
+maven-build - Maven POM [format:build, build:maven]
+maven-project - Maven Project [format:project, build:maven] (default)
+
+...
+```
+
+The `init` command supports many options.
+See the `help` output for more details.
+For instance, the following command creates a Gradle project that uses Java 8 and `war` packaging:
+
+```
+$ spring init --build=gradle --java-version=1.8 --dependencies=websocket --packaging=war sample-app.zip
+Using service at https://start.spring.io
+Content saved to 'sample-app.zip'
+```
+
+### 2.5. Using the Embedded Shell
+
+Spring Boot includes command-line completion scripts for the BASH and zsh shells.
+If you do not use either of these shells (perhaps you are a Windows user), you can use the `shell` command to launch an integrated shell, as shown in the following example:
+
+```
+$ spring shell
+Spring Boot (v2.6.4)
+Hit TAB to complete. Type 'help' and hit RETURN for help, and 'exit' to quit.
+```
+
+From inside the embedded shell, you can run other commands directly:
+
+```
+$ version
+Spring CLI v2.6.4
+```
+
+The embedded shell supports ANSI color output as well as `tab` completion.
+If you need to run a native command, you can use the `!` prefix. 
+To exit the embedded shell, press `ctrl-c`. + +### 2.6. Adding Extensions to the CLI + +You can add extensions to the CLI by using the `install` command. +The command takes one or more sets of artifact coordinates in the format `group:artifact:version`, as shown in the following example: + +``` +$ spring install com.example:spring-boot-cli-extension:1.0.0.RELEASE +``` + +In addition to installing the artifacts identified by the coordinates you supply, all of the artifacts' dependencies are also installed. + +To uninstall a dependency, use the `uninstall` command. +As with the `install` command, it takes one or more sets of artifact coordinates in the format of `group:artifact:version`, as shown in the following example: + +``` +$ spring uninstall com.example:spring-boot-cli-extension:1.0.0.RELEASE +``` + +It uninstalls the artifacts identified by the coordinates you supply and their dependencies. + +To uninstall all additional dependencies, you can use the `--all` option, as shown in the following example: + +``` +$ spring uninstall --all +``` + +## 3. Developing Applications with the Groovy Beans DSL + +Spring Framework 4.0 has native support for a `beans{}` “DSL” (borrowed from [Grails](https://grails.org/)), and you can embed bean definitions in your Groovy application scripts by using the same format. +This is sometimes a good way to include external features like middleware declarations, as shown in the following example: + +``` +@Configuration(proxyBeanMethods = false) +class Application implements CommandLineRunner { + + @Autowired + SharedService service + + @Override + void run(String... args) { + println service.message + } + +} + +import my.company.SharedService + +beans { + service(SharedService) { + message = "Hello World" + } +} + +``` + +You can mix class declarations with `beans{}` in the same file as long as they stay at the top level, or, if you prefer, you can put the beans DSL in a separate file. + +## 4. 
Configuring the CLI with settings.xml + +The Spring Boot CLI uses Maven Resolver, Maven’s dependency resolution engine, to resolve dependencies. +The CLI makes use of the Maven configuration found in `~/.m2/settings.xml` to configure Maven Resolver. +The following configuration settings are honored by the CLI: + +* Offline + +* Mirrors + +* Servers + +* Proxies + +* Profiles + + * Activation + + * Repositories + +* Active profiles + +See [Maven’s settings documentation](https://maven.apache.org/settings.html) for further information. + +## 5. What to Read Next + +There are some [sample groovy scripts](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-cli/samples) available from the GitHub repository that you can use to try out the Spring Boot CLI. +There is also extensive Javadoc throughout the [source code](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-cli/src/main/java/org/springframework/boot/cli). + +If you find that you reach the limit of the CLI tool, you probably want to look at converting your application to a full Gradle or Maven built “Groovy project”. +The next section covers Spring Boot’s "[Build tool plugins](build-tool-plugins.html#build-tool-plugins)", which you can use with Gradle or Maven. diff --git a/docs/en/spring-boot/container-images.md b/docs/en/spring-boot/container-images.md new file mode 100644 index 0000000000000000000000000000000000000000..031a04bb6fa9cba450a65467e1d5553dce5d1742 --- /dev/null +++ b/docs/en/spring-boot/container-images.md @@ -0,0 +1,162 @@ +# Container Images + +Spring Boot applications can be containerized [using Dockerfiles](#container-images.dockerfiles), or by [using Cloud Native Buildpacks to create optimized docker compatible container images that you can run anywhere](#container-images.buildpacks). + +## 1. Efficient container images + + +It is easily possible to package a Spring Boot fat jar as a docker image. 
+However, there are various downsides to copying and running the fat jar as is in the docker image. +There’s always a certain amount of overhead when running a fat jar without unpacking it, and in a containerized environment this can be noticeable. +The other issue is that putting your application’s code and all its dependencies in one layer in the Docker image is sub-optimal. +Since you probably recompile your code more often than you upgrade the version of Spring Boot you use, it’s often better to separate things a bit more. +If you put jar files in the layer before your application classes, Docker often only needs to change the very bottom layer and can pick others up from its cache. + +### 1.1. Unpacking the fat jar + +If you are running your application from a container, you can use an executable jar, but it is also often an advantage to explode it and run it in a different way. +Certain PaaS implementations may also choose to unpack archives before they run. +For example, Cloud Foundry operates this way. +One way to run an unpacked archive is by starting the appropriate launcher, as follows: + +``` +$ jar -xf myapp.jar +$ java org.springframework.boot.loader.JarLauncher +``` + +This is actually slightly faster on startup (depending on the size of the jar) than running from an unexploded archive. +At runtime you should not expect any differences. + +Once you have unpacked the jar file, you can also get an extra boost to startup time by running the app with its "natural" main method instead of the `JarLauncher`. For example: + +``` +$ jar -xf myapp.jar +$ java -cp BOOT-INF/classes:BOOT-INF/lib/* com.example.MyApplication +``` + +| |Using the `JarLauncher` over the application’s main method has the added benefit of a predictable classpath order.
The jar contains a `classpath.idx` file which is used by the `JarLauncher` when constructing the classpath.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.2. Layering Docker Images + +To make it easier to create optimized Docker images, Spring Boot supports adding a layer index file to the jar. +It provides a list of layers and the parts of the jar that should be contained within them. +The list of layers in the index is ordered based on the order in which the layers should be added to the Docker/OCI image. +Out-of-the-box, the following layers are supported: + +* `dependencies` (for regular released dependencies) + +* `spring-boot-loader` (for everything under `org/springframework/boot/loader`) + +* `snapshot-dependencies` (for snapshot dependencies) + +* `application` (for application classes and resources) + +The following shows an example of a `layers.idx` file: + +``` +- "dependencies": + - BOOT-INF/lib/library1.jar + - BOOT-INF/lib/library2.jar +- "spring-boot-loader": + - org/springframework/boot/loader/JarLauncher.class + - org/springframework/boot/loader/jar/JarEntry.class +- "snapshot-dependencies": + - BOOT-INF/lib/library3-SNAPSHOT.jar +- "application": + - META-INF/MANIFEST.MF + - BOOT-INF/classes/a/b/C.class +``` + +This layering is designed to separate code based on how likely it is to change between application builds. +Library code is less likely to change between builds, so it is placed in its own layers to allow tooling to re-use the layers from cache. +Application code is more likely to change between builds so it is isolated in a separate layer. + +Spring Boot also supports layering for war files with the help of a `layers.idx`. 
+ +For Maven, see the [packaging layered jar or war section](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#repackage-layers) for more details on adding a layer index to the archive. +For Gradle, see the [packaging layered jar or war section](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/#packaging-layered-archives) of the Gradle plugin documentation. + +## 2. Dockerfiles + +While it is possible to convert a Spring Boot fat jar into a docker image with just a few lines in the Dockerfile, we will use the [layering feature](#container-images.efficient-images.layering) to create an optimized docker image. +When you create a jar containing the layers index file, the `spring-boot-jarmode-layertools` jar will be added as a dependency to your jar. +With this jar on the classpath, you can launch your application in a special mode which allows the bootstrap code to run something entirely different from your application, for example, something that extracts the layers. + +| |The `layertools` mode can not be used with a [fully executable Spring Boot archive](deployment.html#deployment.installing) that includes a launch script.
Disable launch script configuration when building a jar file that is intended to be used with `layertools`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Here’s how you can launch your jar with a `layertools` jar mode: + +``` +$ java -Djarmode=layertools -jar my-app.jar +``` + +This will provide the following output: + +``` +Usage: + java -Djarmode=layertools -jar my-app.jar + +Available commands: + list List layers from the jar that can be extracted + extract Extracts layers from the jar for image creation + help Help about any command +``` + +The `extract` command can be used to easily split the application into layers to be added to the dockerfile. +Here is an example of a Dockerfile using `jarmode`. + +``` +FROM adoptopenjdk:11-jre-hotspot as builder +WORKDIR application +ARG JAR_FILE=target/*.jar +COPY ${JAR_FILE} application.jar +RUN java -Djarmode=layertools -jar application.jar extract + +FROM adoptopenjdk:11-jre-hotspot +WORKDIR application +COPY --from=builder application/dependencies/ ./ +COPY --from=builder application/spring-boot-loader/ ./ +COPY --from=builder application/snapshot-dependencies/ ./ +COPY --from=builder application/application/ ./ +ENTRYPOINT ["java", "org.springframework.boot.loader.JarLauncher"] +``` + +Assuming the above `Dockerfile` is in the current directory, your docker image can be built with `docker build .`, or optionally specifying the path to your application jar, as shown in the following example: + +``` +$ docker build --build-arg JAR_FILE=path/to/myapp.jar . +``` + +This is a multi-stage dockerfile. +The builder stage extracts the directories that are needed later. +Each of the `COPY` commands relates to the layers extracted by the jarmode. 
+ +Of course, a Dockerfile can be written without using the jarmode. +You can use some combination of `unzip` and `mv` to move things to the right layer but jarmode simplifies that. + +## 3. Cloud Native Buildpacks + +Dockerfiles are just one way to build docker images. +Another way to build docker images is directly from your Maven or Gradle plugin, using buildpacks. +If you’ve ever used an application platform such as Cloud Foundry or Heroku then you’ve probably used a buildpack. +Buildpacks are the part of the platform that takes your application and converts it into something that the platform can actually run. +For example, Cloud Foundry’s Java buildpack will notice that you’re pushing a `.jar` file and automatically add a relevant JRE. + +With Cloud Native Buildpacks, you can create Docker compatible images that you can run anywhere. +Spring Boot includes buildpack support directly for both Maven and Gradle. +This means you can just type a single command and quickly get a sensible image into your locally running Docker daemon. + +See the individual plugin documentation on how to use buildpacks with [Maven](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#build-image) and [Gradle](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/#build-image). 
+ +| |The [Paketo Spring Boot buildpack](https://github.com/paketo-buildpacks/spring-boot) has also been updated to support the `layers.idx` file so any customization that is applied to it will be reflected in the image created by the buildpack.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |In order to achieve reproducible builds and container image caching, Buildpacks can manipulate the application resources metadata (such as the file "last modified" information).
You should ensure that your application does not rely on that metadata at runtime.
Spring Boot can use that information when serving static resources, but this can be disabled with `spring.web.resources.cache.use-last-modified`.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+## 4. What to Read Next
+
+Once you’ve learned how to build efficient container images, you can read about [deploying applications to a cloud platform](deployment.html#deployment.cloud.kubernetes), such as Kubernetes.
+
diff --git a/docs/en/spring-boot/data.md b/docs/en/spring-boot/data.md
new file mode 100644
index 0000000000000000000000000000000000000000..0b1d803f8ab10b181a4511450d15bd93fd56fa03
--- /dev/null
+++ b/docs/en/spring-boot/data.md
@@ -0,0 +1,1630 @@
+# Data
+
+Spring Boot integrates with a number of data technologies, both SQL and NoSQL.
+
+# 1. SQL Databases
+----------
+
+The [Spring Framework](https://spring.io/projects/spring-framework) provides extensive support for working with SQL databases, from direct JDBC access using `JdbcTemplate` to complete “object relational mapping” technologies such as Hibernate. [Spring Data](https://spring.io/projects/spring-data) provides an additional level of functionality: creating `Repository` implementations directly from interfaces and using conventions to generate queries from your method names.
+
+### 1.1. Configure a DataSource
+
+Java’s `javax.sql.DataSource` interface provides a standard method of working with database connections.
+Traditionally, a `DataSource` uses a `URL` along with some credentials to establish a database connection. 
+ +| |See [the “How-to” section](howto.html#howto.data-access.configure-custom-datasource) for more advanced examples, typically to take full control over the configuration of the DataSource.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.1. Embedded Database Support + +It is often convenient to develop applications by using an in-memory embedded database. +Obviously, in-memory databases do not provide persistent storage. +You need to populate your database when your application starts and be prepared to throw away data when your application ends. + +| |The “How-to” section includes a [section on how to initialize a database](howto.html#howto.data-initialization).| +|---|----------------------------------------------------------------------------------------------------------------| + +Spring Boot can auto-configure embedded [H2](https://www.h2database.com), [HSQL](http://hsqldb.org/), and [Derby](https://db.apache.org/derby/) databases. +You need not provide any connection URLs. +You need only include a build dependency to the embedded database that you want to use. +If there are multiple embedded databases on the classpath, set the `spring.datasource.embedded-database-connection` configuration property to control which one is used. +Setting the property to `none` disables auto-configuration of an embedded database. + +| |If you are using this feature in your tests, you may notice that the same database is reused by your whole test suite regardless of the number of application contexts that you use.
If you want to make sure that each context has a separate embedded database, you should set `spring.datasource.generate-unique-name` to `true`.|
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+For example, the typical POM dependencies would be as follows:
+
+```
+<dependency>
+    <groupId>org.springframework.boot</groupId>
+    <artifactId>spring-boot-starter-data-jpa</artifactId>
+</dependency>
+<dependency>
+    <groupId>org.hsqldb</groupId>
+    <artifactId>hsqldb</artifactId>
+    <scope>runtime</scope>
+</dependency>
+```
+
+| |You need a dependency on `spring-jdbc` for an embedded database to be auto-configured.
In this example, it is pulled in transitively through `spring-boot-starter-data-jpa`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If, for whatever reason, you do configure the connection URL for an embedded database, take care to ensure that the database’s automatic shutdown is disabled.
If you use H2, you should use `DB_CLOSE_ON_EXIT=FALSE` to do so.
If you use HSQLDB, you should ensure that `shutdown=true` is not used.
Disabling the database’s automatic shutdown lets Spring Boot control when the database is closed, thereby ensuring that it happens once access to the database is no longer needed.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.2. Connection to a Production Database + +Production database connections can also be auto-configured by using a pooling `DataSource`. + +#### 1.1.3. DataSource Configuration + +DataSource configuration is controlled by external configuration properties in `spring.datasource.*`. +For example, you might declare the following section in `application.properties`: + +Properties + +``` +spring.datasource.url=jdbc:mysql://localhost/test +spring.datasource.username=dbuser +spring.datasource.password=dbpass +``` + +Yaml + +``` +spring: + datasource: + url: "jdbc:mysql://localhost/test" + username: "dbuser" + password: "dbpass" +``` + +| |You should at least specify the URL by setting the `spring.datasource.url` property.
Otherwise, Spring Boot tries to auto-configure an embedded database.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Spring Boot can deduce the JDBC driver class for most databases from the URL.
If you need to specify a specific class, you can use the `spring.datasource.driver-class-name` property.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |For a pooling `DataSource` to be created, we need to be able to verify that a valid `Driver` class is available, so we check for that before doing anything.
In other words, if you set `spring.datasource.driver-class-name=com.mysql.jdbc.Driver`, then that class has to be loadable.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [`DataSourceProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/jdbc/DataSourceProperties.java) for more of the supported options. +These are the standard options that work regardless of [the actual implementation](features.html#data.sql.datasource.connection-pool). +It is also possible to fine-tune implementation-specific settings by using their respective prefix (`spring.datasource.hikari.*`, `spring.datasource.tomcat.*`, `spring.datasource.dbcp2.*`, and `spring.datasource.oracleucp.*`). +See the documentation of the connection pool implementation you are using for more details. + +For instance, if you use the [Tomcat connection pool](https://tomcat.apache.org/tomcat-9.0-doc/jdbc-pool.html#Common_Attributes), you could customize many additional settings, as shown in the following example: + +Properties + +``` +spring.datasource.tomcat.max-wait=10000 +spring.datasource.tomcat.max-active=50 +spring.datasource.tomcat.test-on-borrow=true +``` + +Yaml + +``` +spring: + datasource: + tomcat: + max-wait: 10000 + max-active: 50 + test-on-borrow: true +``` + +This will set the pool to wait 10000ms before throwing an exception if no connection is available, limit the maximum number of connections to 50 and validate the connection before borrowing it from the pool. + +#### 1.1.4. Supported Connection Pools + +Spring Boot uses the following algorithm for choosing a specific implementation: + +1. 
We prefer [HikariCP](https://github.com/brettwooldridge/HikariCP) for its performance and concurrency. + If HikariCP is available, we always choose it. + +2. Otherwise, if the Tomcat pooling `DataSource` is available, we use it. + +3. Otherwise, if [Commons DBCP2](https://commons.apache.org/proper/commons-dbcp/) is available, we use it. + +4. If none of HikariCP, Tomcat, and DBCP2 are available and if Oracle UCP is available, we use it. + +| |If you use the `spring-boot-starter-jdbc` or `spring-boot-starter-data-jpa` “starters”, you automatically get a dependency to `HikariCP`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------| + +You can bypass that algorithm completely and specify the connection pool to use by setting the `spring.datasource.type` property. +This is especially important if you run your application in a Tomcat container, as `tomcat-jdbc` is provided by default. + +Additional connection pools can always be configured manually, using `DataSourceBuilder`. +If you define your own `DataSource` bean, auto-configuration does not occur. +The following connection pools are supported by `DataSourceBuilder`: + +* HikariCP + +* Tomcat pooling `Datasource` + +* Commons DBCP2 + +* Oracle UCP & `OracleDataSource` + +* Spring Framework’s `SimpleDriverDataSource` + +* H2 `JdbcDataSource` + +* PostgreSQL `PGSimpleDataSource` + +#### 1.1.5. Connection to a JNDI DataSource + +If you deploy your Spring Boot application to an Application Server, you might want to configure and manage your DataSource by using your Application Server’s built-in features and access it by using JNDI. + +The `spring.datasource.jndi-name` property can be used as an alternative to the `spring.datasource.url`, `spring.datasource.username`, and `spring.datasource.password` properties to access the `DataSource` from a specific JNDI location. 
+For example, the following section in `application.properties` shows how you can access a JBoss AS defined `DataSource`: + +Properties + +``` +spring.datasource.jndi-name=java:jboss/datasources/customers +``` + +Yaml + +``` +spring: + datasource: + jndi-name: "java:jboss/datasources/customers" +``` + +### 1.2. Using JdbcTemplate + +Spring’s `JdbcTemplate` and `NamedParameterJdbcTemplate` classes are auto-configured, and you can `@Autowire` them directly into your own beans, as shown in the following example: + +``` +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final JdbcTemplate jdbcTemplate; + + public MyBean(JdbcTemplate jdbcTemplate) { + this.jdbcTemplate = jdbcTemplate; + } + + public void doSomething() { + this.jdbcTemplate ... + } + +} + +``` + +You can customize some properties of the template by using the `spring.jdbc.template.*` properties, as shown in the following example: + +Properties + +``` +spring.jdbc.template.max-rows=500 +``` + +Yaml + +``` +spring: + jdbc: + template: + max-rows: 500 +``` + +| |The `NamedParameterJdbcTemplate` reuses the same `JdbcTemplate` instance behind the scenes.
If more than one `JdbcTemplate` is defined and no primary candidate exists, the `NamedParameterJdbcTemplate` is not auto-configured.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.3. JPA and Spring Data JPA + +The Java Persistence API is a standard technology that lets you “map” objects to relational databases. +The `spring-boot-starter-data-jpa` POM provides a quick way to get started. +It provides the following key dependencies: + +* Hibernate: One of the most popular JPA implementations. + +* Spring Data JPA: Helps you to implement JPA-based repositories. + +* Spring ORM: Core ORM support from the Spring Framework. + +| |We do not go into too many details of JPA or [Spring Data](https://spring.io/projects/spring-data) here.
You can follow the [“Accessing Data with JPA”](https://spring.io/guides/gs/accessing-data-jpa/) guide from [spring.io](https://spring.io) and read the [Spring Data JPA](https://spring.io/projects/spring-data-jpa) and [Hibernate](https://hibernate.org/orm/documentation/) reference documentation.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.3.1. Entity Classes + +Traditionally, JPA “Entity” classes are specified in a `persistence.xml` file. +With Spring Boot, this file is not necessary and “Entity Scanning” is used instead. +By default, all packages below your main configuration class (the one annotated with `@EnableAutoConfiguration` or `@SpringBootApplication`) are searched. + +Any classes annotated with `@Entity`, `@Embeddable`, or `@MappedSuperclass` are considered. +A typical entity class resembles the following example: + +``` +import java.io.Serializable; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.Id; + +@Entity +public class City implements Serializable { + + @Id + @GeneratedValue + private Long id; + + @Column(nullable = false) + private String name; + + @Column(nullable = false) + private String state; + + // ... 
additional members, often include @OneToMany mappings + + protected City() { + // no-args constructor required by JPA spec + // this one is protected since it should not be used directly + } + + public City(String name, String state) { + this.name = name; + this.state = state; + } + + public String getName() { + return this.name; + } + + public String getState() { + return this.state; + } + + // ... etc + +} + +``` + +| |You can customize entity scanning locations by using the `@EntityScan` annotation.
See the “[howto.html](howto.html#howto.data-access.separate-entity-definitions-from-spring-configuration)” how-to.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.3.2. Spring Data JPA Repositories + +[Spring Data JPA](https://spring.io/projects/spring-data-jpa) repositories are interfaces that you can define to access data. +JPA queries are created automatically from your method names. +For example, a `CityRepository` interface might declare a `findAllByState(String state)` method to find all the cities in a given state. + +For more complex queries, you can annotate your method with Spring Data’s [`Query`](https://docs.spring.io/spring-data/jpa/docs/2.6.2/api/org/springframework/data/jpa/repository/Query.html) annotation. + +Spring Data repositories usually extend from the [`Repository`](https://docs.spring.io/spring-data/commons/docs/2.6.2/api/org/springframework/data/repository/Repository.html) or [`CrudRepository`](https://docs.spring.io/spring-data/commons/docs/2.6.2/api/org/springframework/data/repository/CrudRepository.html) interfaces. +If you use auto-configuration, repositories are searched from the package containing your main configuration class (the one annotated with `@EnableAutoConfiguration` or `@SpringBootApplication`) down. 
+ +The following example shows a typical Spring Data repository interface definition: + +``` +import org.springframework.boot.docs.data.sql.jpaandspringdata.entityclasses.City; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.Pageable; +import org.springframework.data.repository.Repository; + +public interface CityRepository extends Repository { + + Page findAll(Pageable pageable); + + City findByNameAndStateAllIgnoringCase(String name, String state); + +} + +``` + +Spring Data JPA repositories support three different modes of bootstrapping: default, deferred, and lazy. +To enable deferred or lazy bootstrapping, set the `spring.data.jpa.repositories.bootstrap-mode` property to `deferred` or `lazy` respectively. +When using deferred or lazy bootstrapping, the auto-configured `EntityManagerFactoryBuilder` will use the context’s `AsyncTaskExecutor`, if any, as the bootstrap executor. +If more than one exists, the one named `applicationTaskExecutor` will be used. + +| |When using deferred or lazy bootstrapping, make sure to defer any access to the JPA infrastructure after the application context bootstrap phase.
You can use `SmartInitializingSingleton` to invoke any initialization that requires the JPA infrastructure.
For JPA components (such as converters) that are created as Spring beans, use `ObjectProvider` to delay the resolution of dependencies, if any.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |We have barely scratched the surface of Spring Data JPA.
For complete details, see the [Spring Data JPA reference documentation](https://docs.spring.io/spring-data/jpa/docs/2.6.2/reference/html).| +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.3.3. Spring Data Envers Repositories + +If [Spring Data Envers](https://spring.io/projects/spring-data-envers) is available, JPA repositories are auto-configured to support typical Envers queries. + +To use Spring Data Envers, make sure your repository extends from `RevisionRepository` as shown in the following example: + +``` +import org.springframework.boot.docs.data.sql.jpaandspringdata.entityclasses.Country; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.Pageable; +import org.springframework.data.repository.Repository; +import org.springframework.data.repository.history.RevisionRepository; + +public interface CountryRepository extends RevisionRepository<Country, Long, Integer>, Repository<Country, Long> { + + Page<Country> findAll(Pageable pageable); + +} + +``` + +| |For more details, check the [Spring Data Envers reference documentation](https://docs.spring.io/spring-data/envers/docs/2.6.2/reference/html/).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.3.4. Creating and Dropping JPA Databases + +By default, JPA databases are automatically created **only** if you use an embedded database (H2, HSQL, or Derby). +You can explicitly configure JPA settings by using `spring.jpa.*` properties. 
+For example, to create and drop tables you can add the following line to your `application.properties`: + +Properties + +``` +spring.jpa.hibernate.ddl-auto=create-drop +``` + +Yaml + +``` +spring: + jpa: + hibernate.ddl-auto: "create-drop" +``` + +| |Hibernate’s own internal property name for this (if you happen to remember it better) is `hibernate.hbm2ddl.auto`.
You can set it, along with other Hibernate native properties, by using `spring.jpa.properties.*` (the prefix is stripped before adding them to the entity manager).
The following line shows an example of setting JPA properties for Hibernate:| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Properties + +``` +spring.jpa.properties.hibernate[globally_quoted_identifiers]=true +``` + +Yaml + +``` +spring: + jpa: + properties: + hibernate: + "globally_quoted_identifiers": "true" +``` + +The line in the preceding example passes a value of `true` for the `hibernate.globally_quoted_identifiers` property to the Hibernate entity manager. + +By default, the DDL execution (or validation) is deferred until the `ApplicationContext` has started. +There is also a `spring.jpa.generate-ddl` flag, but it is not used if Hibernate auto-configuration is active, because the `ddl-auto` settings are more fine-grained. + +#### 1.3.5. Open EntityManager in View + +If you are running a web application, Spring Boot by default registers [`OpenEntityManagerInViewInterceptor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/orm/jpa/support/OpenEntityManagerInViewInterceptor.html) to apply the “Open EntityManager in View” pattern, to allow for lazy loading in web views. +If you do not want this behavior, you should set `spring.jpa.open-in-view` to `false` in your `application.properties`. + +### 1.4. Spring Data JDBC + +Spring Data includes repository support for JDBC and will automatically generate SQL for the methods on `CrudRepository`. +For more advanced queries, a `@Query` annotation is provided. + +Spring Boot will auto-configure Spring Data’s JDBC repositories when the necessary dependencies are on the classpath. 
+They can be added to your project with a single dependency on `spring-boot-starter-data-jdbc`. +If necessary, you can take control of Spring Data JDBC’s configuration by adding the `@EnableJdbcRepositories` annotation or a `JdbcConfiguration` subclass to your application. + +| |For complete details of Spring Data JDBC, see the [reference documentation](https://docs.spring.io/spring-data/jdbc/docs/2.3.2/reference/html/).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.5. Using H2’s Web Console + +The [H2 database](https://www.h2database.com) provides a [browser-based console](https://www.h2database.com/html/quickstart.html#h2_console) that Spring Boot can auto-configure for you. +The console is auto-configured when the following conditions are met: + +* You are developing a servlet-based web application. + +* `com.h2database:h2` is on the classpath. + +* You are using [Spring Boot’s developer tools](using.html#using.devtools). + +| |If you are not using Spring Boot’s developer tools but would still like to make use of H2’s console, you can configure the `spring.h2.console.enabled` property with a value of `true`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The H2 console is only intended for use during development, so you should take care to ensure that `spring.h2.console.enabled` is not set to `true` in production.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.5.1. Changing the H2 Console’s Path + +By default, the console is available at `/h2-console`. +You can customize the console’s path by using the `spring.h2.console.path` property. + +### 1.6. 
Using jOOQ + +jOOQ Object Oriented Querying ([jOOQ](https://www.jooq.org/)) is a popular product from [Data Geekery](https://www.datageekery.com/) which generates Java code from your database and lets you build type-safe SQL queries through its fluent API. +Both the commercial and open source editions can be used with Spring Boot. + +#### 1.6.1. Code Generation + +In order to use jOOQ type-safe queries, you need to generate Java classes from your database schema. +You can follow the instructions in the [jOOQ user manual](https://www.jooq.org/doc/3.14.15/manual-single-page/#jooq-in-7-steps-step3). +If you use the `jooq-codegen-maven` plugin and you also use the `spring-boot-starter-parent` “parent POM”, you can safely omit the plugin’s `` tag. +You can also use Spring Boot-defined version variables (such as `h2.version`) to declare the plugin’s database dependency. +The following listing shows an example: + +``` + + org.jooq + jooq-codegen-maven + + ... + + + + com.h2database + h2 + ${h2.version} + + + + + org.h2.Driver + jdbc:h2:~/yourdatabase + + + ... + + + +``` + +#### 1.6.2. Using DSLContext + +The fluent API offered by jOOQ is initiated through the `org.jooq.DSLContext` interface. +Spring Boot auto-configures a `DSLContext` as a Spring Bean and connects it to your application `DataSource`. 
+To use the `DSLContext`, you can inject it, as shown in the following example: + +``` +import java.util.GregorianCalendar; +import java.util.List; + +import org.jooq.DSLContext; + +import org.springframework.stereotype.Component; + +import static org.springframework.boot.docs.data.sql.jooq.dslcontext.Tables.AUTHOR; + +@Component +public class MyBean { + + private final DSLContext create; + + public MyBean(DSLContext dslContext) { + this.create = dslContext; + } + +} + +``` + +| |The jOOQ manual tends to use a variable named `create` to hold the `DSLContext`.| +|---|--------------------------------------------------------------------------------| + +You can then use the `DSLContext` to construct your queries, as shown in the following example: + +``` +public List<GregorianCalendar> authorsBornAfter1980() { + return this.create.selectFrom(AUTHOR) + .where(AUTHOR.DATE_OF_BIRTH.greaterThan(new GregorianCalendar(1980, 0, 1))) + .fetch(AUTHOR.DATE_OF_BIRTH); +} + +``` + +#### 1.6.3. jOOQ SQL Dialect + +Unless the `spring.jooq.sql-dialect` property has been configured, Spring Boot determines the SQL dialect to use for your datasource. +If Spring Boot could not detect the dialect, it uses `DEFAULT`. + +| |Spring Boot can only auto-configure dialects supported by the open source version of jOOQ.| +|---|------------------------------------------------------------------------------------------| + +#### 1.6.4. Customizing jOOQ + +More advanced customizations can be achieved by defining your own `DefaultConfigurationCustomizer` bean that will be invoked prior to creating the `org.jooq.Configuration` `@Bean`. +This takes precedence over anything that is applied by the auto-configuration. + +You can also create your own `org.jooq.Configuration` `@Bean` if you want to take complete control of the jOOQ configuration. + +### 1.7. Using R2DBC + +The Reactive Relational Database Connectivity ([R2DBC](https://r2dbc.io)) project brings reactive programming APIs to relational databases. 
+R2DBC’s `io.r2dbc.spi.Connection` provides a standard method of working with non-blocking database connections. +Connections are provided by using a `ConnectionFactory`, similar to a `DataSource` with JDBC. + +`ConnectionFactory` configuration is controlled by external configuration properties in `spring.r2dbc.*`. +For example, you might declare the following section in `application.properties`: + +Properties + +``` +spring.r2dbc.url=r2dbc:postgresql://localhost/test +spring.r2dbc.username=dbuser +spring.r2dbc.password=dbpass +``` + +Yaml + +``` +spring: + r2dbc: + url: "r2dbc:postgresql://localhost/test" + username: "dbuser" + password: "dbpass" +``` + +| |You do not need to specify a driver class name, since Spring Boot obtains the driver from R2DBC’s Connection Factory discovery.| +|---|-------------------------------------------------------------------------------------------------------------------------------| + +| |At least the `url` should be provided. 
Information specified in the URL takes precedence over individual properties, that is `name`, `username`, `password` and pooling options.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The “How-to” section includes a [section on how to initialize a database](howto.html#howto.data-initialization.using-basic-sql-scripts).| +|---|----------------------------------------------------------------------------------------------------------------------------------------| + +To customize the connections created by a `ConnectionFactory`, that is, set specific parameters that you do not want (or cannot) configure in your central database configuration, you can use a `ConnectionFactoryOptionsBuilderCustomizer` `@Bean`. +The following example shows how to manually override the database port while the rest of the options is taken from the application configuration: + +``` +import io.r2dbc.spi.ConnectionFactoryOptions; + +import org.springframework.boot.autoconfigure.r2dbc.ConnectionFactoryOptionsBuilderCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyR2dbcConfiguration { + + @Bean + public ConnectionFactoryOptionsBuilderCustomizer connectionFactoryPortCustomizer() { + return (builder) -> builder.option(ConnectionFactoryOptions.PORT, 5432); + } + +} + +``` + +The following examples show how to set some PostgreSQL connection options: + +``` +import java.util.HashMap; +import java.util.Map; + +import io.r2dbc.postgresql.PostgresqlConnectionFactoryProvider; + +import org.springframework.boot.autoconfigure.r2dbc.ConnectionFactoryOptionsBuilderCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = 
false) +public class MyPostgresR2dbcConfiguration { + + @Bean + public ConnectionFactoryOptionsBuilderCustomizer postgresCustomizer() { + Map options = new HashMap<>(); + options.put("lock_timeout", "30s"); + options.put("statement_timeout", "60s"); + return (builder) -> builder.option(PostgresqlConnectionFactoryProvider.OPTIONS, options); + } + +} + +``` + +When a `ConnectionFactory` bean is available, the regular JDBC `DataSource` auto-configuration backs off. +If you want to retain the JDBC `DataSource` auto-configuration, and are comfortable with the risk of using the blocking JDBC API in a reactive application, add `@Import(DataSourceAutoConfiguration.class)` on a `@Configuration` class in your application to re-enable it. + +#### 1.7.1. Embedded Database Support + +Similarly to [the JDBC support](features.html#data.sql.datasource.embedded), Spring Boot can automatically configure an embedded database for reactive usage. +You need not provide any connection URLs. +You need only include a build dependency to the embedded database that you want to use, as shown in the following example: + +``` + + io.r2dbc + r2dbc-h2 + runtime + +``` + +| |If you are using this feature in your tests, you may notice that the same database is reused by your whole test suite regardless of the number of application contexts that you use.
If you want to make sure that each context has a separate embedded database, you should set `spring.r2dbc.generate-unique-name` to `true`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.7.2. Using DatabaseClient + +A `DatabaseClient` bean is auto-configured, and you can `@Autowire` it directly into your own beans, as shown in the following example: + +``` +import java.util.Map; + +import reactor.core.publisher.Flux; + +import org.springframework.r2dbc.core.DatabaseClient; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final DatabaseClient databaseClient; + + public MyBean(DatabaseClient databaseClient) { + this.databaseClient = databaseClient; + } + + // ... + + public Flux> someMethod() { + return this.databaseClient.sql("select * from user").fetch().all(); + } + +} + +``` + +#### 1.7.3. Spring Data R2DBC Repositories + +[Spring Data R2DBC](https://spring.io/projects/spring-data-r2dbc) repositories are interfaces that you can define to access data. +Queries are created automatically from your method names. +For example, a `CityRepository` interface might declare a `findAllByState(String state)` method to find all the cities in a given state. + +For more complex queries, you can annotate your method with Spring Data’s [`Query`](https://docs.spring.io/spring-data/r2dbc/docs/1.4.2/api/org/springframework/data/r2dbc/repository/Query.html) annotation. 
+ +Spring Data repositories usually extend from the [`Repository`](https://docs.spring.io/spring-data/commons/docs/2.6.2/api/org/springframework/data/repository/Repository.html) or [`CrudRepository`](https://docs.spring.io/spring-data/commons/docs/2.6.2/api/org/springframework/data/repository/CrudRepository.html) interfaces. +If you use auto-configuration, repositories are searched from the package containing your main configuration class (the one annotated with `@EnableAutoConfiguration` or `@SpringBootApplication`) down. + +The following example shows a typical Spring Data repository interface definition: + +``` +import reactor.core.publisher.Mono; + +import org.springframework.data.repository.Repository; + +public interface CityRepository extends Repository { + + Mono findByNameAndStateAllIgnoringCase(String name, String state); + +} + +``` + +| |We have barely scratched the surface of Spring Data R2DBC. For complete details, see the [Spring Data R2DBC reference documentation](https://docs.spring.io/spring-data/r2dbc/docs/1.4.2/reference/html/).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 2. 
Working with NoSQL Technologies +---------- + +Spring Data provides additional projects that help you access a variety of NoSQL technologies, including: + +* [MongoDB](https://spring.io/projects/spring-data-mongodb) + +* [Neo4J](https://spring.io/projects/spring-data-neo4j) + +* [Elasticsearch](https://spring.io/projects/spring-data-elasticsearch) + +* [Redis](https://spring.io/projects/spring-data-redis) + +* [GemFire](https://spring.io/projects/spring-data-gemfire) or [Geode](https://spring.io/projects/spring-data-geode) + +* [Cassandra](https://spring.io/projects/spring-data-cassandra) + +* [Couchbase](https://spring.io/projects/spring-data-couchbase) + +* [LDAP](https://spring.io/projects/spring-data-ldap) + +Spring Boot provides auto-configuration for Redis, MongoDB, Neo4j, Solr, Elasticsearch, Cassandra, Couchbase, LDAP and InfluxDB. +You can make use of the other projects, but you must configure them yourself. +See the appropriate reference documentation at [spring.io/projects/spring-data](https://spring.io/projects/spring-data). + +### 2.1. Redis + +[Redis](https://redis.io/) is a cache, message broker, and richly-featured key-value store. +Spring Boot offers basic auto-configuration for the [Lettuce](https://github.com/lettuce-io/lettuce-core/) and [Jedis](https://github.com/xetorthio/jedis/) client libraries and the abstractions on top of them provided by [Spring Data Redis](https://github.com/spring-projects/spring-data-redis). + +There is a `spring-boot-starter-data-redis` “Starter” for collecting the dependencies in a convenient way. +By default, it uses [Lettuce](https://github.com/lettuce-io/lettuce-core/). +That starter handles both traditional and reactive applications. 
+ +| |We also provide a `spring-boot-starter-data-redis-reactive` “Starter” for consistency with the other stores with reactive support.| +|---|----------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.1. Connecting to Redis + +You can inject an auto-configured `RedisConnectionFactory`, `StringRedisTemplate`, or vanilla `RedisTemplate` instance as you would any other Spring Bean. +By default, the instance tries to connect to a Redis server at `localhost:6379`. +The following listing shows an example of such a bean: + +``` +import org.springframework.data.redis.core.StringRedisTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final StringRedisTemplate template; + + public MyBean(StringRedisTemplate template) { + this.template = template; + } + + // ... + + public Boolean someMethod() { + return this.template.hasKey("spring"); + } + +} + +``` + +| |You can also register an arbitrary number of beans that implement `LettuceClientConfigurationBuilderCustomizer` for more advanced customizations.`ClientResources` can also be customized using `ClientResourcesBuilderCustomizer`.
If you use Jedis, `JedisClientConfigurationBuilderCustomizer` is also available.
Alternatively, you can register a bean of type `RedisStandaloneConfiguration`, `RedisSentinelConfiguration`, or `RedisClusterConfiguration` to take full control over the configuration.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you add your own `@Bean` of any of the auto-configured types, it replaces the default (except in the case of `RedisTemplate`, when the exclusion is based on the bean name, `redisTemplate`, not its type). + +By default, a pooled connection factory is auto-configured if `commons-pool2` is on the classpath. + +### 2.2. MongoDB + +[MongoDB](https://www.mongodb.com/) is an open-source NoSQL document database that uses a JSON-like schema instead of traditional table-based relational data. +Spring Boot offers several conveniences for working with MongoDB, including the `spring-boot-starter-data-mongodb` and `spring-boot-starter-data-mongodb-reactive` “Starters”. + +#### 2.2.1. Connecting to a MongoDB Database + +To access MongoDB databases, you can inject an auto-configured `org.springframework.data.mongodb.MongoDatabaseFactory`. +By default, the instance tries to connect to a MongoDB server at `mongodb://localhost/test`. 
+The following example shows how to connect to a MongoDB database: + +``` +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import org.bson.Document; + +import org.springframework.data.mongodb.MongoDatabaseFactory; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final MongoDatabaseFactory mongo; + + public MyBean(MongoDatabaseFactory mongo) { + this.mongo = mongo; + } + + // ... + + public MongoCollection someMethod() { + MongoDatabase db = this.mongo.getMongoDatabase(); + return db.getCollection("users"); + } + +} + +``` + +If you have defined your own `MongoClient`, it will be used to auto-configure a suitable `MongoDatabaseFactory`. + +The auto-configured `MongoClient` is created using a `MongoClientSettings` bean. +If you have defined your own `MongoClientSettings`, it will be used without modification and the `spring.data.mongodb` properties will be ignored. +Otherwise a `MongoClientSettings` will be auto-configured and will have the `spring.data.mongodb` properties applied to it. +In either case, you can declare one or more `MongoClientSettingsBuilderCustomizer` beans to fine-tune the `MongoClientSettings` configuration. +Each will be called in order with the `MongoClientSettings.Builder` that is used to build the `MongoClientSettings`. + +You can set the `spring.data.mongodb.uri` property to change the URL and configure additional settings such as the *replica set*, as shown in the following example: + +Properties + +``` +spring.data.mongodb.uri=mongodb://user:[email protected]:12345,mongo2.example.com:23456/test +``` + +Yaml + +``` +spring: + data: + mongodb: + uri: "mongodb://user:[email protected]:12345,mongo2.example.com:23456/test" +``` + +Alternatively, you can specify connection details using discrete properties. 
+For example, you might declare the following settings in your `application.properties`: + +Properties + +``` +spring.data.mongodb.host=mongoserver.example.com +spring.data.mongodb.port=27017 +spring.data.mongodb.database=test +spring.data.mongodb.username=user +spring.data.mongodb.password=secret +``` + +Yaml + +``` +spring: + data: + mongodb: + host: "mongoserver.example.com" + port: 27017 + database: "test" + username: "user" + password: "secret" +``` + +| |If `spring.data.mongodb.port` is not specified, the default of `27017` is used.
You could delete this line from the example shown earlier.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you do not use Spring Data MongoDB, you can inject a `MongoClient` bean instead of using `MongoDatabaseFactory`.
If you want to take complete control of establishing the MongoDB connection, you can also declare your own `MongoDatabaseFactory` or `MongoClient` bean.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you are using the reactive driver, Netty is required for SSL.
The auto-configuration configures this factory automatically if Netty is available and the factory to use has not been customized already.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.2.2. MongoTemplate + +[Spring Data MongoDB](https://spring.io/projects/spring-data-mongodb) provides a [`MongoTemplate`](https://docs.spring.io/spring-data/mongodb/docs/3.3.2/api/org/springframework/data/mongodb/core/MongoTemplate.html) class that is very similar in its design to Spring’s `JdbcTemplate`. +As with `JdbcTemplate`, Spring Boot auto-configures a bean for you to inject the template, as follows: + +``` +import com.mongodb.client.MongoCollection; +import org.bson.Document; + +import org.springframework.data.mongodb.core.MongoTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final MongoTemplate mongoTemplate; + + public MyBean(MongoTemplate mongoTemplate) { + this.mongoTemplate = mongoTemplate; + } + + // ... + + public MongoCollection someMethod() { + return this.mongoTemplate.getCollection("users"); + } + +} + +``` + +See the [`MongoOperations` Javadoc](https://docs.spring.io/spring-data/mongodb/docs/3.3.2/api/org/springframework/data/mongodb/core/MongoOperations.html) for complete details. + +#### 2.2.3. Spring Data MongoDB Repositories + +Spring Data includes repository support for MongoDB. +As with the JPA repositories discussed earlier, the basic principle is that queries are constructed automatically, based on method names. + +In fact, both Spring Data JPA and Spring Data MongoDB share the same common infrastructure. 
+You could take the JPA example from earlier and, assuming that `City` is now a MongoDB data class rather than a JPA `@Entity`, it works in the same way, as shown in the following example: + +``` +import org.springframework.data.domain.Page; +import org.springframework.data.domain.Pageable; +import org.springframework.data.repository.Repository; + +public interface CityRepository extends Repository { + + Page findAll(Pageable pageable); + + City findByNameAndStateAllIgnoringCase(String name, String state); + +} + +``` + +| |You can customize document scanning locations by using the `@EntityScan` annotation.| +|---|------------------------------------------------------------------------------------| + +| |For complete details of Spring Data MongoDB, including its rich object mapping technologies, see its [reference documentation](https://spring.io/projects/spring-data-mongodb).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.2.4. Embedded Mongo + +Spring Boot offers auto-configuration for [Embedded Mongo](https://github.com/flapdoodle-oss/de.flapdoodle.embed.mongo). +To use it in your Spring Boot application, add a dependency on `de.flapdoodle.embed:de.flapdoodle.embed.mongo` and set the `spring.mongodb.embedded.version` property to match the version of MongoDB that your application will use in production. + +| |The default download configuration allows access to most of the versions listed in [Embedded Mongo’s `Version` class](https://github.com/flapdoodle-oss/de.flapdoodle.embed.mongo/blob/de.flapdoodle.embed.mongo-3.0.0/src/main/java/de/flapdoodle/embed/mongo/distribution/Version.java) as well as some others.
Configuring an inaccessible version will result in an error when attempting to download the server.
Such an error can be corrected by defining an appropriately configured `DownloadConfigBuilderCustomizer` bean.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The port that Mongo listens on can be configured by setting the `spring.data.mongodb.port` property. +To use a randomly allocated free port, use a value of 0. +The `MongoClient` created by `MongoAutoConfiguration` is automatically configured to use the randomly allocated port. + +| |If you do not configure a custom port, the embedded support uses a random port (rather than 27017) by default.| +|---|--------------------------------------------------------------------------------------------------------------| + +If you have SLF4J on the classpath, the output produced by Mongo is automatically routed to a logger named `org.springframework.boot.autoconfigure.mongo.embedded.EmbeddedMongo`. + +You can declare your own `IMongodConfig` and `IRuntimeConfig` beans to take control of the Mongo instance’s configuration and logging routing. +The download configuration can be customized by declaring a `DownloadConfigBuilderCustomizer` bean. + +### 2.3. Neo4j + +[Neo4j](https://neo4j.com/) is an open-source NoSQL graph database that uses a rich data model of nodes connected by first class relationships, which is better suited for connected big data than traditional RDBMS approaches. +Spring Boot offers several conveniences for working with Neo4j, including the `spring-boot-starter-data-neo4j` “Starter”. + +#### 2.3.1. 
Connecting to a Neo4j Database + +To access a Neo4j server, you can inject an auto-configured `org.neo4j.driver.Driver`. +By default, the instance tries to connect to a Neo4j server at `localhost:7687` using the Bolt protocol. +The following example shows how to inject a Neo4j `Driver` that gives you access, amongst other things, to a `Session`: + +``` +import org.neo4j.driver.Driver; +import org.neo4j.driver.Session; +import org.neo4j.driver.Values; + +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final Driver driver; + + public MyBean(Driver driver) { + this.driver = driver; + } + + // ... + + public String someMethod(String message) { + try (Session session = this.driver.session()) { + return session.writeTransaction((transaction) -> transaction + .run("CREATE (a:Greeting) SET a.message = $message RETURN a.message + ', from node ' + id(a)", + Values.parameters("message", message)) + .single().get(0).asString()); + } + } + +} + +``` + +You can configure various aspects of the driver using `spring.neo4j.*` properties. +The following example shows how to configure the uri and credentials to use: + +Properties + +``` +spring.neo4j.uri=bolt://my-server:7687 +spring.neo4j.authentication.username=neo4j +spring.neo4j.authentication.password=secret +``` + +Yaml + +``` +spring: + neo4j: + uri: "bolt://my-server:7687" + authentication: + username: "neo4j" + password: "secret" +``` + +The auto-configured `Driver` is created using `ConfigBuilder`. +To fine-tune its configuration, declare one or more `ConfigBuilderCustomizer` beans. +Each will be called in order with the `ConfigBuilder` that is used to build the `Driver`. + +#### 2.3.2. Spring Data Neo4j Repositories + +Spring Data includes repository support for Neo4j. +For complete details of Spring Data Neo4j, see the [reference documentation](https://docs.spring.io/spring-data/neo4j/docs/6.2.2/reference/html/). 
+ +Spring Data Neo4j shares the common infrastructure with Spring Data JPA as many other Spring Data modules do. +You could take the JPA example from earlier and define `City` as Spring Data Neo4j `@Node` rather than JPA `@Entity` and the repository abstraction works in the same way, as shown in the following example: + +``` +import java.util.Optional; + +import org.springframework.data.neo4j.repository.Neo4jRepository; + +public interface CityRepository extends Neo4jRepository { + + Optional findOneByNameAndState(String name, String state); + +} + +``` + +The `spring-boot-starter-data-neo4j` “Starter” enables the repository support as well as transaction management. +Spring Boot supports both classic and reactive Neo4j repositories, using the `Neo4jTemplate` or `ReactiveNeo4jTemplate` beans. +When Project Reactor is available on the classpath, the reactive style is also auto-configured. + +You can customize the locations to look for repositories and entities by using `@EnableNeo4jRepositories` and `@EntityScan` respectively on a `@Configuration`-bean. + +| |In an application using the reactive style, a `ReactiveTransactionManager` is not auto-configured.
To enable transaction management, the following bean must be defined in your configuration:

```
import org.neo4j.driver.Driver;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.neo4j.core.ReactiveDatabaseSelectionProvider;
import org.springframework.data.neo4j.core.transaction.ReactiveNeo4jTransactionManager;

@Configuration(proxyBeanMethods = false)
public class MyNeo4jConfiguration {

    @Bean
    public ReactiveNeo4jTransactionManager reactiveTransactionManager(Driver driver,
            ReactiveDatabaseSelectionProvider databaseNameProvider) {
        return new ReactiveNeo4jTransactionManager(driver, databaseNameProvider);
    }

}

```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.4. Solr + +[Apache Solr](https://lucene.apache.org/solr/) is a search engine. +Spring Boot offers basic auto-configuration for the Solr 5 client library. + +#### 2.4.1. Connecting to Solr + +You can inject an auto-configured `SolrClient` instance as you would any other Spring bean. +By default, the instance tries to connect to a server at `[localhost:8983/solr](http://localhost:8983/solr)`. +The following example shows how to inject a Solr bean: + +``` +import java.io.IOException; + +import org.apache.solr.client.solrj.SolrClient; +import org.apache.solr.client.solrj.SolrServerException; +import org.apache.solr.client.solrj.response.SolrPingResponse; + +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final SolrClient solr; + + public MyBean(SolrClient solr) { + this.solr = solr; + } + + // ... 
+ + public SolrPingResponse someMethod() throws SolrServerException, IOException { + return this.solr.ping("users"); + } + +} + +``` + +If you add your own `@Bean` of type `SolrClient`, it replaces the default. + +### 2.5. Elasticsearch + +[Elasticsearch](https://www.elastic.co/products/elasticsearch) is an open source, distributed, RESTful search and analytics engine. +Spring Boot offers basic auto-configuration for Elasticsearch clients. + +Spring Boot supports several clients: + +* The official Java "Low Level" and "High Level" REST clients + +* The `ReactiveElasticsearchClient` provided by Spring Data Elasticsearch + +Spring Boot provides a dedicated “Starter”, `spring-boot-starter-data-elasticsearch`. + +#### 2.5.1. Connecting to Elasticsearch using REST clients + +Elasticsearch ships [two different REST clients](https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/index.html) that you can use to query a cluster: the "Low Level" client and the "High Level" client. +Spring Boot provides support for the "High Level" client, which ships with `org.elasticsearch.client:elasticsearch-rest-high-level-client`. +Additionally, Spring Boot provides support for a reactive client, based on Spring Framework’s `WebClient`, that ships with `org.springframework.data:spring-data-elasticsearch`. +By default, the clients will target `[localhost:9200](http://localhost:9200)`. 
+You can use `spring.elasticsearch.*` properties to further tune how the clients are configured, as shown in the following example: + +Properties + +``` +spring.elasticsearch.uris=https://search.example.com:9200 +spring.elasticsearch.socket-timeout=10s +spring.elasticsearch.username=user +spring.elasticsearch.password=secret +``` + +Yaml + +``` +spring: + elasticsearch: + uris: "https://search.example.com:9200" + socket-timeout: "10s" + username: "user" + password: "secret" +``` + +##### Connecting to Elasticsearch using RestHighLevelClient ##### + +If you have `elasticsearch-rest-high-level-client` on the classpath, Spring Boot will auto-configure and register a `RestHighLevelClient` bean. +In addition to the properties described previously, to fine-tune the `RestHighLevelClient`, you can register an arbitrary number of beans that implement `RestClientBuilderCustomizer` for more advanced customizations. +To take full control over its registration, define a `RestClientBuilder` bean. + +| |If your application needs access to a "Low Level" `RestClient`, you can get it by calling `client.getLowLevelClient()` on the auto-configured `RestHighLevelClient`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Additionally, if `elasticsearch-rest-client-sniffer` is on the classpath, a `Sniffer` is auto-configured to automatically discover nodes from a running Elasticsearch cluster and set them on the `RestHighLevelClient` bean. 
+You can further tune how `Sniffer` is configured, as shown in the following example: + +Properties + +``` +spring.elasticsearch.restclient.sniffer.interval=10m +spring.elasticsearch.restclient.sniffer.delay-after-failure=30s +``` + +Yaml + +``` +spring: + elasticsearch: + restclient: + sniffer: + interval: "10m" + delay-after-failure: "30s" +``` + +##### Connecting to Elasticsearch using ReactiveElasticsearchClient ##### + +[Spring Data Elasticsearch](https://spring.io/projects/spring-data-elasticsearch) ships `ReactiveElasticsearchClient` for querying Elasticsearch instances in a reactive fashion. +It is built on top of WebFlux’s `WebClient`, so both `spring-boot-starter-elasticsearch` and `spring-boot-starter-webflux` dependencies are useful to enable this support. + +By default, Spring Boot will auto-configure and register a `ReactiveElasticsearchClient`. +In addition to the properties described previously, the `spring.elasticsearch.webclient.*` properties can be used to configure reactive-specific settings, as shown in the following example: + +Properties + +``` +spring.elasticsearch.webclient.max-in-memory-size=1MB +``` + +Yaml + +``` +spring: + elasticsearch: + webclient: + max-in-memory-size: "1MB" +``` + +If the `spring.elasticsearch.` **and `spring.elasticsearch.webclient.`** configuration properties are not enough and you’d like to fully control the client configuration, you can register a custom `ClientConfiguration` bean. + +#### 2.5.2. Connecting to Elasticsearch by Using Spring Data #### + +To connect to Elasticsearch, a `RestHighLevelClient` bean must be defined, +auto-configured by Spring Boot or manually provided by the application (see previous sections). 
+With this configuration in place, an`ElasticsearchRestTemplate` can be injected like any other Spring bean, +as shown in the following example: + +``` +import org.springframework.data.elasticsearch.core.ElasticsearchRestTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final ElasticsearchRestTemplate template; + + public MyBean(ElasticsearchRestTemplate template) { + this.template = template; + } + + // ... + + public boolean someMethod(String id) { + return this.template.exists(id, User.class); + } + +} + +``` + +In the presence of `spring-data-elasticsearch` and the required dependencies for using a `WebClient` (typically `spring-boot-starter-webflux`), Spring Boot can also auto-configure a [ReactiveElasticsearchClient](features.html#data.nosql.elasticsearch.connecting-using-rest.webclient) and a `ReactiveElasticsearchTemplate` as beans. +They are the reactive equivalent of the other REST clients. + +#### 2.5.3. Spring Data Elasticsearch Repositories + +Spring Data includes repository support for Elasticsearch. +As with the JPA repositories discussed earlier, the basic principle is that queries are constructed for you automatically based on method names. + +In fact, both Spring Data JPA and Spring Data Elasticsearch share the same common infrastructure. +You could take the JPA example from earlier and, assuming that `City` is now an Elasticsearch `@Document` class rather than a JPA `@Entity`, it works in the same way. 
+ +| |For complete details of Spring Data Elasticsearch, see the [reference documentation](https://docs.spring.io/spring-data/elasticsearch/docs/current/reference/html/).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Boot supports both classic and reactive Elasticsearch repositories, using the `ElasticsearchRestTemplate` or `ReactiveElasticsearchTemplate` beans. +Most likely those beans are auto-configured by Spring Boot given the required dependencies are present. + +If you wish to use your own template for backing the Elasticsearch repositories, you can add your own `ElasticsearchRestTemplate` or `ElasticsearchOperations` `@Bean`, as long as it is named `"elasticsearchTemplate"`. +Same applies to `ReactiveElasticsearchTemplate` and `ReactiveElasticsearchOperations`, with the bean name `"reactiveElasticsearchTemplate"`. + +You can choose to disable the repositories support with the following property: + +Properties + +``` +spring.data.elasticsearch.repositories.enabled=false +``` + +Yaml + +``` +spring: + data: + elasticsearch: + repositories: + enabled: false +``` + +### 2.6. Cassandra + +[Cassandra](https://cassandra.apache.org/) is an open source, distributed database management system designed to handle large amounts of data across many commodity servers. +Spring Boot offers auto-configuration for Cassandra and the abstractions on top of it provided by [Spring Data Cassandra](https://github.com/spring-projects/spring-data-cassandra). +There is a `spring-boot-starter-data-cassandra` “Starter” for collecting the dependencies in a convenient way. + +#### 2.6.1. Connecting to Cassandra + +You can inject an auto-configured `CassandraTemplate` or a Cassandra `CqlSession` instance as you would with any other Spring Bean. +The `spring.data.cassandra.*` properties can be used to customize the connection. 
+Generally, you provide `keyspace-name` and `contact-points` as well the local datacenter name, as shown in the following example: + +Properties + +``` +spring.data.cassandra.keyspace-name=mykeyspace +spring.data.cassandra.contact-points=cassandrahost1:9042,cassandrahost2:9042 +spring.data.cassandra.local-datacenter=datacenter1 +``` + +Yaml + +``` +spring: + data: + cassandra: + keyspace-name: "mykeyspace" + contact-points: "cassandrahost1:9042,cassandrahost2:9042" + local-datacenter: "datacenter1" +``` + +If the port is the same for all your contact points you can use a shortcut and only specify the host names, as shown in the following example: + +Properties + +``` +spring.data.cassandra.keyspace-name=mykeyspace +spring.data.cassandra.contact-points=cassandrahost1,cassandrahost2 +spring.data.cassandra.local-datacenter=datacenter1 +``` + +Yaml + +``` +spring: + data: + cassandra: + keyspace-name: "mykeyspace" + contact-points: "cassandrahost1,cassandrahost2" + local-datacenter: "datacenter1" +``` + +| |Those two examples are identical as the port default to `9042`.
If you need to configure the port, use `spring.data.cassandra.port`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------| + +| |The Cassandra driver has its own configuration infrastructure that loads an `application.conf` at the root of the classpath.

Spring Boot does not look for such a file by default but can load one using `spring.data.cassandra.config`.
If a property is both present in `spring.data.cassandra.*` and the configuration file, the value in `spring.data.cassandra.*` takes precedence.

For more advanced driver customizations, you can register an arbitrary number of beans that implement `DriverConfigLoaderBuilderCustomizer`.
The `CqlSession` can be customized with a bean of type `CqlSessionBuilderCustomizer`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you use `CqlSessionBuilder` to create multiple `CqlSession` beans, keep in mind the builder is mutable so make sure to inject a fresh copy for each session.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following code listing shows how to inject a Cassandra bean: + +``` +import org.springframework.data.cassandra.core.CassandraTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final CassandraTemplate template; + + public MyBean(CassandraTemplate template) { + this.template = template; + } + + // ... + + public long someMethod() { + return this.template.count(User.class); + } + +} + +``` + +If you add your own `@Bean` of type `CassandraTemplate`, it replaces the default. + +#### 2.6.2. Spring Data Cassandra Repositories + +Spring Data includes basic repository support for Cassandra. +Currently, this is more limited than the JPA repositories discussed earlier and needs to annotate finder methods with `@Query`. 
+ +| |For complete details of Spring Data Cassandra, see the [reference documentation](https://docs.spring.io/spring-data/cassandra/docs/).| +|---|-------------------------------------------------------------------------------------------------------------------------------------| + +### 2.7. Couchbase + +[Couchbase](https://www.couchbase.com/) is an open-source, distributed, multi-model NoSQL document-oriented database that is optimized for interactive applications. +Spring Boot offers auto-configuration for Couchbase and the abstractions on top of it provided by [Spring Data Couchbase](https://github.com/spring-projects/spring-data-couchbase). +There are `spring-boot-starter-data-couchbase` and `spring-boot-starter-data-couchbase-reactive` “Starters” for collecting the dependencies in a convenient way. + +#### 2.7.1. Connecting to Couchbase + +You can get a `Cluster` by adding the Couchbase SDK and some configuration. +The `spring.couchbase.*` properties can be used to customize the connection. +Generally, you provide the [connection string](https://github.com/couchbaselabs/sdk-rfcs/blob/master/rfc/0011-connection-string.md), username, and password, as shown in the following example: + +Properties + +``` +spring.couchbase.connection-string=couchbase://192.168.1.123 +spring.couchbase.username=user +spring.couchbase.password=secret +``` + +Yaml + +``` +spring: + couchbase: + connection-string: "couchbase://192.168.1.123" + username: "user" + password: "secret" +``` + +It is also possible to customize some of the `ClusterEnvironment` settings. 
+For instance, the following configuration changes the timeout to use to open a new `Bucket` and enables SSL support: + +Properties + +``` +spring.couchbase.env.timeouts.connect=3s +spring.couchbase.env.ssl.key-store=/location/of/keystore.jks +spring.couchbase.env.ssl.key-store-password=secret +``` + +Yaml + +``` +spring: + couchbase: + env: + timeouts: + connect: "3s" + ssl: + key-store: "/location/of/keystore.jks" + key-store-password: "secret" +``` + +| |Check the `spring.couchbase.env.*` properties for more details.
To take more control, one or more `ClusterEnvironmentBuilderCustomizer` beans can be used.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.7.2. Spring Data Couchbase Repositories + +Spring Data includes repository support for Couchbase. +For complete details of Spring Data Couchbase, see the [reference documentation](https://docs.spring.io/spring-data/couchbase/docs/4.3.2/reference/html/). + +You can inject an auto-configured `CouchbaseTemplate` instance as you would with any other Spring Bean, provided a `CouchbaseClientFactory` bean is available. +This happens when a `Cluster` is available, as described above, and a bucket name has been specified: + +Properties + +``` +spring.data.couchbase.bucket-name=my-bucket +``` + +Yaml + +``` +spring: + data: + couchbase: + bucket-name: "my-bucket" +``` + +The following examples shows how to inject a `CouchbaseTemplate` bean: + +``` +import org.springframework.data.couchbase.core.CouchbaseTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final CouchbaseTemplate template; + + public MyBean(CouchbaseTemplate template) { + this.template = template; + } + + // ... + + public String someMethod() { + return this.template.getBucketName(); + } + +} + +``` + +There are a few beans that you can define in your own configuration to override those provided by the auto-configuration: + +* A `CouchbaseMappingContext` `@Bean` with a name of `couchbaseMappingContext`. + +* A `CustomConversions` `@Bean` with a name of `couchbaseCustomConversions`. + +* A `CouchbaseTemplate` `@Bean` with a name of `couchbaseTemplate`. + +To avoid hard-coding those names in your own config, you can reuse `BeanNames` provided by Spring Data Couchbase. 
+For instance, you can customize the converters to use, as follows: + +``` +import org.assertj.core.util.Arrays; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.data.couchbase.config.BeanNames; +import org.springframework.data.couchbase.core.convert.CouchbaseCustomConversions; + +@Configuration(proxyBeanMethods = false) +public class MyCouchbaseConfiguration { + + @Bean(BeanNames.COUCHBASE_CUSTOM_CONVERSIONS) + public CouchbaseCustomConversions myCustomConversions() { + return new CouchbaseCustomConversions(Arrays.asList(new MyConverter())); + } + +} + +``` + +### 2.8. LDAP + +[LDAP](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol) (Lightweight Directory Access Protocol) is an open, vendor-neutral, industry standard application protocol for accessing and maintaining distributed directory information services over an IP network. +Spring Boot offers auto-configuration for any compliant LDAP server as well as support for the embedded in-memory LDAP server from [UnboundID](https://ldap.com/unboundid-ldap-sdk-for-java/). + +LDAP abstractions are provided by [Spring Data LDAP](https://github.com/spring-projects/spring-data-ldap). +There is a `spring-boot-starter-data-ldap` “Starter” for collecting the dependencies in a convenient way. + +#### 2.8.1. 
Connecting to an LDAP Server + +To connect to an LDAP server, make sure you declare a dependency on the `spring-boot-starter-data-ldap` “Starter” or `spring-ldap-core` and then declare the URLs of your server in your application.properties, as shown in the following example: + +Properties + +``` +spring.ldap.urls=ldap://myserver:1235 +spring.ldap.username=admin +spring.ldap.password=secret +``` + +Yaml + +``` +spring: + ldap: + urls: "ldap://myserver:1235" + username: "admin" + password: "secret" +``` + +If you need to customize connection settings, you can use the `spring.ldap.base` and `spring.ldap.base-environment` properties. + +An `LdapContextSource` is auto-configured based on these settings. +If a `DirContextAuthenticationStrategy` bean is available, it is associated to the auto-configured `LdapContextSource`. +If you need to customize it, for instance to use a `PooledContextSource`, you can still inject the auto-configured `LdapContextSource`. +Make sure to flag your customized `ContextSource` as `@Primary` so that the auto-configured `LdapTemplate` uses it. + +#### 2.8.2. Spring Data LDAP Repositories + +Spring Data includes repository support for LDAP. +For complete details of Spring Data LDAP, see the [reference documentation](https://docs.spring.io/spring-data/ldap/docs/1.0.x/reference/html/). + +You can also inject an auto-configured `LdapTemplate` instance as you would with any other Spring Bean, as shown in the following example: + +``` +import java.util.List; + +import org.springframework.ldap.core.LdapTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final LdapTemplate template; + + public MyBean(LdapTemplate template) { + this.template = template; + } + + // ... + + public List someMethod() { + return this.template.findAll(User.class); + } + +} + +``` + +#### 2.8.3. 
Embedded In-memory LDAP Server + +For testing purposes, Spring Boot supports auto-configuration of an in-memory LDAP server from [UnboundID](https://ldap.com/unboundid-ldap-sdk-for-java/). +To configure the server, add a dependency to `com.unboundid:unboundid-ldapsdk` and declare a `spring.ldap.embedded.base-dn` property, as follows: + +Properties + +``` +spring.ldap.embedded.base-dn=dc=spring,dc=io +``` + +Yaml + +``` +spring: + ldap: + embedded: + base-dn: "dc=spring,dc=io" +``` + +| |It is possible to define multiple base-dn values, however, since distinguished names usually contain commas, they must be defined using the correct notation.

In YAML files, you can use the YAML list notation. In properties files, you must include the index as part of the property name:

Properties

```
spring.ldap.embedded.base-dn[0]=dc=spring,dc=io
spring.ldap.embedded.base-dn[1]=dc=pivotal,dc=io
```

Yaml

```
spring.ldap.embedded.base-dn:
- "dc=spring,dc=io"
- "dc=pivotal,dc=io"
```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, the server starts on a random port and triggers the regular LDAP support. +There is no need to specify a `spring.ldap.urls` property. + +If there is a `schema.ldif` file on your classpath, it is used to initialize the server. +If you want to load the initialization script from a different resource, you can also use the `spring.ldap.embedded.ldif` property. + +By default, a standard schema is used to validate `LDIF` files. +You can turn off validation altogether by setting the `spring.ldap.embedded.validation.enabled` property. +If you have custom attributes, you can use `spring.ldap.embedded.validation.schema` to define your custom attribute types or object classes. + +### 2.9. InfluxDB + +[InfluxDB](https://www.influxdata.com/) is an open-source time series database optimized for fast, high-availability storage and retrieval of time series data in fields such as operations monitoring, application metrics, Internet-of-Things sensor data, and real-time analytics. + +#### 2.9.1. 
Connecting to InfluxDB + +Spring Boot auto-configures an `InfluxDB` instance, provided the `influxdb-java` client is on the classpath and the URL of the database is set, as shown in the following example: + +Properties + +``` +spring.influx.url=https://172.0.0.1:8086 +``` + +Yaml + +``` +spring: + influx: + url: "https://172.0.0.1:8086" +``` + +If the connection to InfluxDB requires a user and password, you can set the `spring.influx.user` and `spring.influx.password` properties accordingly. + +InfluxDB relies on OkHttp. +If you need to tune the http client `InfluxDB` uses behind the scenes, you can register an `InfluxDbOkHttpClientBuilderProvider` bean. + +If you need more control over the configuration, consider registering an `InfluxDbCustomizer` bean. + +## 3. What to Read Next +---------- + +You should now have a feeling for how to use Spring Boot with various data technologies. +From here, you can read about Spring Boot’s support for various [messaging technologies](messaging.html#messaging) and how to enable them in your application. + diff --git a/docs/en/spring-boot/deployment.md b/docs/en/spring-boot/deployment.md new file mode 100644 index 0000000000000000000000000000000000000000..7538cd067824b12d52904981a127b15a9468b9dd --- /dev/null +++ b/docs/en/spring-boot/deployment.md @@ -0,0 +1,643 @@ +# Deploying Spring Boot Applications + +Spring Boot’s flexible packaging options provide a great deal of choice when it comes to deploying your application. +You can deploy Spring Boot applications to a variety of cloud platforms, to virtual/real machines, or make them fully executable for Unix systems. + +This section covers some of the more common deployment scenarios. + +## 1. Deploying to the Cloud + +Spring Boot’s executable jars are ready-made for most popular cloud PaaS (Platform-as-a-Service) providers. +These providers tend to require that you “bring your own container”. 
+They manage application processes (not Java applications specifically), so they need an intermediary layer that adapts *your* application to the *cloud’s* notion of a running process. + +Two popular cloud providers, Heroku and Cloud Foundry, employ a “buildpack” approach. +The buildpack wraps your deployed code in whatever is needed to *start* your application. +It might be a JDK and a call to `java`, an embedded web server, or a full-fledged application server. +A buildpack is pluggable, but ideally you should be able to get by with as few customizations to it as possible. +This reduces the footprint of functionality that is not under your control. +It minimizes divergence between development and production environments. + +Ideally, your application, like a Spring Boot executable jar, has everything that it needs to run packaged within it. + +In this section, we look at what it takes to get the [application that we developed](getting-started.html#getting-started.first-application) in the “Getting Started” section up and running in the Cloud. + +### 1.1. Cloud Foundry + +Cloud Foundry provides default buildpacks that come into play if no other buildpack is specified. +The Cloud Foundry [Java buildpack](https://github.com/cloudfoundry/java-buildpack) has excellent support for Spring applications, including Spring Boot. +You can deploy stand-alone executable jar applications as well as traditional `.war` packaged applications. + +Once you have built your application (by using, for example, `mvn clean package`) and have [installed the `cf` command line tool](https://docs.cloudfoundry.org/cf-cli/install-go-cli.html), deploy your application by using the `cf push` command, substituting the path to your compiled `.jar`. +Be sure to have [logged in with your `cf` command line client](https://docs.cloudfoundry.org/cf-cli/getting-started.html#login) before pushing an application. 
+The following line shows using the `cf push` command to deploy an application: + +``` +$ cf push acloudyspringtime -p target/demo-0.0.1-SNAPSHOT.jar +``` + +| |In the preceding example, we substitute `acloudyspringtime` for whatever value you give `cf` as the name of your application.| +|---|-----------------------------------------------------------------------------------------------------------------------------| + +See the [`cf push` documentation](https://docs.cloudfoundry.org/cf-cli/getting-started.html#push) for more options. +If there is a Cloud Foundry [`manifest.yml`](https://docs.cloudfoundry.org/devguide/deploy-apps/manifest.html) file present in the same directory, it is considered. + +At this point, `cf` starts uploading your application, producing output similar to the following example: + +``` +Uploading acloudyspringtime... OK +Preparing to start acloudyspringtime... OK +-----> Downloaded app package (8.9M) +-----> Java Buildpack Version: v3.12 (offline) | https://github.com/cloudfoundry/java-buildpack.git#6f25b7e +-----> Downloading Open Jdk JRE 1.8.0_121 from https://java-buildpack.cloudfoundry.org/openjdk/trusty/x86_64/openjdk-1.8.0_121.tar.gz (found in cache) + Expanding Open Jdk JRE to .java-buildpack/open_jdk_jre (1.6s) +-----> Downloading Open JDK Like Memory Calculator 2.0.2_RELEASE from https://java-buildpack.cloudfoundry.org/memory-calculator/trusty/x86_64/memory-calculator-2.0.2_RELEASE.tar.gz (found in cache) + Memory Settings: -Xss349K -Xmx681574K -XX:MaxMetaspaceSize=104857K -Xms681574K -XX:MetaspaceSize=104857K +-----> Downloading Container Certificate Trust Store 1.0.0_RELEASE from https://java-buildpack.cloudfoundry.org/container-certificate-trust-store/container-certificate-trust-store-1.0.0_RELEASE.jar (found in cache) + Adding certificates to .java-buildpack/container_certificate_trust_store/truststore.jks (0.6s) +-----> Downloading Spring Auto Reconfiguration 1.10.0_RELEASE from 
https://java-buildpack.cloudfoundry.org/auto-reconfiguration/auto-reconfiguration-1.10.0_RELEASE.jar (found in cache) +Checking status of app 'acloudyspringtime'... + 0 of 1 instances running (1 starting) + ... + 0 of 1 instances running (1 starting) + ... + 0 of 1 instances running (1 starting) + ... + 1 of 1 instances running (1 running) + +App started +``` + +Congratulations! The application is now live! + +Once your application is live, you can verify the status of the deployed application by using the `cf apps` command, as shown in the following example: + +``` +$ cf apps +Getting applications in ... +OK + +name requested state instances memory disk urls +... +acloudyspringtime started 1/1 512M 1G acloudyspringtime.cfapps.io +... +``` + +Once Cloud Foundry acknowledges that your application has been deployed, you should be able to find the application at the URI given. +In the preceding example, you could find it at `https://acloudyspringtime.cfapps.io/`. + +#### 1.1.1. Binding to Services + +By default, metadata about the running application as well as service connection information is exposed to the application as environment variables (for example: `$VCAP_SERVICES`). +This architecture decision is due to Cloud Foundry’s polyglot (any language and platform can be supported as a buildpack) nature. +Process-scoped environment variables are language agnostic. 
+ +Environment variables do not always make for the easiest API, so Spring Boot automatically extracts them and flattens the data into properties that can be accessed through Spring’s `Environment` abstraction, as shown in the following example: + +``` +import org.springframework.context.EnvironmentAware; +import org.springframework.core.env.Environment; +import org.springframework.stereotype.Component; + +@Component +public class MyBean implements EnvironmentAware { + + private String instanceId; + + @Override + public void setEnvironment(Environment environment) { + this.instanceId = environment.getProperty("vcap.application.instance_id"); + } + + // ... + +} + +``` + +All Cloud Foundry properties are prefixed with `vcap`. +You can use `vcap` properties to access application information (such as the public URL of the application) and service information (such as database credentials). +See the [‘CloudFoundryVcapEnvironmentPostProcessor’](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/cloud/CloudFoundryVcapEnvironmentPostProcessor.html) Javadoc for complete details. + +| |The [Java CFEnv](https://github.com/pivotal-cf/java-cfenv/) project is a better fit for tasks such as configuring a DataSource.| +|---|-------------------------------------------------------------------------------------------------------------------------------| + +### 1.2. Kubernetes + +Spring Boot auto-detects Kubernetes deployment environments by checking the environment for `"*_SERVICE_HOST"` and `"*_SERVICE_PORT"` variables. +You can override this detection with the `spring.main.cloud-platform` configuration property. + +Spring Boot helps you to [manage the state of your application](features.html#features.spring-application.application-availability) and export it with [HTTP Kubernetes Probes using Actuator](actuator.html#actuator.endpoints.kubernetes-probes). + +#### 1.2.1. 
Kubernetes Container Lifecycle + +When Kubernetes deletes an application instance, the shutdown process involves several subsystems concurrently: shutdown hooks, unregistering the service, removing the instance from the load-balancer…​ +Because this shutdown processing happens in parallel (and due to the nature of distributed systems), there is a window during which traffic can be routed to a pod that has also begun its shutdown processing. + +You can configure a sleep execution in a preStop handler to avoid requests being routed to a pod that has already begun shutting down. +This sleep should be long enough for new requests to stop being routed to the pod and its duration will vary from deployment to deployment. +The preStop handler can be configured by using the PodSpec in the pod’s configuration file as follows: + +``` +spec: + containers: + - name: "example-container" + image: "example-image" + lifecycle: + preStop: + exec: + command: ["sh", "-c", "sleep 10"] +``` + +Once the pre-stop hook has completed, SIGTERM will be sent to the container and [graceful shutdown](web.html#web.graceful-shutdown) will begin, allowing any remaining in-flight requests to complete. + +| |When Kubernetes sends a SIGTERM signal to the pod, it waits for a specified time called the termination grace period (the default for which is 30 seconds).
If the containers are still running after the grace period, they are sent the SIGKILL signal and forcibly removed.
If the pod takes longer than 30 seconds to shut down, which could be because you have increased `spring.lifecycle.timeout-per-shutdown-phase`, make sure to increase the termination grace period by setting the `terminationGracePeriodSeconds` option in the Pod YAML.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.3. Heroku + +Heroku is another popular PaaS platform. +To customize Heroku builds, you provide a `Procfile`, which provides the incantation required to deploy an application. +Heroku assigns a `port` for the Java application to use and then ensures that routing to the external URI works. + +You must configure your application to listen on the correct port. +The following example shows the `Procfile` for our starter REST application: + +``` +web: java -Dserver.port=$PORT -jar target/demo-0.0.1-SNAPSHOT.jar +``` + +Spring Boot makes `-D` arguments available as properties accessible from a Spring `Environment` instance. +The `server.port` configuration property is fed to the embedded Tomcat, Jetty, or Undertow instance, which then uses the port when it starts up. +The `$PORT` environment variable is assigned to us by the Heroku PaaS. + +This should be everything you need. +The most common deployment workflow for Heroku deployments is to `git push` the code to production, as shown in the following example: + +``` +$ git push heroku main +``` + +Which will result in the following: + +``` +Initializing repository, done. +Counting objects: 95, done. 
+Delta compression using up to 8 threads. +Compressing objects: 100% (78/78), done. +Writing objects: 100% (95/95), 8.66 MiB | 606.00 KiB/s, done. +Total 95 (delta 31), reused 0 (delta 0) + +-----> Java app detected +-----> Installing OpenJDK 1.8... done +-----> Installing Maven 3.3.1... done +-----> Installing settings.xml... done +-----> Executing: mvn -B -DskipTests=true clean install + + [INFO] Scanning for projects... + Downloading: https://repo.spring.io/... + Downloaded: https://repo.spring.io/... (818 B at 1.8 KB/sec) + .... + Downloaded: https://s3pository.heroku.com/jvm/... (152 KB at 595.3 KB/sec) + [INFO] Installing /tmp/build_0c35a5d2-a067-4abc-a232-14b1fb7a8229/target/... + [INFO] Installing /tmp/build_0c35a5d2-a067-4abc-a232-14b1fb7a8229/pom.xml ... + [INFO] ------------------------------------------------------------------------ + [INFO] BUILD SUCCESS + [INFO] ------------------------------------------------------------------------ + [INFO] Total time: 59.358s + [INFO] Finished at: Fri Mar 07 07:28:25 UTC 2014 + [INFO] Final Memory: 20M/493M + [INFO] ------------------------------------------------------------------------ + +-----> Discovering process types + Procfile declares types -> web + +-----> Compressing... done, 70.4MB +-----> Launching... done, v6 + https://agile-sierra-1405.herokuapp.com/ deployed to Heroku + +To [email protected]:agile-sierra-1405.git + * [new branch] main -> main +``` + +Your application should now be up and running on Heroku. +For more details, see [Deploying Spring Boot Applications to Heroku](https://devcenter.heroku.com/articles/deploying-spring-boot-apps-to-heroku). + +### 1.4. 
OpenShift + +[OpenShift](https://www.openshift.com/) has many resources describing how to deploy Spring Boot applications, including: + +* [Using the S2I builder](https://blog.openshift.com/using-openshift-enterprise-grade-spring-boot-deployments/) + +* [Architecture guide](https://access.redhat.com/documentation/en-us/reference_architectures/2017/html-single/spring_boot_microservices_on_red_hat_openshift_container_platform_3/) + +* [Running as a traditional web application on Wildfly](https://blog.openshift.com/using-spring-boot-on-openshift/) + +* [OpenShift Commons Briefing](https://blog.openshift.com/openshift-commons-briefing-96-cloud-native-applications-spring-rhoar/) + +### 1.5. Amazon Web Services (AWS) + +Amazon Web Services offers multiple ways to install Spring Boot-based applications, either as traditional web applications (war) or as executable jar files with an embedded web server. +The options include: + +* AWS Elastic Beanstalk + +* AWS Code Deploy + +* AWS OPS Works + +* AWS Cloud Formation + +* AWS Container Registry + +Each has different features and pricing models. +In this document, we describe the approach of using AWS Elastic Beanstalk. + +#### 1.5.1. AWS Elastic Beanstalk + +As described in the official [Elastic Beanstalk Java guide](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/create_deploy_Java.html), there are two main options to deploy a Java application. +You can either use the “Tomcat Platform” or the “Java SE platform”. + +##### Using the Tomcat Platform + +This option applies to Spring Boot projects that produce a war file. +No special configuration is required. +You need only follow the official guide. + +##### Using the Java SE Platform + +This option applies to Spring Boot projects that produce a jar file and run an embedded web container. +Elastic Beanstalk environments run an nginx instance on port 80 to proxy the actual application, running on port 5000. 
+To configure it, add the following line to your `application.properties` file: + +``` +server.port=5000 +``` + +| |Upload binaries instead of sources

By default, Elastic Beanstalk uploads sources and compiles them in AWS.
However, it is best to upload the binaries instead.
To do so, add lines similar to the following to your `.elasticbeanstalk/config.yml` file:

```
deploy:
artifact: target/demo-0.0.1-SNAPSHOT.jar
```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Reduce costs by setting the environment type

By default, an Elastic Beanstalk environment is load balanced.
The load balancer has a significant cost.
To avoid that cost, set the environment type to “Single instance”, as described in [the Amazon documentation](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environments-create-wizard.html#environments-create-wizard-capacity).
You can also create single instance environments by using the CLI and the following command:

```
eb create -s
```| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.5.2. Summary + +This is one of the easiest ways to get to AWS, but there are more things to cover, such as how to integrate Elastic Beanstalk into any CI / CD tool, use the Elastic Beanstalk Maven plugin instead of the CLI, and others. +There is a [blog post](https://exampledriven.wordpress.com/2017/01/09/spring-boot-aws-elastic-beanstalk-example/) covering these topics more in detail. + +### 1.6. CloudCaptain and Amazon Web Services + +[CloudCaptain](https://cloudcaptain.sh/) works by turning your Spring Boot executable jar or war into a minimal VM image that can be deployed unchanged either on VirtualBox or on AWS. +CloudCaptain comes with deep integration for Spring Boot and uses the information from your Spring Boot configuration file to automatically configure ports and health check URLs. +CloudCaptain leverages this information both for the images it produces as well as for all the resources it provisions (instances, security groups, elastic load balancers, and so on). 
+ +Once you have created a [CloudCaptain account](https://console.cloudcaptain.sh), connected it to your AWS account, installed the latest version of the CloudCaptain Client, and ensured that the application has been built by Maven or Gradle (by using, for example, `mvn clean package`), you can deploy your Spring Boot application to AWS with a command similar to the following: + +``` +$ boxfuse run myapp-1.0.jar -env=prod +``` + +See the [`boxfuse run` documentation](https://cloudcaptain.sh/docs/commandline/run.html) for more options. +If there is a [`boxfuse.conf`](https://cloudcaptain.sh/docs/commandline/#configuration) file present in the current directory, it is considered. + +| |By default, CloudCaptain activates a Spring profile named `boxfuse` on startup.
If your executable jar or war contains an [`application-boxfuse.properties`](https://cloudcaptain.sh/docs/payloads/springboot.html#configuration) file, CloudCaptain bases its configuration on the properties it contains.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +At this point, CloudCaptain creates an image for your application, uploads it, and configures and starts the necessary resources on AWS, resulting in output similar to the following example: + +``` +Fusing Image for myapp-1.0.jar ... +Image fused in 00:06.838s (53937 K) -> axelfontaine/myapp:1.0 +Creating axelfontaine/myapp ... +Pushing axelfontaine/myapp:1.0 ... +Verifying axelfontaine/myapp:1.0 ... +Creating Elastic IP ... +Mapping myapp-axelfontaine.boxfuse.io to 52.28.233.167 ... +Waiting for AWS to create an AMI for axelfontaine/myapp:1.0 in eu-central-1 (this may take up to 50 seconds) ... +AMI created in 00:23.557s -> ami-d23f38cf +Creating security group boxfuse-sg_axelfontaine/myapp:1.0 ... +Launching t2.micro instance of axelfontaine/myapp:1.0 (ami-d23f38cf) in eu-central-1 ... +Instance launched in 00:30.306s -> i-92ef9f53 +Waiting for AWS to boot Instance i-92ef9f53 and Payload to start at https://52.28.235.61/ ... +Payload started in 00:29.266s -> https://52.28.235.61/ +Remapping Elastic IP 52.28.233.167 to i-92ef9f53 ... +Waiting 15s for AWS to complete Elastic IP Zero Downtime transition ... +Deployment completed successfully. axelfontaine/myapp:1.0 is up and running at https://myapp-axelfontaine.boxfuse.io/ +``` + +Your application should now be up and running on AWS. 
+ +See the blog post on [deploying Spring Boot apps on EC2](https://cloudcaptain.sh/blog/spring-boot-ec2.html) as well as the [documentation for the CloudCaptain Spring Boot integration](https://cloudcaptain.sh/docs/payloads/springboot.html) to get started with a Maven build to run the app. + +### 1.7. Azure + +This [Getting Started guide](https://spring.io/guides/gs/spring-boot-for-azure/) walks you through deploying your Spring Boot application to either [Azure Spring Cloud](https://azure.microsoft.com/en-us/services/spring-cloud/) or [Azure App Service](https://docs.microsoft.com/en-us/azure/app-service/overview). + +### 1.8. Google Cloud + +Google Cloud has several options that can be used to launch Spring Boot applications. +The easiest to get started with is probably App Engine, but you could also find ways to run Spring Boot in a container with Container Engine or on a virtual machine with Compute Engine. + +To run in App Engine, you can create a project in the UI first, which sets up a unique identifier for you and also sets up HTTP routes. +Add a Java app to the project and leave it empty and then use the [Google Cloud SDK](https://cloud.google.com/sdk/install) to push your Spring Boot app into that slot from the command line or CI build. + +App Engine Standard requires you to use WAR packaging. +Follow [these steps](https://github.com/GoogleCloudPlatform/java-docs-samples/tree/master/appengine-java8/springboot-helloworld/README.md) to deploy App Engine Standard application to Google Cloud. + +Alternatively, App Engine Flex requires you to create an `app.yaml` file to describe the resources your app requires. 
+Normally, you put this file in `src/main/appengine`, and it should resemble the following file: + +``` +service: "default" + +runtime: "java" +env: "flex" + +runtime_config: + jdk: "openjdk8" + +handlers: +- url: "/.*" + script: "this field is required, but ignored" + +manual_scaling: + instances: 1 + +health_check: + enable_health_check: false + +env_variables: + ENCRYPT_KEY: "your_encryption_key_here" +``` + +You can deploy the app (for example, with a Maven plugin) by adding the project ID to the build configuration, as shown in the following example: + +``` + + com.google.cloud.tools + appengine-maven-plugin + 1.3.0 + + myproject + + +``` + +Then deploy with `mvn appengine:deploy` (if you need to authenticate first, the build fails). + +## 2. Installing Spring Boot Applications + +In addition to running Spring Boot applications by using `java -jar`, it is also possible to make fully executable applications for Unix systems. +A fully executable jar can be executed like any other executable binary or it can be [registered with `init.d` or `systemd`](#deployment.installing.nix-services). +This helps when installing and managing Spring Boot applications in common production environments. + +| |Fully executable jars work by embedding an extra script at the front of the file.
Currently, some tools do not accept this format, so you may not always be able to use this technique.
For example, `jar -xf` may silently fail to extract a jar or war that has been made fully executable.
It is recommended that you make your jar or war fully executable only if you intend to execute it directly, rather than running it with `java -jar` or deploying it to a servlet container.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |A zip64-format jar file cannot be made fully executable.
Attempting to do so will result in a jar file that is reported as corrupt when executed directly or with `java -jar`.
A standard-format jar file that contains one or more zip64-format nested jars can be fully executable.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To create a ‘fully executable’ jar with Maven, use the following plugin configuration: + +``` + + org.springframework.boot + spring-boot-maven-plugin + + true + + +``` + +The following example shows the equivalent Gradle configuration: + +``` +bootJar { + launchScript() +} +``` + +You can then run your application by typing `./my-application.jar` (where `my-application` is the name of your artifact). +The directory containing the jar is used as your application’s working directory. + +### 2.1. Supported Operating Systems + +The default script supports most Linux distributions and is tested on CentOS and Ubuntu. +Other platforms, such as OS X and FreeBSD, require the use of a custom `embeddedLaunchScript`. + +### 2.2. Unix/Linux Services + +Spring Boot application can be easily started as Unix/Linux services by using either `init.d` or `systemd`. + +#### 2.2.1. Installation as an init.d Service (System V) + +If you configured Spring Boot’s Maven or Gradle plugin to generate a [fully executable jar](#deployment.installing), and you do not use a custom `embeddedLaunchScript`, your application can be used as an `init.d` service. +To do so, symlink the jar to `init.d` to support the standard `start`, `stop`, `restart`, and `status` commands. 
+ +The script supports the following features: + +* Starts the services as the user that owns the jar file + +* Tracks the application’s PID by using `/var/run//.pid` + +* Writes console logs to `/var/log/.log` + +Assuming that you have a Spring Boot application installed in `/var/myapp`, to install a Spring Boot application as an `init.d` service, create a symlink, as follows: + +``` +$ sudo ln -s /var/myapp/myapp.jar /etc/init.d/myapp +``` + +Once installed, you can start and stop the service in the usual way. +For example, on a Debian-based system, you could start it with the following command: + +``` +$ service myapp start +``` + +| |If your application fails to start, check the log file written to `/var/log/.log` for errors.| +|---|------------------------------------------------------------------------------------------------------| + +You can also flag the application to start automatically by using your standard operating system tools. +For example, on Debian, you could use the following command: + +``` +$ update-rc.d myapp defaults +``` + +##### Securing an init.d Service + +| |The following is a set of guidelines on how to secure a Spring Boot application that runs as an init.d service.
It is not intended to be an exhaustive list of everything that should be done to harden an application and the environment in which it runs.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When executed as root, as is the case when root is being used to start an init.d service, the default executable script runs the application as the user specified in the `RUN_AS_USER` environment variable. +When the environment variable is not set, the user who owns the jar file is used instead. +You should never run a Spring Boot application as `root`, so `RUN_AS_USER` should never be root and your application’s jar file should never be owned by root. +Instead, create a specific user to run your application and set the `RUN_AS_USER` environment variable or use `chown` to make it the owner of the jar file, as shown in the following example: + +``` +$ chown bootapp:bootapp your-app.jar +``` + +In this case, the default executable script runs the application as the `bootapp` user. + +| |To reduce the chances of the application’s user account being compromised, you should consider preventing it from using a login shell.
For example, you can set the account’s shell to `/usr/sbin/nologin`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You should also take steps to prevent the modification of your application’s jar file. +Firstly, configure its permissions so that it cannot be written and can only be read or executed by its owner, as shown in the following example: + +``` +$ chmod 500 your-app.jar +``` + +Second, you should also take steps to limit the damage if your application or the account that is running it is compromised. +If an attacker does gain access, they could make the jar file writable and change its contents. +One way to protect against this is to make it immutable by using `chattr`, as shown in the following example: + +``` +$ sudo chattr +i your-app.jar +``` + +This will prevent any user, including root, from modifying the jar. + +If root is used to control the application’s service and you [use a `.conf` file](#deployment.installing.nix-services.script-customization.when-running.conf-file) to customize its startup, the `.conf` file is read and evaluated by the root user. +It should be secured accordingly. +Use `chmod` so that the file can only be read by the owner and use `chown` to make root the owner, as shown in the following example: + +``` +$ chmod 400 your-app.conf +$ sudo chown root:root your-app.conf +``` + +#### 2.2.2. Installation as a systemd Service + +`systemd` is the successor of the System V init system and is now being used by many modern Linux distributions. +Although you can continue to use `init.d` scripts with `systemd`, it is also possible to launch Spring Boot applications by using `systemd` ‘service’ scripts. 
+ +Assuming that you have a Spring Boot application installed in `/var/myapp`, to install a Spring Boot application as a `systemd` service, create a script named `myapp.service` and place it in `/etc/systemd/system` directory. +The following script offers an example: + +``` +[Unit] +Description=myapp +After=syslog.target + +[Service] +User=myapp +ExecStart=/var/myapp/myapp.jar +SuccessExitStatus=143 + +[Install] +WantedBy=multi-user.target +``` + +| |Remember to change the `Description`, `User`, and `ExecStart` fields for your application.| +|---|------------------------------------------------------------------------------------------| + +| |The `ExecStart` field does not declare the script action command, which means that the `run` command is used by default.| +|---|------------------------------------------------------------------------------------------------------------------------| + +Note that, unlike when running as an `init.d` service, the user that runs the application, the PID file, and the console log file are managed by `systemd` itself and therefore must be configured by using appropriate fields in the ‘service’ script. +Consult the [service unit configuration man page](https://www.freedesktop.org/software/systemd/man/systemd.service.html) for more details. + +To flag the application to start automatically on system boot, use the following command: + +``` +$ systemctl enable myapp.service +``` + +Run `man systemctl` for more details. + +#### 2.2.3. Customizing the Startup Script + +The default embedded startup script written by the Maven or Gradle plugin can be customized in a number of ways. +For most people, using the default script along with a few customizations is usually enough. +If you find you cannot customize something that you need to, use the `embeddedLaunchScript` option to write your own file entirely. 
+ +##### Customizing the Start Script When It Is Written ##### + +It often makes sense to customize elements of the start script as it is written into the jar file. +For example, init.d scripts can provide a “description”. +Since you know the description up front (and it need not change), you may as well provide it when the jar is generated. + +To customize written elements, use the `embeddedLaunchScriptProperties` option of the Spring Boot Maven plugin or the [`properties` property of the Spring Boot Gradle plugin’s `launchScript`](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/#packaging-executable-configuring-launch-script). + +The following property substitutions are supported with the default script: + +| Name | Description | Gradle default | Maven default | +|--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------|------------------------------------------------------------| +| `mode` | The script mode. | `auto` | `auto` | +| `initInfoProvides` | The `Provides` section of “INIT INFO” | `${task.baseName}` | `${project.artifactId}` | +| `initInfoRequiredStart` | `Required-Start` section of “INIT INFO”. | `$remote_fs $syslog $network` | `$remote_fs $syslog $network` | +| `initInfoRequiredStop` | `Required-Stop` section of “INIT INFO”. | `$remote_fs $syslog $network` | `$remote_fs $syslog $network` | +| `initInfoDefaultStart` | `Default-Start` section of “INIT INFO”. | `2 3 4 5` | `2 3 4 5` | +| `initInfoDefaultStop` | `Default-Stop` section of “INIT INFO”. | `0 1 6` | `0 1 6` | +|`initInfoShortDescription`| `Short-Description` section of “INIT INFO”. 
|Single-line version of `${project.description}` (falling back to `${task.baseName}`)| `${project.name}` | +| `initInfoDescription` | `Description` section of “INIT INFO”. | `${project.description}` (falling back to `${task.baseName}`) |`${project.description}` (falling back to `${project.name}`)| +| `initInfoChkconfig` | `chkconfig` section of “INIT INFO” | `2345 99 01` | `2345 99 01` | +| `confFolder` | The default value for `CONF_FOLDER` | Folder containing the jar | Folder containing the jar | +| `inlinedConfScript` |Reference to a file script that should be inlined in the default launch script.
This can be used to set environmental variables such as `JAVA_OPTS` before any external config files are loaded| | | +| `logFolder` | Default value for `LOG_FOLDER`.
Only valid for an `init.d` service | | | +| `logFilename` | Default value for `LOG_FILENAME`.
Only valid for an `init.d` service | | | +| `pidFolder` | Default value for `PID_FOLDER`.
Only valid for an `init.d` service | | | +| `pidFilename` | Default value for the name of the PID file in `PID_FOLDER`.
Only valid for an `init.d` service | | | +| `useStartStopDaemon` | Whether the `start-stop-daemon` command, when it is available, should be used to control the process | `true` | `true` | +| `stopWaitTime` | Default value for `STOP_WAIT_TIME` in seconds.
Only valid for an `init.d` service | 60 | 60 | + +##### Customizing a Script When It Runs ##### + +For items of the script that need to be customized *after* the jar has been written, you can use environment variables or a [config file](#deployment.installing.nix-services.script-customization.when-running.conf-file). + +The following environment properties are supported with the default script: + +| Variable | Description | +|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `MODE` |The “mode” of operation.
The default depends on the way the jar was built but is usually `auto` (meaning it tries to guess if it is an init script by checking if it is a symlink in a directory called `init.d`).
You can explicitly set it to `service` so that the `stop|start|status|restart` commands work or to `run` if you want to run the script in the foreground.| +| `RUN_AS_USER` | The user that will be used to run the application.
When not set, the user that owns the jar file will be used. | +|`USE_START_STOP_DAEMON`| Whether the `start-stop-daemon` command, when it is available, should be used to control the process.
Defaults to `true`. | +| `PID_FOLDER` | The root name of the pid folder (`/var/run` by default). | +| `LOG_FOLDER` | The name of the folder in which to put log files (`/var/log` by default). | +| `CONF_FOLDER` | The name of the folder from which to read .conf files (same folder as jar-file by default). | +| `LOG_FILENAME` | The name of the log file in the `LOG_FOLDER` (`.log` by default). | +| `APP_NAME` | The name of the app.
If the jar is run from a symlink, the script guesses the app name.
If it is not a symlink or you want to explicitly set the app name, this can be useful. | +| `RUN_ARGS` | The arguments to pass to the program (the Spring Boot app). | +| `JAVA_HOME` | The location of the `java` executable is discovered by using the `PATH` by default, but you can set it explicitly if there is an executable file at `$JAVA_HOME/bin/java`. | +| `JAVA_OPTS` | Options that are passed to the JVM when it is launched. | +| `JARFILE` | The explicit location of the jar file, in case the script is being used to launch a jar that it is not actually embedded. | +| `DEBUG` | If not empty, sets the `-x` flag on the shell process, allowing you to see the logic in the script. | +| `STOP_WAIT_TIME` | The time in seconds to wait when stopping the application before forcing a shutdown (`60` by default). | + +| |The `PID_FOLDER`, `LOG_FOLDER`, and `LOG_FILENAME` variables are only valid for an `init.d` service.
For `systemd`, the equivalent customizations are made by using the ‘service’ script.
See the [service unit configuration man page](https://www.freedesktop.org/software/systemd/man/systemd.service.html) for more details.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +With the exception of `JARFILE` and `APP_NAME`, the settings listed in the preceding section can be configured by using a `.conf` file. +The file is expected to be next to the jar file and have the same name but suffixed with `.conf` rather than `.jar`. +For example, a jar named `/var/myapp/myapp.jar` uses the configuration file named `/var/myapp/myapp.conf`, as shown in the following example: + +myapp.conf + +``` +JAVA_OPTS=-Xmx1024M +LOG_FOLDER=/custom/log/folder +``` + +| |If you do not like having the config file next to the jar file, you can set a `CONF_FOLDER` environment variable to customize the location of the config file.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To learn about securing this file appropriately, see [the guidelines for securing an init.d service](#deployment.installing.nix-services.init-d.securing). + +### 2.3. Microsoft Windows Services + +A Spring Boot application can be started as a Windows service by using [`winsw`](https://github.com/kohsuke/winsw). + +A ([separately maintained sample](https://github.com/snicoll/spring-boot-daemon)) describes step-by-step how you can create a Windows service for your Spring Boot application. + +## 3. 
What to Read Next + +See the [Cloud Foundry](https://www.cloudfoundry.org/), [Heroku](https://www.heroku.com/), [OpenShift](https://www.openshift.com), and [Boxfuse](https://boxfuse.com) web sites for more information about the kinds of features that a PaaS can offer. +These are just four of the most popular Java PaaS providers. +Since Spring Boot is so amenable to cloud-based deployment, you can freely consider other providers as well. + +The next section goes on to cover the *[Spring Boot CLI](cli.html#cli)*, or you can jump ahead to read about *[build tool plugins](build-tool-plugins.html#build-tool-plugins)*. + diff --git a/docs/en/spring-boot/documentation.md b/docs/en/spring-boot/documentation.md new file mode 100644 index 0000000000000000000000000000000000000000..e3f939afb734807725136cba84214b6dee9f2a1c --- /dev/null +++ b/docs/en/spring-boot/documentation.md @@ -0,0 +1,131 @@ +# Documentation Overview + +This section provides a brief overview of Spring Boot reference documentation. +It serves as a map for the rest of the document. + +The latest copy of this document is available at [docs.spring.io/spring-boot/docs/current/reference/](https://docs.spring.io/spring-boot/docs/current/reference/). + +## 1. First Steps + +If you are getting started with Spring Boot or 'Spring' in general, start with [the following topics](getting-started.html#getting-started): + +* **From scratch:** [Overview](getting-started.html#getting-started.introducing-spring-boot) | [Requirements](getting-started.html#getting-started.system-requirements) | [Installation](getting-started.html#getting-started.installing) + +* **Tutorial:** [Part 1](getting-started.html#getting-started.first-application) | [Part 2](getting-started.html#getting-started.first-application.code) + +* **Running your example:** [Part 1](getting-started.html#getting-started.first-application.run) | [Part 2](getting-started.html#getting-started.first-application.executable-jar) + +## 2. 
Upgrading From an Earlier Version + +You should always ensure that you are running a [supported version](https://github.com/spring-projects/spring-boot/wiki/Supported-Versions) of Spring Boot. + +Depending on the version that you are upgrading to, you can find some additional tips here: + +* **From 1.x:** [Upgrading from 1.x](actuator.html#upgrading.from-1x) + +* **To a new feature release:** [Upgrading to New Feature Release](upgrading.html#upgrading.to-feature) + +* **Spring Boot CLI:** [Upgrading the Spring Boot CLI](upgrading.html#upgrading.cli) + +## 3. Developing with Spring Boot + +Ready to actually start using Spring Boot? [We have you covered](using.html#using): + +* **Build systems:** [Maven](using.html#using.build-systems.maven) | [Gradle](using.html#using.build-systems.gradle) | [Ant](using.html#using.build-systems.ant) | [Starters](using.html#using.build-systems.starters) + +* **Best practices:** [Code Structure](using.html#using.structuring-your-code) | [@Configuration](using.html#using.configuration-classes) | [@EnableAutoConfiguration](using.html#using.auto-configuration) | [Beans and Dependency Injection](using.html#using.spring-beans-and-dependency-injection) + +* **Running your code:** [IDE](using.html#using.running-your-application.from-an-ide) | [Packaged](using.html#using.running-your-application.as-a-packaged-application) | [Maven](using.html#using.running-your-application.with-the-maven-plugin) | [Gradle](using.html#using.running-your-application.with-the-gradle-plugin) + +* **Packaging your app:** [Production jars](using.html#using.packaging-for-production) + +* **Spring Boot CLI:** [Using the CLI](cli.html#cli) + +## 4. 
Learning About Spring Boot Features + +Need more details about Spring Boot’s core features? [The following content is for you](features.html#features): + +* **Spring Application:** [SpringApplication](features.html#features.spring-application) + +* **External Configuration:** [External Configuration](features.html#features.external-config) + +* **Profiles:** [Profiles](features.html#features.profiles) + +* **Logging:** [Logging](features.html#features.logging) + +## 5. Web + +If you develop Spring Boot web applications, take a look at the following content: + +* **Servlet Web Applications:** [Spring MVC, Jersey, Embedded Servlet Containers](web.html#web.servlet) + +* **Reactive Web Applications:** [Spring Webflux, Embedded Servlet Containers](web.html#web.reactive) + +* **Graceful Shutdown:** [Graceful Shutdown](web.html#web.graceful-shutdown) + +* **Spring Security:** [Default Security Configuration, Auto-configuration for OAuth2, SAML](web.html#web.security) + +* **Spring Session:** [Auto-configuration for Spring Session](web.html#web.spring-session) + +* **Spring HATEOAS:** [Auto-configuration for Spring HATEOAS](web.html#web.spring-hateoas) + +## 6. Data + +If your application deals with a datastore, you can see how to configure that here: + +* **SQL:** [Configuring a SQL Datastore, Embedded Database support, Connection pools, and more.](data.html#data.sql) + +* **NOSQL:** [Auto-configuration for NOSQL stores such as Redis, MongoDB, Neo4j, and others.](data.html#data.nosql) + +## 7. 
Messaging + +If your application uses any messaging protocol, see one or more of the following sections: + +* **JMS:** [Auto-configuration for ActiveMQ and Artemis, Sending and Receiving messages through JMS](messaging.html#messaging.jms) + +* **AMQP:** [Auto-configuration for RabbitMQ](messaging.html#messaging.amqp) + +* **Kafka:** [Auto-configuration for Spring Kafka](messaging.html#messaging.kafka) + +* **RSocket:** [Auto-configuration for Spring Framework’s RSocket Support](messaging.html#messaging.rsocket) + +* **Spring Integration:** [Auto-configuration for Spring Integration](messaging.html#messaging.spring-integration) + +## 8. IO + +If your application needs IO capabilities, see one or more of the following sections: + +* **Caching:** [Caching support EhCache, Hazelcast, Infinispan and more](io.html#io.caching) + +* **Quartz:** [Quartz Scheduling](io.html#io.quartz) + +* **Mail:** [Sending Email](io.html#io.email) + +* **Validation:** [JSR-303 Validation](io.html#io.validation) + +* **REST Clients:** [Calling REST Services with RestTemplate and WebClient](io.html#io.rest-client) + +* **Webservices:** [Auto-configuration for Spring Web Services](io.html#io.webservices) + +* **JTA:** [Distributed Transactions with JTA](io.html#io.jta) + +## 9. Container Images + +Spring Boot provides first-class support for building efficient container images. You can read more about it here: + +* **Efficient Container Images:** [Tips to optimize container images such as Docker images](container-images.html#container-images.efficient-images) + +* **Dockerfiles:** [Building container images using dockerfiles](container-images.html#container-images.dockerfiles) + +* **Cloud Native Buildpacks:** [Support for Cloud Native Buildpacks with Maven and Gradle](container-images.html#container-images.buildpacks) + +## 10. 
Advanced Topics + +Finally, we have a few topics for more advanced users: + +* **Spring Boot Applications Deployment:** [Cloud Deployment](deployment.html#deployment.cloud) | [OS Service](deployment.html#deployment.installing.nix-services) + +* **Build tool plugins:** [Maven](build-tool-plugins.html#build-tool-plugins.maven) | [Gradle](build-tool-plugins.html#build-tool-plugins.gradle) + +* **Appendix:** [Application Properties](application-properties.html#appendix.application-properties) | [Configuration Metadata](configuration-metadata.html#appendix.configuration-metadata) | [Auto-configuration Classes](auto-configuration-classes.html#appendix.auto-configuration-classes) | [Test Auto-configuration Annotations](test-auto-configuration.html#appendix.test-auto-configuration) | [Executable Jars](executable-jar.html#appendix.executable-jar) | [Dependency Versions](dependency-versions.html#appendix.dependency-versions) + diff --git a/docs/en/spring-boot/features.md b/docs/en/spring-boot/features.md new file mode 100644 index 0000000000000000000000000000000000000000..2abd21de2f2eba65ef4870e157cdd77b1cfabebd --- /dev/null +++ b/docs/en/spring-boot/features.md @@ -0,0 +1,5097 @@ +# Core Features + +This section dives into the details of Spring Boot. +Here you can learn about the key features that you may want to use and customize. +If you have not already done so, you might want to read the "[getting-started.html](getting-started.html#getting-started)" and "[using.html](using.html#using)" sections, so that you have a good grounding of the basics. + +## 1. SpringApplication + + +The `SpringApplication` class provides a convenient way to bootstrap a Spring application that is started from a `main()` method. 
+In many situations, you can delegate to the static `SpringApplication.run` method, as shown in the following example: + +``` +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class MyApplication { + + public static void main(String[] args) { + SpringApplication.run(MyApplication.class, args); + } + +} + +``` + +When your application starts, you should see something similar to the following output: + +``` + . ____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: v2.6.4 + +2021-02-03 10:33:25.224 INFO 17321 --- [ main] o.s.b.d.s.s.SpringApplicationExample : Starting SpringApplicationExample using Java 1.8.0_232 on mycomputer with PID 17321 (/apps/myjar.jar started by pwebb) +2021-02-03 10:33:25.226 INFO 17900 --- [ main] o.s.b.d.s.s.SpringApplicationExample : No active profile set, falling back to default profiles: default +2021-02-03 10:33:26.046 INFO 17321 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8080 (http) +2021-02-03 10:33:26.054 INFO 17900 --- [ main] o.apache.catalina.core.StandardService : Starting service [Tomcat] +2021-02-03 10:33:26.055 INFO 17900 --- [ main] org.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/9.0.41] +2021-02-03 10:33:26.097 INFO 17900 --- [ main] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext +2021-02-03 10:33:26.097 INFO 17900 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 821 ms +2021-02-03 10:33:26.144 INFO 17900 --- [ main] s.tomcat.SampleTomcatApplication : ServletContext initialized +2021-02-03 10:33:26.376 INFO 17900 --- [ main] 
o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http) with context path '' +2021-02-03 10:33:26.384 INFO 17900 --- [ main] o.s.b.d.s.s.SpringApplicationExample : Started SampleTomcatApplication in 1.514 seconds (JVM running for 1.823) +``` + +By default, `INFO` logging messages are shown, including some relevant startup details, such as the user that launched the application. +If you need a log level other than `INFO`, you can set it, as described in [Log Levels](#features.logging.log-levels). +The application version is determined using the implementation version from the main application class’s package. +Startup information logging can be turned off by setting `spring.main.log-startup-info` to `false`. +This will also turn off logging of the application’s active profiles. + +| |To add additional logging during startup, you can override `logStartupInfo(boolean)` in a subclass of `SpringApplication`.| +|---|--------------------------------------------------------------------------------------------------------------------------| + +### 1.1. Startup Failure + +If your application fails to start, registered `FailureAnalyzers` get a chance to provide a dedicated error message and a concrete action to fix the problem. +For instance, if you start a web application on port `8080` and that port is already in use, you should see something similar to the following message: + +``` +*************************** +APPLICATION FAILED TO START +*************************** + +Description: + +Embedded servlet container failed to start. Port 8080 was already in use. + +Action: + +Identify and stop the process that is listening on port 8080 or configure this application to listen on another port. 
+``` + +| |Spring Boot provides numerous `FailureAnalyzer` implementations, and you can [add your own](howto.html#howto.application.failure-analyzer).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------| + +If no failure analyzers are able to handle the exception, you can still display the full conditions report to better understand what went wrong. +To do so, you need to [enable the `debug` property](#features.external-config) or [enable `DEBUG` logging](#features.logging.log-levels) for `org.springframework.boot.autoconfigure.logging.ConditionEvaluationReportLoggingListener`. + +For instance, if you are running your application by using `java -jar`, you can enable the `debug` property as follows: + +``` +$ java -jar myproject-0.0.1-SNAPSHOT.jar --debug +``` + +### 1.2. Lazy Initialization + +`SpringApplication` allows an application to be initialized lazily. +When lazy initialization is enabled, beans are created as they are needed rather than during application startup. +As a result, enabling lazy initialization can reduce the time that it takes your application to start. +In a web application, enabling lazy initialization will result in many web-related beans not being initialized until an HTTP request is received. + +A downside of lazy initialization is that it can delay the discovery of a problem with the application. +If a misconfigured bean is initialized lazily, a failure will no longer occur during startup and the problem will only become apparent when the bean is initialized. +Care must also be taken to ensure that the JVM has sufficient memory to accommodate all of the application’s beans and not just those that are initialized during startup. +For these reasons, lazy initialization is not enabled by default and it is recommended that fine-tuning of the JVM’s heap size is done before enabling lazy initialization. 
+ +Lazy initialization can be enabled programmatically using the `lazyInitialization` method on `SpringApplicationBuilder` or the `setLazyInitialization` method on `SpringApplication`. +Alternatively, it can be enabled using the `spring.main.lazy-initialization` property as shown in the following example: + +Properties + +``` +spring.main.lazy-initialization=true +``` + +Yaml + +``` +spring: + main: + lazy-initialization: true +``` + +| |If you want to disable lazy initialization for certain beans while using lazy initialization for the rest of the application, you can explicitly set their lazy attribute to false using the `@Lazy(false)` annotation.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.3. Customizing the Banner + +The banner that is printed on start up can be changed by adding a `banner.txt` file to your classpath or by setting the `spring.banner.location` property to the location of such a file. +If the file has an encoding other than UTF-8, you can set `spring.banner.charset`. +In addition to a text file, you can also add a `banner.gif`, `banner.jpg`, or `banner.png` image file to your classpath or set the `spring.banner.image.location` property. +Images are converted into an ASCII art representation and printed above any text banner. 
+ +Inside your `banner.txt` file, you can use any key available in the `Environment` as well as any of the following placeholders: + +| Variable | Description | +|--------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `${application.version}` | The version number of your application, as declared in `MANIFEST.MF`.
For example, `Implementation-Version: 1.0` is printed as `1.0`. | +| `${application.formatted-version}` | The version number of your application, as declared in `MANIFEST.MF` and formatted for display (surrounded with brackets and prefixed with `v`).
For example `(v1.0)`. | +| `${spring-boot.version}` | The Spring Boot version that you are using.
For example `2.6.4`. | +| `${spring-boot.formatted-version}` | The Spring Boot version that you are using, formatted for display (surrounded with brackets and prefixed with `v`).
For example `(v2.6.4)`. | +|`${Ansi.NAME}` (or `${AnsiColor.NAME}`, `${AnsiBackground.NAME}`, `${AnsiStyle.NAME}`)|Where `NAME` is the name of an ANSI escape code.
See [`AnsiPropertySource`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/ansi/AnsiPropertySource.java) for details.| +| `${application.title}` | The title of your application, as declared in `MANIFEST.MF`.
For example `Implementation-Title: MyApp` is printed as `MyApp`. | + +| |The `SpringApplication.setBanner(…​)` method can be used if you want to generate a banner programmatically.
Use the `org.springframework.boot.Banner` interface and implement your own `printBanner()` method.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also use the `spring.main.banner-mode` property to determine if the banner has to be printed on `System.out` (`console`), sent to the configured logger (`log`), or not produced at all (`off`). + +The printed banner is registered as a singleton bean under the following name: `springBootBanner`. + +| |The `${application.version}` and `${application.formatted-version}` properties are only available if you are using Spring Boot launchers.
The values will not be resolved if you are running an unpacked jar and starting it with `java -cp <classpath>`.

This is why we recommend that you always launch unpacked jars using `java org.springframework.boot.loader.JarLauncher`.
This will initialize the `application.*` banner variables before building the classpath and launching your app.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.4. Customizing SpringApplication + +If the `SpringApplication` defaults are not to your taste, you can instead create a local instance and customize it. +For example, to turn off the banner, you could write: + +``` +import org.springframework.boot.Banner; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class MyApplication { + + public static void main(String[] args) { + SpringApplication application = new SpringApplication(MyApplication.class); + application.setBannerMode(Banner.Mode.OFF); + application.run(args); + } + +} + +``` + +| |The constructor arguments passed to `SpringApplication` are configuration sources for Spring beans.
In most cases, these are references to `@Configuration` classes, but they could also be direct references `@Component` classes.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +It is also possible to configure the `SpringApplication` by using an `application.properties` file. +See *[Externalized Configuration](#features.external-config)* for details. + +For a complete list of the configuration options, see the [`SpringApplication` Javadoc](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/SpringApplication.html). + +### 1.5. Fluent Builder API + +If you need to build an `ApplicationContext` hierarchy (multiple contexts with a parent/child relationship) or if you prefer using a “fluent” builder API, you can use the `SpringApplicationBuilder`. + +The `SpringApplicationBuilder` lets you chain together multiple method calls and includes `parent` and `child` methods that let you create a hierarchy, as shown in the following example: + +``` +new SpringApplicationBuilder() + .sources(Parent.class) + .child(Application.class) + .bannerMode(Banner.Mode.OFF) + .run(args); + +``` + +| |There are some restrictions when creating an `ApplicationContext` hierarchy.
For example, Web components **must** be contained within the child context, and the same `Environment` is used for both parent and child contexts.
See the [`SpringApplicationBuilder` Javadoc](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/builder/SpringApplicationBuilder.html) for full details.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.6. Application Availability + +When deployed on platforms, applications can provide information about their availability to the platform using infrastructure such as [Kubernetes Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/). +Spring Boot includes out-of-the box support for the commonly used “liveness” and “readiness” availability states. +If you are using Spring Boot’s “actuator” support then these states are exposed as health endpoint groups. + +In addition, you can also obtain availability states by injecting the `ApplicationAvailability` interface into your own beans. + +#### 1.6.1. Liveness State + +The “Liveness” state of an application tells whether its internal state allows it to work correctly, or recover by itself if it is currently failing. +A broken “Liveness” state means that the application is in a state that it cannot recover from, and the infrastructure should restart the application. + +| |In general, the "Liveness" state should not be based on external checks, such as [Health checks](actuator.html#actuator.endpoints.health).
If it did, a failing external system (a database, a Web API, an external cache) would trigger massive restarts and cascading failures across the platform.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The internal state of Spring Boot applications is mostly represented by the Spring `ApplicationContext`. +If the application context has started successfully, Spring Boot assumes that the application is in a valid state. +An application is considered live as soon as the context has been refreshed, see [Spring Boot application lifecycle and related Application Events](#features.spring-application.application-events-and-listeners). + +#### 1.6.2. Readiness State + +The “Readiness” state of an application tells whether the application is ready to handle traffic. +A failing “Readiness” state tells the platform that it should not route traffic to the application for now. +This typically happens during startup, while `CommandLineRunner` and `ApplicationRunner` components are being processed, or at any time if the application decides that it is too busy for additional traffic. + +An application is considered ready as soon as application and command-line runners have been called, see [Spring Boot application lifecycle and related Application Events](#features.spring-application.application-events-and-listeners). 
+ +| |Tasks expected to run during startup should be executed by `CommandLineRunner` and `ApplicationRunner` components instead of using Spring component lifecycle callbacks such as `@PostConstruct`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.6.3. Managing the Application Availability State + +Application components can retrieve the current availability state at any time, by injecting the `ApplicationAvailability` interface and calling methods on it. +More often, applications will want to listen to state updates or update the state of the application. + +For example, we can export the "Readiness" state of the application to a file so that a Kubernetes "exec Probe" can look at this file: + +``` +import org.springframework.boot.availability.AvailabilityChangeEvent; +import org.springframework.boot.availability.ReadinessState; +import org.springframework.context.event.EventListener; +import org.springframework.stereotype.Component; + +@Component +public class MyReadinessStateExporter { + + @EventListener + public void onStateChange(AvailabilityChangeEvent event) { + switch (event.getState()) { + case ACCEPTING_TRAFFIC: + // create file /tmp/healthy + break; + case REFUSING_TRAFFIC: + // remove file /tmp/healthy + break; + } + } + +} + +``` + +We can also update the state of the application, when the application breaks and cannot recover: + +``` +import org.springframework.boot.availability.AvailabilityChangeEvent; +import org.springframework.boot.availability.LivenessState; +import org.springframework.context.ApplicationEventPublisher; +import org.springframework.stereotype.Component; + +@Component +public class MyLocalCacheVerifier { + + private final ApplicationEventPublisher eventPublisher; + + public MyLocalCacheVerifier(ApplicationEventPublisher eventPublisher) { + this.eventPublisher = 
eventPublisher; + } + + public void checkLocalCache() { + try { + // ... + } + catch (CacheCompletelyBrokenException ex) { + AvailabilityChangeEvent.publish(this.eventPublisher, ex, LivenessState.BROKEN); + } + } + +} + +``` + +Spring Boot provides [Kubernetes HTTP probes for "Liveness" and "Readiness" with Actuator Health Endpoints](actuator.html#actuator.endpoints.kubernetes-probes). +You can get more guidance about [deploying Spring Boot applications on Kubernetes in the dedicated section](deployment.html#deployment.cloud.kubernetes). + +### 1.7. Application Events and Listeners + +In addition to the usual Spring Framework events, such as [`ContextRefreshedEvent`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/event/ContextRefreshedEvent.html), a `SpringApplication` sends some additional application events. + +| |Some events are actually triggered before the `ApplicationContext` is created, so you cannot register a listener on those as a `@Bean`.
You can register them with the `SpringApplication.addListeners(…​)` method or the `SpringApplicationBuilder.listeners(…​)` method.

If you want those listeners to be registered automatically, regardless of the way the application is created, you can add a `META-INF/spring.factories` file to your project and reference your listener(s) by using the `org.springframework.context.ApplicationListener` key, as shown in the following example:

```
org.springframework.context.ApplicationListener=com.example.project.MyListener
```| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Application events are sent in the following order, as your application runs: + +1. An `ApplicationStartingEvent` is sent at the start of a run but before any processing, except for the registration of listeners and initializers. + +2. An `ApplicationEnvironmentPreparedEvent` is sent when the `Environment` to be used in the context is known but before the context is created. + +3. An `ApplicationContextInitializedEvent` is sent when the `ApplicationContext` is prepared and ApplicationContextInitializers have been called but before any bean definitions are loaded. + +4. An `ApplicationPreparedEvent` is sent just before the refresh is started but after bean definitions have been loaded. + +5. An `ApplicationStartedEvent` is sent after the context has been refreshed but before any application and command-line runners have been called. + +6. An `AvailabilityChangeEvent` is sent right after with `LivenessState.CORRECT` to indicate that the application is considered as live. + +7. An `ApplicationReadyEvent` is sent after any [application and command-line runners](#features.spring-application.command-line-runner) have been called. + +8. 
An `AvailabilityChangeEvent` is sent right after with `ReadinessState.ACCEPTING_TRAFFIC` to indicate that the application is ready to service requests. + +9. An `ApplicationFailedEvent` is sent if there is an exception on startup. + +The above list only includes `SpringApplicationEvent`s that are tied to a `SpringApplication`. +In addition to these, the following events are also published after `ApplicationPreparedEvent` and before `ApplicationStartedEvent`: + +* A `WebServerInitializedEvent` is sent after the `WebServer` is ready. `ServletWebServerInitializedEvent` and `ReactiveWebServerInitializedEvent` are the servlet and reactive variants respectively. + +* A `ContextRefreshedEvent` is sent when an `ApplicationContext` is refreshed. + +| |You often need not use application events, but it can be handy to know that they exist.
Internally, Spring Boot uses events to handle a variety of tasks.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Event listeners should not run potentially lengthy tasks as they execute in the same thread by default.
Consider using [application and command-line runners](#features.spring-application.command-line-runner) instead.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Application events are sent by using Spring Framework’s event publishing mechanism. +Part of this mechanism ensures that an event published to the listeners in a child context is also published to the listeners in any ancestor contexts. +As a result of this, if your application uses a hierarchy of `SpringApplication` instances, a listener may receive multiple instances of the same type of application event. + +To allow your listener to distinguish between an event for its context and an event for a descendant context, it should request that its application context is injected and then compare the injected context with the context of the event. +The context can be injected by implementing `ApplicationContextAware` or, if the listener is a bean, by using `@Autowired`. + +### 1.8. Web Environment + +A `SpringApplication` attempts to create the right type of `ApplicationContext` on your behalf. +The algorithm used to determine a `WebApplicationType` is the following: + +* If Spring MVC is present, an `AnnotationConfigServletWebServerApplicationContext` is used + +* If Spring MVC is not present and Spring WebFlux is present, an `AnnotationConfigReactiveWebServerApplicationContext` is used + +* Otherwise, `AnnotationConfigApplicationContext` is used + +This means that if you are using Spring MVC and the new `WebClient` from Spring WebFlux in the same application, Spring MVC will be used by default. +You can override that easily by calling `setWebApplicationType(WebApplicationType)`. + +It is also possible to take complete control of the `ApplicationContext` type that is used by calling `setApplicationContextClass(…​)`. 
+ +| |It is often desirable to call `setWebApplicationType(WebApplicationType.NONE)` when using `SpringApplication` within a JUnit test.| +|---|----------------------------------------------------------------------------------------------------------------------------------| + +### 1.9. Accessing Application Arguments + +If you need to access the application arguments that were passed to `SpringApplication.run(…​)`, you can inject a `org.springframework.boot.ApplicationArguments` bean. +The `ApplicationArguments` interface provides access to both the raw `String[]` arguments as well as parsed `option` and `non-option` arguments, as shown in the following example: + +``` +import java.util.List; + +import org.springframework.boot.ApplicationArguments; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + public MyBean(ApplicationArguments args) { + boolean debug = args.containsOption("debug"); + List files = args.getNonOptionArgs(); + if (debug) { + System.out.println(files); + } + // if run with "--debug logfile.txt" prints ["logfile.txt"] + } + +} + +``` + +| |Spring Boot also registers a `CommandLinePropertySource` with the Spring `Environment`.
This lets you also inject single application arguments by using the `@Value` annotation.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.10. Using the ApplicationRunner or CommandLineRunner + +If you need to run some specific code once the `SpringApplication` has started, you can implement the `ApplicationRunner` or `CommandLineRunner` interfaces. +Both interfaces work in the same way and offer a single `run` method, which is called just before `SpringApplication.run(…​)` completes. + +| |This contract is well suited for tasks that should run after application startup but before it starts accepting traffic.| +|---|------------------------------------------------------------------------------------------------------------------------| + +The `CommandLineRunner` interfaces provides access to application arguments as a string array, whereas the `ApplicationRunner` uses the `ApplicationArguments` interface discussed earlier. +The following example shows a `CommandLineRunner` with a `run` method: + +``` +import org.springframework.boot.CommandLineRunner; +import org.springframework.stereotype.Component; + +@Component +public class MyCommandLineRunner implements CommandLineRunner { + + @Override + public void run(String... args) { + // Do something... + } + +} + +``` + +If several `CommandLineRunner` or `ApplicationRunner` beans are defined that must be called in a specific order, you can additionally implement the `org.springframework.core.Ordered` interface or use the `org.springframework.core.annotation.Order` annotation. + +### 1.11. Application Exit + +Each `SpringApplication` registers a shutdown hook with the JVM to ensure that the `ApplicationContext` closes gracefully on exit. +All the standard Spring lifecycle callbacks (such as the `DisposableBean` interface or the `@PreDestroy` annotation) can be used. 
+ +In addition, beans may implement the `org.springframework.boot.ExitCodeGenerator` interface if they wish to return a specific exit code when `SpringApplication.exit()` is called. +This exit code can then be passed to `System.exit()` to return it as a status code, as shown in the following example: + +``` +import org.springframework.boot.ExitCodeGenerator; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.context.annotation.Bean; + +@SpringBootApplication +public class MyApplication { + + @Bean + public ExitCodeGenerator exitCodeGenerator() { + return () -> 42; + } + + public static void main(String[] args) { + System.exit(SpringApplication.exit(SpringApplication.run(MyApplication.class, args))); + } + +} + +``` + +Also, the `ExitCodeGenerator` interface may be implemented by exceptions. +When such an exception is encountered, Spring Boot returns the exit code provided by the implemented `getExitCode()` method. + +### 1.12. Admin Features + +It is possible to enable admin-related features for the application by specifying the `spring.application.admin.enabled` property. +This exposes the [`SpringApplicationAdminMXBean`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/admin/SpringApplicationAdminMXBean.java) on the platform `MBeanServer`. +You could use this feature to administer your Spring Boot application remotely. +This feature could also be useful for any service wrapper implementation. + +| |If you want to know on which HTTP port the application is running, get the property with a key of `local.server.port`.| +|---|----------------------------------------------------------------------------------------------------------------------| + +### 1.13. 
Application Startup tracking + +During the application startup, the `SpringApplication` and the `ApplicationContext` perform many tasks related to the application lifecycle, +the beans lifecycle or even processing application events. +With [`ApplicationStartup`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/metrics/ApplicationStartup.html), Spring Framework [allows you to track the application startup sequence with `StartupStep` objects](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/core.html#context-functionality-startup). +This data can be collected for profiling purposes, or just to have a better understanding of an application startup process. + +You can choose an `ApplicationStartup` implementation when setting up the `SpringApplication` instance. +For example, to use the `BufferingApplicationStartup`, you could write: + +``` +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.context.metrics.buffering.BufferingApplicationStartup; + +@SpringBootApplication +public class MyApplication { + + public static void main(String[] args) { + SpringApplication application = new SpringApplication(MyApplication.class); + application.setApplicationStartup(new BufferingApplicationStartup(2048)); + application.run(args); + } + +} + +``` + +The first available implementation, `FlightRecorderApplicationStartup` is provided by Spring Framework. +It adds Spring-specific startup events to a Java Flight Recorder session and is meant for profiling applications and correlating their Spring context lifecycle with JVM events (such as allocations, GCs, class loading…​). 
+Once configured, you can record data by running the application with the Flight Recorder enabled: + +``` +$ java -XX:StartFlightRecording:filename=recording.jfr,duration=10s -jar demo.jar +``` + +Spring Boot ships with the `BufferingApplicationStartup` variant; this implementation is meant for buffering the startup steps and draining them into an external metrics system. +Applications can ask for the bean of type `BufferingApplicationStartup` in any component. + +Spring Boot can also be configured to expose a [`startup` endpoint](https://docs.spring.io/spring-boot/docs/2.6.4/actuator-api/htmlsingle/#startup) that provides this information as a JSON document. + +## 2. Externalized Configuration + + +Spring Boot lets you externalize your configuration so that you can work with the same application code in different environments. +You can use a variety of external configuration sources, including Java properties files, YAML files, environment variables, and command-line arguments. + +Property values can be injected directly into your beans by using the `@Value` annotation, accessed through Spring’s `Environment` abstraction, or be [bound to structured objects](#features.external-config.typesafe-configuration-properties) through `@ConfigurationProperties`. + +Spring Boot uses a very particular `PropertySource` order that is designed to allow sensible overriding of values. +Properties are considered in the following order (with values from lower items overriding earlier ones): + +1. Default properties (specified by setting `SpringApplication.setDefaultProperties`). + +2. [`@PropertySource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/PropertySource.html) annotations on your `@Configuration` classes. + Please note that such property sources are not added to the `Environment` until the application context is being refreshed. 
+ This is too late to configure certain properties such as `logging.*` and `spring.main.*` which are read before refresh begins. + +3. Config data (such as `application.properties` files). + +4. A `RandomValuePropertySource` that has properties only in `random.*`. + +5. OS environment variables. + +6. Java System properties (`System.getProperties()`). + +7. JNDI attributes from `java:comp/env`. + +8. `ServletContext` init parameters. + +9. `ServletConfig` init parameters. + +10. Properties from `SPRING_APPLICATION_JSON` (inline JSON embedded in an environment variable or system property). + +11. Command line arguments. + +12. `properties` attribute on your tests. + Available on [`@SpringBootTest`](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/test/context/SpringBootTest.html) and the [test annotations for testing a particular slice of your application](#features.testing.spring-boot-applications.autoconfigured-tests). + +13. [`@TestPropertySource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/TestPropertySource.html) annotations on your tests. + +14. [Devtools global settings properties](using.html#using.devtools.globalsettings) in the `$HOME/.config/spring-boot` directory when devtools is active. + +Config data files are considered in the following order: + +1. [Application properties](#features.external-config.files) packaged inside your jar (`application.properties` and YAML variants). + +2. [Profile-specific application properties](#features.external-config.files.profile-specific) packaged inside your jar (`application-{profile}.properties` and YAML variants). + +3. [Application properties](#features.external-config.files) outside of your packaged jar (`application.properties` and YAML variants). + +4. [Profile-specific application properties](#features.external-config.files.profile-specific) outside of your packaged jar (`application-{profile}.properties` and YAML variants). 
+ +| |It is recommended to stick with one format for your entire application.
If you have configuration files with both `.properties` and `.yml` format in the same location, `.properties` takes precedence.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To provide a concrete example, suppose you develop a `@Component` that uses a `name` property, as shown in the following example: + +``` +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + @Value("${name}") + private String name; + + // ... + +} + +``` + +On your application classpath (for example, inside your jar) you can have an `application.properties` file that provides a sensible default property value for `name`. +When running in a new environment, an `application.properties` file can be provided outside of your jar that overrides the `name`. +For one-off testing, you can launch with a specific command line switch (for example, `java -jar app.jar --name="Spring"`). + +| |The `env` and `configprops` endpoints can be useful in determining why a property has a particular value.
You can use these two endpoints to diagnose unexpected property values.
See the "[Production ready features](actuator.html#actuator.endpoints)" section for details.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.1. Accessing Command Line Properties + +By default, `SpringApplication` converts any command line option arguments (that is, arguments starting with `--`, such as `--server.port=9000`) to a `property` and adds them to the Spring `Environment`. +As mentioned previously, command line properties always take precedence over file-based property sources. + +If you do not want command line properties to be added to the `Environment`, you can disable them by using `SpringApplication.setAddCommandLineProperties(false)`. + +### 2.2. JSON Application Properties + +Environment variables and system properties often have restrictions that mean some property names cannot be used. +To help with this, Spring Boot allows you to encode a block of properties into a single JSON structure. + +When your application starts, any `spring.application.json` or `SPRING_APPLICATION_JSON` properties will be parsed and added to the `Environment`. + +For example, the `SPRING_APPLICATION_JSON` property can be supplied on the command line in a UN\*X shell as an environment variable: + +``` +$ SPRING_APPLICATION_JSON='{"my":{"name":"test"}}' java -jar myapp.jar +``` + +In the preceding example, you end up with `my.name=test` in the Spring `Environment`. 
+ +The same JSON can also be provided as a system property: + +``` +$ java -Dspring.application.json='{"my":{"name":"test"}}' -jar myapp.jar +``` + +Or you could supply the JSON by using a command line argument: + +``` +$ java -jar myapp.jar --spring.application.json='{"my":{"name":"test"}}' +``` + +If you are deploying to a classic Application Server, you could also use a JNDI variable named `java:comp/env/spring.application.json`. + +| |Although `null` values from the JSON will be added to the resulting property source, the `PropertySourcesPropertyResolver` treats `null` properties as missing values.
This means that the JSON cannot override properties from lower order property sources with a `null` value.| +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.3. External Application Properties + +Spring Boot will automatically find and load `application.properties` and `application.yaml` files from the following locations when your application starts: + +1. From the classpath + + 1. The classpath root + + 2. The classpath `/config` package + +2. From the current directory + + 1. The current directory + + 2. The `/config` subdirectory in the current directory + + 3. Immediate child directories of the `/config` subdirectory + +The list is ordered by precedence (with values from lower items overriding earlier ones). +Documents from the loaded files are added as `PropertySources` to the Spring `Environment`. + +If you do not like `application` as the configuration file name, you can switch to another file name by specifying a `spring.config.name` environment property. +For example, to look for `myproject.properties` and `myproject.yaml` files you can run your application as follows: + +``` +$ java -jar myproject.jar --spring.config.name=myproject +``` + +You can also refer to an explicit location by using the `spring.config.location` environment property. +This property accepts a comma-separated list of one or more locations to check. 
+ +The following example shows how to specify two distinct files: + +``` +$ java -jar myproject.jar --spring.config.location=\ + optional:classpath:/default.properties,\ + optional:classpath:/override.properties +``` + +| |Use the prefix `optional:` if the [locations are optional](#features.external-config.files.optional-prefix) and you do not mind if they do not exist.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`spring.config.name`, `spring.config.location`, and `spring.config.additional-location` are used very early to determine which files have to be loaded.
They must be defined as an environment property (typically an OS environment variable, a system property, or a command-line argument).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If `spring.config.location` contains directories (as opposed to files), they should end in `/`. +At runtime they will be appended with the names generated from `spring.config.name` before being loaded. +Files specified in `spring.config.location` are imported directly. + +| |Both directory and file location values are also expanded to check for [profile-specific files](#features.external-config.files.profile-specific).
For example, if you have a `spring.config.location` of `classpath:myconfig.properties`, you will also find appropriate `classpath:myconfig-<profile>.properties` files are loaded.| +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In most situations, each `spring.config.location` item you add will reference a single file or directory. +Locations are processed in the order that they are defined and later ones can override the values of earlier ones. + +If you have a complex location setup, and you use profile-specific configuration files, you may need to provide further hints so that Spring Boot knows how they should be grouped. +A location group is a collection of locations that are all considered at the same level. +For example, you might want to group all classpath locations, then all external locations. +Items within a location group should be separated with `;`. +See the example in the “[Profile Specific Files](#features.external-config.files.profile-specific)” section for more details. + +Locations configured by using `spring.config.location` replace the default locations. +For example, if `spring.config.location` is configured with the value `optional:classpath:/custom-config/,optional:file:./custom-config/`, the complete set of locations considered is: + +1. `optional:classpath:custom-config/` + +2. `optional:file:./custom-config/` + +If you prefer to add additional locations, rather than replacing them, you can use `spring.config.additional-location`. +Properties loaded from additional locations can override those in the default locations. 
+For example, if `spring.config.additional-location` is configured with the value `optional:classpath:/custom-config/,optional:file:./custom-config/`, the complete set of locations considered is: + +1. `optional:classpath:/;optional:classpath:/config/` + +2. `optional:file:./;optional:file:./config/;optional:file:./config/*/` + +3. `optional:classpath:custom-config/` + +4. `optional:file:./custom-config/` + +This search ordering lets you specify default values in one configuration file and then selectively override those values in another. +You can provide default values for your application in `application.properties` (or whatever other basename you choose with `spring.config.name`) in one of the default locations. +These default values can then be overridden at runtime with a different file located in one of the custom locations. + +| |If you use environment variables rather than system properties, most operating systems disallow period-separated key names, but you can use underscores instead (for example, `SPRING_CONFIG_NAME` instead of `spring.config.name`).
See [Binding from Environment Variables](#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables) for details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If your application runs in a servlet container or application server, then JNDI properties (in `java:comp/env`) or servlet context initialization parameters can be used instead of, or as well as, environment variables or system properties.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.3.1. Optional Locations + +By default, when a specified config data location does not exist, Spring Boot will throw a `ConfigDataLocationNotFoundException` and your application will not start. + +If you want to specify a location, but you do not mind if it does not always exist, you can use the `optional:` prefix. +You can use this prefix with the `spring.config.location` and `spring.config.additional-location` properties, as well as with [`spring.config.import`](#features.external-config.files.importing) declarations. + +For example, a `spring.config.import` value of `optional:file:./myconfig.properties` allows your application to start, even if the `myconfig.properties` file is missing. + +If you want to ignore all `ConfigDataLocationNotFoundExceptions` and always continue to start your application, you can use the `spring.config.on-not-found` property. 
+Set the value to `ignore` using `SpringApplication.setDefaultProperties(…​)` or with a system/environment variable. + +#### 2.3.2. Wildcard Locations + +If a config file location includes the `*` character for the last path segment, it is considered a wildcard location. +Wildcards are expanded when the config is loaded so that immediate subdirectories are also checked. +Wildcard locations are particularly useful in an environment such as Kubernetes when there are multiple sources of config properties. + +For example, if you have some Redis configuration and some MySQL configuration, you might want to keep those two pieces of configuration separate, while requiring that both those are present in an `application.properties` file. +This might result in two separate `application.properties` files mounted at different locations such as `/config/redis/application.properties` and `/config/mysql/application.properties`. +In such a case, having a wildcard location of `config/*/`, will result in both files being processed. + +By default, Spring Boot includes `config/*/` in the default search locations. +It means that all subdirectories of the `/config` directory outside of your jar will be searched. + +You can use wildcard locations yourself with the `spring.config.location` and `spring.config.additional-location` properties. + +| |A wildcard location must contain only one `*` and end with `*/` for search locations that are directories or `*/<filename>` for search locations that are files.
Locations with wildcards are sorted alphabetically based on the absolute path of the file names.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Wildcard locations only work with external directories.
You cannot use a wildcard in a `classpath:` location.| +|---|-----------------------------------------------------------------------------------------------------------------| + +#### 2.3.3. Profile Specific Files + +As well as `application` property files, Spring Boot will also attempt to load profile-specific files using the naming convention `application-{profile}`. +For example, if your application activates a profile named `prod` and uses YAML files, then both `application.yml` and `application-prod.yml` will be considered. + +Profile-specific properties are loaded from the same locations as standard `application.properties`, with profile-specific files always overriding the non-specific ones. +If several profiles are specified, a last-wins strategy applies. +For example, if profiles `prod,live` are specified by the `spring.profiles.active` property, values in `application-prod.properties` can be overridden by those in `application-live.properties`. + +| |The last-wins strategy applies at the [location group](#features.external-config.files.location-groups) level.
A `spring.config.location` of `classpath:/cfg/,classpath:/ext/` will not have the same override rules as `classpath:/cfg/;classpath:/ext/`.

For example, continuing our `prod,live` example above, we might have the following files:

```
/cfg
application-live.properties
/ext
application-live.properties
application-prod.properties
```

When we have a `spring.config.location` of `classpath:/cfg/,classpath:/ext/` we process all `/cfg` files before all `/ext` files:

1. `/cfg/application-live.properties`

2. `/ext/application-prod.properties`

3. `/ext/application-live.properties`

When we have `classpath:/cfg/;classpath:/ext/` instead (with a `;` delimiter) we process `/cfg` and `/ext` at the same level:

1. `/ext/application-prod.properties`

2. `/cfg/application-live.properties`

3. `/ext/application-live.properties`| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `Environment` has a set of default profiles (by default, `[default]`) that are used if no active profiles are set. +In other words, if no profiles are explicitly activated, then properties from `application-default` are considered. + +| |Properties files are only ever loaded once.
If you have already directly [imported](#features.external-config.files.importing) a profile-specific property file, then it will not be imported a second time.|
+For instance, the two examples below produce the same result: + +Properties + +``` +spring.config.import=my.properties +my.property=value +``` + +Yaml + +``` +spring: + config: + import: "my.properties" +my: + property: "value" +``` + +Properties + +``` +my.property=value +spring.config.import=my.properties +``` + +Yaml + +``` +my: + property: "value" +spring: + config: + import: "my.properties" +``` + +In both of the above examples, the values from the `my.properties` file will take precedence over the file that triggered its import. + +Several locations can be specified under a single `spring.config.import` key. +Locations will be processed in the order that they are defined, with later imports taking precedence. + +| |When appropriate, [Profile-specific variants](#features.external-config.files.profile-specific) are also considered for import.
The example above would import both `my.properties` and any `my-<profile>.properties` variants.|
By default you can import Java Properties, YAML and “[configuration trees](#features.external-config.files.configtree)”.

Third-party jars can offer support for additional technologies (there is no requirement for files to be local).
For example, you can imagine config data being from external stores such as Consul, Apache ZooKeeper or Netflix Archaius.

If you want to support your own locations, see the `ConfigDataLocationResolver` and `ConfigDataLoader` classes in the `org.springframework.boot.context.config` package.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.3.5. Importing Extensionless Files + +Some cloud platforms cannot add a file extension to volume mounted files. +To import these extensionless files, you need to give Spring Boot a hint so that it knows how to load them. +You can do this by putting an extension hint in square brackets. + +For example, suppose you have a `/etc/config/myconfig` file that you wish to import as yaml. +You can import it from your `application.properties` using the following: + +Properties + +``` +spring.config.import=file:/etc/config/myconfig[.yaml] +``` + +Yaml + +``` +spring: + config: + import: "file:/etc/config/myconfig[.yaml]" +``` + +#### 2.3.6. Using Configuration Trees + +When running applications on a cloud platform (such as Kubernetes) you often need to read config values that the platform supplies. +It is not uncommon to use environment variables for such purposes, but this can have drawbacks, especially if the value is supposed to be kept secret. + +As an alternative to environment variables, many cloud platforms now allow you to map configuration into mounted data volumes. 
+For example, Kubernetes can volume mount both [`ConfigMaps`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#populate-a-volume-with-data-stored-in-a-configmap) and [`Secrets`](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). + +There are two common volume mount patterns that can be used: + +1. A single file contains a complete set of properties (usually written as YAML). + +2. Multiple files are written to a directory tree, with the filename becoming the ‘key’ and the contents becoming the ‘value’. + +For the first case, you can import the YAML or Properties file directly using `spring.config.import` as described [above](#features.external-config.files.importing). +For the second case, you need to use the `configtree:` prefix so that Spring Boot knows it needs to expose all the files as properties. + +As an example, let’s imagine that Kubernetes has mounted the following volume: + +``` +etc/ + config/ + myapp/ + username + password +``` + +The contents of the `username` file would be a config value, and the contents of `password` would be a secret. + +To import these properties, you can add the following to your `application.properties` or `application.yaml` file: + +Properties + +``` +spring.config.import=optional:configtree:/etc/config/ +``` + +Yaml + +``` +spring: + config: + import: "optional:configtree:/etc/config/" +``` + +You can then access or inject `myapp.username` and `myapp.password` properties from the `Environment` in the usual way. + +| |The folders under the config tree form the property name.
In the above example, to access the properties as `username` and `password`, you can set `spring.config.import` to `optional:configtree:/etc/config/myapp`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Filenames with dot notation are also correctly mapped.
For example, a file named `myapp.username` in `/etc/config` would result in a `myapp.username` property in the `Environment`.|
If you need a different order, then you should list each location as a separate import.|
See the *[howto.html](howto.html#howto.properties-and-configuration.short-command-line-arguments)* how-to for details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.3.8. Working with Multi-Document Files + +Spring Boot allows you to split a single physical file into multiple logical documents which are each added independently. +Documents are processed in order, from top to bottom. +Later documents can override the properties defined in earlier ones. + +For `application.yml` files, the standard YAML multi-document syntax is used. +Three consecutive hyphens represent the end of one document, and the start of the next. + +For example, the following file has two logical documents: + +``` +spring: + application: + name: "MyApp" +--- +spring: + application: + name: "MyCloudApp" + config: + activate: + on-cloud-platform: "kubernetes" +``` + +For `application.properties` files a special `#---` comment is used to mark the document splits: + +``` +spring.application.name=MyApp +#--- +spring.application.name=MyCloudApp +spring.config.activate.on-cloud-platform=kubernetes +``` + +| |Property file separators must not have any leading whitespace and must have exactly three hyphen characters.
The lines immediately before and after the separator must not be comments.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Multi-document property files are often used in conjunction with activation properties such as `spring.config.activate.on-profile`.
See the [next section](#features.external-config.files.activation-properties) for details.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Multi-document property files cannot be loaded by using the `@PropertySource` or `@TestPropertySource` annotations.| +|---|-------------------------------------------------------------------------------------------------------------------| + +#### 2.3.9. Activation Properties + +It is sometimes useful to only activate a given set of properties when certain conditions are met. +For example, you might have properties that are only relevant when a specific profile is active. + +You can conditionally activate a properties document using `spring.config.activate.*`. + +The following activation properties are available: + +| Property | Note | +|-------------------|------------------------------------------------------------------------| +| `on-profile` | A profile expression that must match for the document to be active. | +|`on-cloud-platform`|The `CloudPlatform` that must be detected for the document to be active.| + +For example, the following specifies that the second document is only active when running on Kubernetes, and only when either the “prod” or “staging” profiles are active: + +Properties + +``` +myprop=always-set +#--- +spring.config.activate.on-cloud-platform=kubernetes +spring.config.activate.on-profile=prod | staging +myotherprop=sometimes-set +``` + +Yaml + +``` +myprop: + "always-set" +--- +spring: + config: + activate: + on-cloud-platform: "kubernetes" + on-profile: "prod | staging" +myotherprop: "sometimes-set" +``` + +### 2.4. 
Encrypting Properties + +Spring Boot does not provide any built in support for encrypting property values, however, it does provide the hook points necessary to modify values contained in the Spring `Environment`. +The `EnvironmentPostProcessor` interface allows you to manipulate the `Environment` before the application starts. +See [howto.html](howto.html#howto.application.customize-the-environment-or-application-context) for details. + +If you need a secure way to store credentials and passwords, the [Spring Cloud Vault](https://cloud.spring.io/spring-cloud-vault/) project provides support for storing externalized configuration in [HashiCorp Vault](https://www.vaultproject.io/). + +### 2.5. Working with YAML + +[YAML](https://yaml.org) is a superset of JSON and, as such, is a convenient format for specifying hierarchical configuration data. +The `SpringApplication` class automatically supports YAML as an alternative to properties whenever you have the [SnakeYAML](https://bitbucket.org/asomov/snakeyaml) library on your classpath. + +| |If you use “Starters”, SnakeYAML is automatically provided by `spring-boot-starter`.| +|---|------------------------------------------------------------------------------------| + +#### 2.5.1. Mapping YAML to Properties + +YAML documents need to be converted from their hierarchical format to a flat structure that can be used with the Spring `Environment`. +For example, consider the following YAML document: + +``` +environments: + dev: + url: "https://dev.example.com" + name: "Developer Setup" + prod: + url: "https://another.example.com" + name: "My Cool App" +``` + +In order to access these properties from the `Environment`, they would be flattened as follows: + +``` +environments.dev.url=https://dev.example.com +environments.dev.name=Developer Setup +environments.prod.url=https://another.example.com +environments.prod.name=My Cool App +``` + +Likewise, YAML lists also need to be flattened. 
+They are represented as property keys with `[index]` dereferencers. +For example, consider the following YAML: + +``` +my: + servers: + - "dev.example.com" + - "another.example.com" +``` + +The preceding example would be transformed into these properties: + +``` +my.servers[0]=dev.example.com +my.servers[1]=another.example.com +``` + +| |Properties that use the `[index]` notation can be bound to Java `List` or `Set` objects using Spring Boot’s `Binder` class.
For more details see the “[Type-safe Configuration Properties](#features.external-config.typesafe-configuration-properties)” section below.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |YAML files cannot be loaded by using the `@PropertySource` or `@TestPropertySource` annotations.
So, if you need to load values that way, you need to use a properties file.|
+ +For example, if you set the prefix to `input`, a property such as `remote.timeout` will also be resolved as `input.remote.timeout` in the system environment. + +### 2.8. Type-safe Configuration Properties + +Using the `@Value("${property}")` annotation to inject configuration properties can sometimes be cumbersome, especially if you are working with multiple properties or your data is hierarchical in nature. +Spring Boot provides an alternative method of working with properties that lets strongly typed beans govern and validate the configuration of your application. + +| |See also the [differences between `@Value` and type-safe configuration properties](#features.external-config.typesafe-configuration-properties.vs-value-annotation).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.8.1. JavaBean properties binding + +It is possible to bind a bean declaring standard JavaBean properties as shown in the following example: + +``` +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.springframework.boot.context.properties.ConfigurationProperties; + +@ConfigurationProperties("my.service") +public class MyProperties { + + private boolean enabled; + + private InetAddress remoteAddress; + + private final Security security = new Security(); + + // getters / setters... 
+ + public boolean isEnabled() { + return this.enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public InetAddress getRemoteAddress() { + return this.remoteAddress; + } + + public void setRemoteAddress(InetAddress remoteAddress) { + this.remoteAddress = remoteAddress; + } + + public Security getSecurity() { + return this.security; + } + + public static class Security { + + private String username; + + private String password; + + private List roles = new ArrayList<>(Collections.singleton("USER")); + + // getters / setters... + + public String getUsername() { + return this.username; + } + + public void setUsername(String username) { + this.username = username; + } + + public String getPassword() { + return this.password; + } + + public void setPassword(String password) { + this.password = password; + } + + public List getRoles() { + return this.roles; + } + + public void setRoles(List roles) { + this.roles = roles; + } + + } + +} + +``` + +The preceding POJO defines the following properties: + +* `my.service.enabled`, with a value of `false` by default. + +* `my.service.remote-address`, with a type that can be coerced from `String`. + +* `my.service.security.username`, with a nested "security" object whose name is determined by the name of the property. + In particular, the type is not used at all there and could have been `SecurityProperties`. + +* `my.service.security.password`. + +* `my.service.security.roles`, with a collection of `String` that defaults to `USER`. 
+ +| |The properties that map to `@ConfigurationProperties` classes available in Spring Boot, which are configured through properties files, YAML files, environment variables, and other mechanisms, are public API but the accessors (getters/setters) of the class itself are not meant to be used directly.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Such arrangement relies on a default empty constructor and getters and setters are usually mandatory, since binding is through standard Java Beans property descriptors, just like in Spring MVC.
A setter may be omitted in the following cases:

* Maps, as long as they are initialized, need a getter but not necessarily a setter, since they can be mutated by the binder.

* Collections and arrays can be accessed either through an index (typically with YAML) or by using a single comma-separated value (properties).
In the latter case, a setter is mandatory.
We recommend always adding a setter for such types.
If you initialize a collection, make sure it is not immutable (as in the preceding example).

* If nested POJO properties are initialized (like the `Security` field in the preceding example), a setter is not required.
If you want the binder to create the instance on the fly by using its default constructor, you need a setter.

Some people use Project Lombok to add getters and setters automatically.
Make sure that Lombok does not generate any particular constructor for such a type, as it is used automatically by the container to instantiate the object.

Finally, only standard Java Bean properties are considered and binding on static properties is not supported.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.8.2. 
Constructor binding + +The example in the previous section can be rewritten in an immutable fashion as shown in the following example: + +``` +import java.net.InetAddress; +import java.util.List; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.ConstructorBinding; +import org.springframework.boot.context.properties.bind.DefaultValue; + +@ConstructorBinding +@ConfigurationProperties("my.service") +public class MyProperties { + + // fields... + + private final boolean enabled; + + private final InetAddress remoteAddress; + + private final Security security; + + public MyProperties(boolean enabled, InetAddress remoteAddress, Security security) { + this.enabled = enabled; + this.remoteAddress = remoteAddress; + this.security = security; + } + + // getters... + + public boolean isEnabled() { + return this.enabled; + } + + public InetAddress getRemoteAddress() { + return this.remoteAddress; + } + + public Security getSecurity() { + return this.security; + } + + public static class Security { + + // fields... + + private final String username; + + private final String password; + + private final List roles; + + public Security(String username, String password, @DefaultValue("USER") List roles) { + this.username = username; + this.password = password; + this.roles = roles; + } + + // getters... + + public String getUsername() { + return this.username; + } + + public String getPassword() { + return this.password; + } + + public List getRoles() { + return this.roles; + } + + } + +} + +``` + +In this setup, the `@ConstructorBinding` annotation is used to indicate that constructor binding should be used. +This means that the binder will expect to find a constructor with the parameters that you wish to have bound. +If you are using Java 16 or later, constructor binding can be used with records. 
+In this case, unless your record has multiple constructors, there is no need to use `@ConstructorBinding`. + +Nested members of a `@ConstructorBinding` class (such as `Security` in the example above) will also be bound through their constructor. + +Default values can be specified using `@DefaultValue` and the same conversion service will be applied to coerce the `String` value to the target type of a missing property. +By default, if no properties are bound to `Security`, the `MyProperties` instance will contain a `null` value for `security`. +If you wish you return a non-null instance of `Security` even when no properties are bound to it, you can use an empty `@DefaultValue` annotation to do so: + +``` +public MyProperties(boolean enabled, InetAddress remoteAddress, @DefaultValue Security security) { + this.enabled = enabled; + this.remoteAddress = remoteAddress; + this.security = security; +} + +``` + +| |To use constructor binding the class must be enabled using `@EnableConfigurationProperties` or configuration property scanning.
You cannot use constructor binding with beans that are created by the regular Spring mechanisms (for example `@Component` beans, beans created by using `@Bean` methods or beans loaded by using `@Import`).|
As such, it is not well-suited to configuration property injection.
For consistency with properties of other types, if you do declare an `Optional` property and it has no value, `null` rather than an empty `Optional` will be bound.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.8.3. Enabling @ConfigurationProperties-annotated types + +Spring Boot provides infrastructure to bind `@ConfigurationProperties` types and register them as beans. +You can either enable configuration properties on a class-by-class basis or enable configuration property scanning that works in a similar manner to component scanning. + +Sometimes, classes annotated with `@ConfigurationProperties` might not be suitable for scanning, for example, if you’re developing your own auto-configuration or you want to enable them conditionally. +In these cases, specify the list of types to process using the `@EnableConfigurationProperties` annotation. +This can be done on any `@Configuration` class, as shown in the following example: + +``` +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +@EnableConfigurationProperties(SomeProperties.class) +public class MyConfiguration { + +} + +``` + +To use configuration property scanning, add the `@ConfigurationPropertiesScan` annotation to your application. +Typically, it is added to the main application class that is annotated with `@SpringBootApplication` but it can be added to any `@Configuration` class. +By default, scanning will occur from the package of the class that declares the annotation. 
+If you want to define specific packages to scan, you can do so as shown in the following example: + +``` +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.context.properties.ConfigurationPropertiesScan; + +@SpringBootApplication +@ConfigurationPropertiesScan({ "com.example.app", "com.example.another" }) +public class MyApplication { + +} + +``` + +| |When the `@ConfigurationProperties` bean is registered using configuration property scanning or through `@EnableConfigurationProperties`, the bean has a conventional name: `<prefix>-<fqn>`, where `<prefix>` is the environment key prefix specified in the `@ConfigurationProperties` annotation and `<fqn>` is the fully qualified name of the bean.
If the annotation does not provide any prefix, only the fully qualified name of the bean is used.

The bean name in the example above is `com.example.app-com.example.app.SomeProperties`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +We recommend that `@ConfigurationProperties` only deal with the environment and, in particular, does not inject other beans from the context. +For corner cases, setter injection can be used or any of the `*Aware` interfaces provided by the framework (such as `EnvironmentAware` if you need access to the `Environment`). +If you still want to inject other beans using the constructor, the configuration properties bean must be annotated with `@Component` and use JavaBean-based property binding. + +#### 2.8.4. Using @ConfigurationProperties-annotated types + +This style of configuration works particularly well with the `SpringApplication` external YAML configuration, as shown in the following example: + +``` +my: + service: + remote-address: 192.168.1.1 + security: + username: "admin" + roles: + - "USER" + - "ADMIN" +``` + +To work with `@ConfigurationProperties` beans, you can inject them in the same way as any other bean, as shown in the following example: + +``` +import org.springframework.stereotype.Service; + +@Service +public class MyService { + + private final SomeProperties properties; + + public MyService(SomeProperties properties) { + this.properties = properties; + } + + public void openConnection() { + Server server = new Server(this.properties.getRemoteAddress()); + server.start(); + // ... + } + + // ... 
+ +} + +``` + +| |Using `@ConfigurationProperties` also lets you generate metadata files that can be used by IDEs to offer auto-completion for your own keys.
See the [appendix](configuration-metadata.html#appendix.configuration-metadata) for details.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.8.5. Third-party Configuration + +As well as using `@ConfigurationProperties` to annotate a class, you can also use it on public `@Bean` methods. +Doing so can be particularly useful when you want to bind properties to third-party components that are outside of your control. + +To configure a bean from the `Environment` properties, add `@ConfigurationProperties` to its bean registration, as shown in the following example: + +``` +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class ThirdPartyConfiguration { + + @Bean + @ConfigurationProperties(prefix = "another") + public AnotherComponent anotherComponent() { + return new AnotherComponent(); + } + +} + +``` + +Any JavaBean property defined with the `another` prefix is mapped onto that `AnotherComponent` bean in manner similar to the preceding `SomeProperties` example. + +#### 2.8.6. Relaxed Binding + +Spring Boot uses some relaxed rules for binding `Environment` properties to `@ConfigurationProperties` beans, so there does not need to be an exact match between the `Environment` property name and the bean property name. +Common examples where this is useful include dash-separated environment properties (for example, `context-path` binds to `contextPath`), and capitalized environment properties (for example, `PORT` binds to `port`). 
+ +As an example, consider the following `@ConfigurationProperties` class: + +``` +import org.springframework.boot.context.properties.ConfigurationProperties; + +@ConfigurationProperties(prefix = "my.main-project.person") +public class MyPersonProperties { + + private String firstName; + + public String getFirstName() { + return this.firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + +} + +``` + +With the preceding code, the following properties names can all be used: + +| Property | Note | +|-----------------------------------|----------------------------------------------------------------------------------------------| +|`my.main-project.person.first-name`| Kebab case, which is recommended for use in `.properties` and `.yml` files. | +|`my.main-project.person.firstName` | Standard camel case syntax. | +|`my.main-project.person.first_name`|Underscore notation, which is an alternative format for use in `.properties` and `.yml` files.| +| `MY_MAINPROJECT_PERSON_FIRSTNAME` | Upper case format, which is recommended when using system environment variables. 
| + +| |The `prefix` value for the annotation *must* be in kebab case (lowercase and separated by `-`, such as `my.main-project.person`).| +|---|---------------------------------------------------------------------------------------------------------------------------------| + +| Property Source | Simple | List | +|---------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Properties Files | Camel case, kebab case, or underscore notation | Standard list syntax using `[ ]` or comma-separated values | +| YAML Files | Camel case, kebab case, or underscore notation | Standard YAML list syntax or comma-separated values | +|Environment Variables|Upper case format with underscore as the delimiter (see [Binding from Environment Variables](#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables)).|Numeric values surrounded by underscores (see [Binding from Environment Variables](#features.external-config.typesafe-configuration-properties.relaxed-binding.environment-variables))| +| System properties | Camel case, kebab case, or underscore notation | Standard list syntax using `[ ]` or comma-separated values | + +| |We recommend that, when possible, properties are stored in lower-case kebab format, such as `my.person.first-name=Rod`.| +|---|-----------------------------------------------------------------------------------------------------------------------| + +##### Binding Maps# + +When binding to `Map` properties you may need to use a special bracket notation so that the original `key` value is preserved. 
+If the key is not surrounded by `[]`, any characters that are not alpha-numeric, `-` or `.` are removed. + +For example, consider binding the following properties to a `Map`: + +Properties + +``` +my.map.[/key1]=value1 +my.map.[/key2]=value2 +my.map./key3=value3 +``` + +Yaml + +``` +my: + map: + "[/key1]": "value1" + "[/key2]": "value2" + "/key3": "value3" +``` + +| |For YAML files, the brackets need to be surrounded by quotes for the keys to be parsed properly.| +|---|------------------------------------------------------------------------------------------------| + +The properties above will bind to a `Map` with `/key1`, `/key2` and `key3` as the keys in the map. +The slash has been removed from `key3` because it was not surrounded by square brackets. + +You may also occasionally need to use the bracket notation if your `key` contains a `.` and you are binding to non-scalar value. +For example, binding `a.b=c` to `Map` will return a Map with the entry `{"a"={"b"="c"}}` whereas `[a.b]=c` will return a Map with the entry `{"a.b"="c"}`. + +##### Binding from Environment Variables + +Most operating systems impose strict rules around the names that can be used for environment variables. +For example, Linux shell variables can contain only letters (`a` to `z` or `A` to `Z`), numbers (`0` to `9`) or the underscore character (`_`). +By convention, Unix shell variables will also have their names in UPPERCASE. + +Spring Boot’s relaxed binding rules are, as much as possible, designed to be compatible with these naming restrictions. + +To convert a property name in the canonical-form to an environment variable name you can follow these rules: + +* Replace dots (`.`) with underscores (`_`). + +* Remove any dashes (`-`). + +* Convert to uppercase. + +For example, the configuration property `spring.main.log-startup-info` would be an environment variable named `SPRING_MAIN_LOGSTARTUPINFO`. + +Environment variables can also be used when binding to object lists. 
+To bind to a `List`, the element number should be surrounded with underscores in the variable name. + +For example, the configuration property `my.service[0].other` would use an environment variable named `MY_SERVICE_0_OTHER`. + +#### 2.8.7. Merging Complex Types + +When lists are configured in more than one place, overriding works by replacing the entire list. + +For example, assume a `MyPojo` object with `name` and `description` attributes that are `null` by default. +The following example exposes a list of `MyPojo` objects from `MyProperties`: + +``` +import java.util.ArrayList; +import java.util.List; + +import org.springframework.boot.context.properties.ConfigurationProperties; + +@ConfigurationProperties("my") +public class MyProperties { + + private final List list = new ArrayList<>(); + + public List getList() { + return this.list; + } + +} + +``` + +Consider the following configuration: + +Properties + +``` +my.list[0].name=my name +my.list[0].description=my description +#--- +spring.config.activate.on-profile=dev +my.list[0].name=my another name +``` + +Yaml + +``` +my: + list: + - name: "my name" + description: "my description" +--- +spring: + config: + activate: + on-profile: "dev" +my: + list: + - name: "my another name" +``` + +If the `dev` profile is not active, `MyProperties.list` contains one `MyPojo` entry, as previously defined. +If the `dev` profile is enabled, however, the `list` *still* contains only one entry (with a name of `my another name` and a description of `null`). +This configuration *does not* add a second `MyPojo` instance to the list, and it does not merge the items. + +When a `List` is specified in multiple profiles, the one with the highest priority (and only that one) is used. 
+Consider the following example: + +Properties + +``` +my.list[0].name=my name +my.list[0].description=my description +my.list[1].name=another name +my.list[1].description=another description +#--- +spring.config.activate.on-profile=dev +my.list[0].name=my another name +``` + +Yaml + +``` +my: + list: + - name: "my name" + description: "my description" + - name: "another name" + description: "another description" +--- +spring: + config: + activate: + on-profile: "dev" +my: + list: + - name: "my another name" +``` + +In the preceding example, if the `dev` profile is active, `MyProperties.list` contains *one* `MyPojo` entry (with a name of `my another name` and a description of `null`). +For YAML, both comma-separated lists and YAML lists can be used for completely overriding the contents of the list. + +For `Map` properties, you can bind with property values drawn from multiple sources. +However, for the same property in multiple sources, the one with the highest priority is used. +The following example exposes a `Map` from `MyProperties`: + +``` +import java.util.LinkedHashMap; +import java.util.Map; + +import org.springframework.boot.context.properties.ConfigurationProperties; + +@ConfigurationProperties("my") +public class MyProperties { + + private final Map map = new LinkedHashMap<>(); + + public Map getMap() { + return this.map; + } + +} + +``` + +Consider the following configuration: + +Properties + +``` +my.map.key1.name=my name 1 +my.map.key1.description=my description 1 +#--- +spring.config.activate.on-profile=dev +my.map.key1.name=dev name 1 +my.map.key2.name=dev name 2 +my.map.key2.description=dev description 2 +``` + +Yaml + +``` +my: + map: + key1: + name: "my name 1" + description: "my description 1" +--- +spring: + config: + activate: + on-profile: "dev" +my: + map: + key1: + name: "dev name 1" + key2: + name: "dev name 2" + description: "dev description 2" +``` + +If the `dev` profile is not active, `MyProperties.map` contains one entry with key 
`key1` (with a name of `my name 1` and a description of `my description 1`). +If the `dev` profile is enabled, however, `map` contains two entries with keys `key1` (with a name of `dev name 1` and a description of `my description 1`) and `key2` (with a name of `dev name 2` and a description of `dev description 2`). + +| |The preceding merging rules apply to properties from all property sources, and not just files.| +|---|----------------------------------------------------------------------------------------------| + +#### 2.8.8. Properties Conversion + +Spring Boot attempts to coerce the external application properties to the right type when it binds to the `@ConfigurationProperties` beans. +If you need custom type conversion, you can provide a `ConversionService` bean (with a bean named `conversionService`) or custom property editors (through a `CustomEditorConfigurer` bean) or custom `Converters` (with bean definitions annotated as `@ConfigurationPropertiesBinding`). + +| |As this bean is requested very early during the application lifecycle, make sure to limit the dependencies that your `ConversionService` is using.
Typically, any dependency that you require may not be fully initialized at creation time.
You may want to rename your custom `ConversionService` if it is not required for configuration keys coercion and only rely on custom converters qualified with `@ConfigurationPropertiesBinding`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Converting Durations + +Spring Boot has dedicated support for expressing durations. +If you expose a `java.time.Duration` property, the following formats in application properties are available: + +* A regular `long` representation (using milliseconds as the default unit unless a `@DurationUnit` has been specified) + +* The standard ISO-8601 format [used by `java.time.Duration`](https://docs.oracle.com/javase/8/docs/api/java/time/Duration.html#parse-java.lang.CharSequence-) + +* A more readable format where the value and the unit are coupled (`10s` means 10 seconds) + +Consider the following example: + +``` +import java.time.Duration; +import java.time.temporal.ChronoUnit; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.convert.DurationUnit; + +@ConfigurationProperties("my") +public class MyProperties { + + @DurationUnit(ChronoUnit.SECONDS) + private Duration sessionTimeout = Duration.ofSeconds(30); + + private Duration readTimeout = Duration.ofMillis(1000); + + // getters / setters... 
+ + public Duration getSessionTimeout() { + return this.sessionTimeout; + } + + public void setSessionTimeout(Duration sessionTimeout) { + this.sessionTimeout = sessionTimeout; + } + + public Duration getReadTimeout() { + return this.readTimeout; + } + + public void setReadTimeout(Duration readTimeout) { + this.readTimeout = readTimeout; + } + +} + +``` + +To specify a session timeout of 30 seconds, `30`, `PT30S` and `30s` are all equivalent. +A read timeout of 500ms can be specified in any of the following form: `500`, `PT0.5S` and `500ms`. + +You can also use any of the supported units. +These are: + +* `ns` for nanoseconds + +* `us` for microseconds + +* `ms` for milliseconds + +* `s` for seconds + +* `m` for minutes + +* `h` for hours + +* `d` for days + +The default unit is milliseconds and can be overridden using `@DurationUnit` as illustrated in the sample above. + +If you prefer to use constructor binding, the same properties can be exposed, as shown in the following example: + +``` +import java.time.Duration; +import java.time.temporal.ChronoUnit; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.ConstructorBinding; +import org.springframework.boot.context.properties.bind.DefaultValue; +import org.springframework.boot.convert.DurationUnit; + +@ConfigurationProperties("my") +@ConstructorBinding +public class MyProperties { + + // fields... + + private final Duration sessionTimeout; + + private final Duration readTimeout; + + public MyProperties(@DurationUnit(ChronoUnit.SECONDS) @DefaultValue("30s") Duration sessionTimeout, + @DefaultValue("1000ms") Duration readTimeout) { + this.sessionTimeout = sessionTimeout; + this.readTimeout = readTimeout; + } + + // getters... 
+ + public Duration getSessionTimeout() { + return this.sessionTimeout; + } + + public Duration getReadTimeout() { + return this.readTimeout; + } + +} + +``` + +| |If you are upgrading a `Long` property, make sure to define the unit (using `@DurationUnit`) if it is not milliseconds.
Doing so gives a transparent upgrade path while supporting a much richer format.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Converting periods + +In addition to durations, Spring Boot can also work with the `java.time.Period` type. +The following formats can be used in application properties: + +* A regular `int` representation (using days as the default unit unless a `@PeriodUnit` has been specified) + +* The standard ISO-8601 format [used by `java.time.Period`](https://docs.oracle.com/javase/8/docs/api/java/time/Period.html#parse-java.lang.CharSequence-) + +* A simpler format where the value and the unit pairs are coupled (`1y3d` means 1 year and 3 days) + +The following units are supported with the simple format: + +* `y` for years + +* `m` for months + +* `w` for weeks + +* `d` for days + +| |The `java.time.Period` type never actually stores the number of weeks, it is a shortcut that means “7 days”.| +|---|------------------------------------------------------------------------------------------------------------| + +##### Converting Data Sizes + +Spring Framework has a `DataSize` value type that expresses a size in bytes.
+If you expose a `DataSize` property, the following formats in application properties are available: + +* A regular `long` representation (using bytes as the default unit unless a `@DataSizeUnit` has been specified) + +* A more readable format where the value and the unit are coupled (`10MB` means 10 megabytes) + +Consider the following example: + +``` +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.convert.DataSizeUnit; +import org.springframework.util.unit.DataSize; +import org.springframework.util.unit.DataUnit; + +@ConfigurationProperties("my") +public class MyProperties { + + @DataSizeUnit(DataUnit.MEGABYTES) + private DataSize bufferSize = DataSize.ofMegabytes(2); + + private DataSize sizeThreshold = DataSize.ofBytes(512); + + // getters/setters... + + public DataSize getBufferSize() { + return this.bufferSize; + } + + public void setBufferSize(DataSize bufferSize) { + this.bufferSize = bufferSize; + } + + public DataSize getSizeThreshold() { + return this.sizeThreshold; + } + + public void setSizeThreshold(DataSize sizeThreshold) { + this.sizeThreshold = sizeThreshold; + } + +} + +``` + +To specify a buffer size of 10 megabytes, `10` and `10MB` are equivalent. +A size threshold of 256 bytes can be specified as `256` or `256B`. + +You can also use any of the supported units. +These are: + +* `B` for bytes + +* `KB` for kilobytes + +* `MB` for megabytes + +* `GB` for gigabytes + +* `TB` for terabytes + +The default unit is bytes and can be overridden using `@DataSizeUnit` as illustrated in the sample above. 
+ +If you prefer to use constructor binding, the same properties can be exposed, as shown in the following example: + +``` +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.ConstructorBinding; +import org.springframework.boot.context.properties.bind.DefaultValue; +import org.springframework.boot.convert.DataSizeUnit; +import org.springframework.util.unit.DataSize; +import org.springframework.util.unit.DataUnit; + +@ConfigurationProperties("my") +@ConstructorBinding +public class MyProperties { + + // fields... + + private final DataSize bufferSize; + + private final DataSize sizeThreshold; + + public MyProperties(@DataSizeUnit(DataUnit.MEGABYTES) @DefaultValue("2MB") DataSize bufferSize, + @DefaultValue("512B") DataSize sizeThreshold) { + this.bufferSize = bufferSize; + this.sizeThreshold = sizeThreshold; + } + + // getters... + + public DataSize getBufferSize() { + return this.bufferSize; + } + + public DataSize getSizeThreshold() { + return this.sizeThreshold; + } + +} + +``` + +| |If you are upgrading a `Long` property, make sure to define the unit (using `@DataSizeUnit`) if it is not bytes.
Doing so gives a transparent upgrade path while supporting a much richer format.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.8.9. @ConfigurationProperties Validation + +Spring Boot attempts to validate `@ConfigurationProperties` classes whenever they are annotated with Spring’s `@Validated` annotation. +You can use JSR-303 `javax.validation` constraint annotations directly on your configuration class. +To do so, ensure that a compliant JSR-303 implementation is on your classpath and then add constraint annotations to your fields, as shown in the following example: + +``` +import java.net.InetAddress; + +import javax.validation.constraints.NotNull; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.validation.annotation.Validated; + +@ConfigurationProperties("my.service") +@Validated +public class MyProperties { + + @NotNull + private InetAddress remoteAddress; + + // getters/setters... + + public InetAddress getRemoteAddress() { + return this.remoteAddress; + } + + public void setRemoteAddress(InetAddress remoteAddress) { + this.remoteAddress = remoteAddress; + } + +} + +``` + +| |You can also trigger validation by annotating the `@Bean` method that creates the configuration properties with `@Validated`.| +|---|-----------------------------------------------------------------------------------------------------------------------------| + +To ensure that validation is always triggered for nested properties, even when no properties are found, the associated field must be annotated with `@Valid`. 
+The following example builds on the preceding `MyProperties` example: + +``` +import java.net.InetAddress; + +import javax.validation.Valid; +import javax.validation.constraints.NotEmpty; +import javax.validation.constraints.NotNull; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.validation.annotation.Validated; + +@ConfigurationProperties("my.service") +@Validated +public class MyProperties { + + @NotNull + private InetAddress remoteAddress; + + @Valid + private final Security security = new Security(); + + // getters/setters... + + public InetAddress getRemoteAddress() { + return this.remoteAddress; + } + + public void setRemoteAddress(InetAddress remoteAddress) { + this.remoteAddress = remoteAddress; + } + + public Security getSecurity() { + return this.security; + } + + public static class Security { + + @NotEmpty + private String username; + + // getters/setters... + + public String getUsername() { + return this.username; + } + + public void setUsername(String username) { + this.username = username; + } + + } + +} + +``` + +You can also add a custom Spring `Validator` by creating a bean definition called `configurationPropertiesValidator`. +The `@Bean` method should be declared `static`. +The configuration properties validator is created very early in the application’s lifecycle, and declaring the `@Bean` method as static lets the bean be created without having to instantiate the `@Configuration` class. +Doing so avoids any problems that may be caused by early instantiation. + +| |The `spring-boot-actuator` module includes an endpoint that exposes all `@ConfigurationProperties` beans.
Point your web browser to `/actuator/configprops` or use the equivalent JMX endpoint.
See the "[Production ready features](actuator.html#actuator.endpoints)" section for details.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.8.10. @ConfigurationProperties vs. @Value + +The `@Value` annotation is a core container feature, and it does not provide the same features as type-safe configuration properties. +The following table summarizes the features that are supported by `@ConfigurationProperties` and `@Value`: + +| Feature |`@ConfigurationProperties`| `@Value` | +|----------------------------------------------------------------------------------------------|--------------------------|----------------------------------------------------------------------------------------------------------------| +|[Relaxed binding](#features.external-config.typesafe-configuration-properties.relaxed-binding)| Yes |Limited (see [note below](#features.external-config.typesafe-configuration-properties.vs-value-annotation.note))| +| [Meta-data support](configuration-metadata.html#appendix.configuration-metadata) | Yes | No | +| `SpEL` evaluation | No | Yes | + +| |If you do want to use `@Value`, we recommend that you refer to property names using their canonical form (kebab-case using only lowercase letters).
This will allow Spring Boot to use the same logic as it does when relaxed binding `@ConfigurationProperties`.
For example, `@Value("${demo.item-price}")` will pick up `demo.item-price` and `demo.itemPrice` forms from the `application.properties` file, as well as `DEMO_ITEMPRICE` from the system environment.
If you used `@Value("${demo.itemPrice}")` instead, `demo.item-price` and `DEMO_ITEMPRICE` would not be considered.| +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you define a set of configuration keys for your own components, we recommend you group them in a POJO annotated with `@ConfigurationProperties`. +Doing so will provide you with a structured, type-safe object that you can inject into your own beans. + +`SpEL` expressions from [application property files](#features.external-config.files) are not processed at the time of parsing these files and populating the environment. +However, it is possible to write a `SpEL` expression in `@Value`. +If the value of a property from an application property file is a `SpEL` expression, it will be evaluated when consumed through `@Value`. + +## 3. Profiles + + +Spring Profiles provide a way to segregate parts of your application configuration and make it available only in certain environments. +Any `@Component`, `@Configuration` or `@ConfigurationProperties` can be marked with `@Profile` to limit when it is loaded, as shown in the following example: + +``` +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Profile; + +@Configuration(proxyBeanMethods = false) +@Profile("production") +public class ProductionConfiguration { + + // ...
+ +} + +``` + +| |If `@ConfigurationProperties` beans are registered through `@EnableConfigurationProperties` instead of automatic scanning, the `@Profile` annotation needs to be specified on the `@Configuration` class that has the `@EnableConfigurationProperties` annotation.
In the case where `@ConfigurationProperties` are scanned, `@Profile` can be specified on the `@ConfigurationProperties` class itself.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can use a `spring.profiles.active` `Environment` property to specify which profiles are active. +You can specify the property in any of the ways described earlier in this chapter. +For example, you could include it in your `application.properties`, as shown in the following example: + +Properties + +``` +spring.profiles.active=dev,hsqldb +``` + +Yaml + +``` +spring: + profiles: + active: "dev,hsqldb" +``` + +You could also specify it on the command line by using the following switch: `--spring.profiles.active=dev,hsqldb`. + +If no profile is active, a default profile is enabled. +The name of the default profile is `default` and it can be tuned using the `spring.profiles.default` `Environment` property, as shown in the following example: + +Properties + +``` +spring.profiles.default=none +``` + +Yaml + +``` +spring: + profiles: + default: "none" +``` + +### 3.1. Adding Active Profiles + +The `spring.profiles.active` property follows the same ordering rules as other properties: The highest `PropertySource` wins. +This means that you can specify active profiles in `application.properties` and then **replace** them by using the command line switch. + +Sometimes, it is useful to have properties that **add** to the active profiles rather than replace them. +The `SpringApplication` entry point has a Java API for setting additional profiles (that is, on top of those activated by the `spring.profiles.active` property). 
+See the `setAdditionalProfiles()` method in [SpringApplication](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/SpringApplication.html). +Profile groups, which are described in the [next section](#features.profiles.groups), can also be used to add active profiles if a given profile is active. + +### 3.2. Profile Groups + +Occasionally the profiles that you define and use in your application are too fine-grained and become cumbersome to use. +For example, you might have `proddb` and `prodmq` profiles that you use to enable database and messaging features independently. + +To help with this, Spring Boot lets you define profile groups. +A profile group allows you to define a logical name for a related group of profiles. + +For example, we can create a `production` group that consists of our `proddb` and `prodmq` profiles. + +Properties + +``` +spring.profiles.group.production[0]=proddb +spring.profiles.group.production[1]=prodmq +``` + +Yaml + +``` +spring: + profiles: + group: + production: + - "proddb" + - "prodmq" +``` + +Our application can now be started using `--spring.profiles.active=production` to activate the `production`, `proddb` and `prodmq` profiles in one hit. + +### 3.3. Programmatically Setting Profiles + +You can programmatically set active profiles by calling `SpringApplication.setAdditionalProfiles(…​)` before your application runs. +It is also possible to activate profiles by using Spring’s `ConfigurableEnvironment` interface. + +### 3.4. Profile-specific Configuration Files + +Profile-specific variants of both `application.properties` (or `application.yml`) and files referenced through `@ConfigurationProperties` are considered as files and loaded. +See "[Profile Specific Files](#features.external-config.files.profile-specific)" for details. + +## 4. Logging + +Spring Boot uses [Commons Logging](https://commons.apache.org/logging) for all internal logging but leaves the underlying log implementation open. 
+Default configurations are provided for [Java Util Logging](https://docs.oracle.com/javase/8/docs/api/java/util/logging/package-summary.html), [Log4J2](https://logging.apache.org/log4j/2.x/), and [Logback](https://logback.qos.ch/). +In each case, loggers are pre-configured to use console output with optional file output also available. + +By default, if you use the “Starters”, Logback is used for logging. +Appropriate Logback routing is also included to ensure that dependent libraries that use Java Util Logging, Commons Logging, Log4J, or SLF4J all work correctly. + +| |There are a lot of logging frameworks available for Java.
Do not worry if the above list seems confusing.
Generally, you do not need to change your logging dependencies and the Spring Boot defaults work just fine.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When you deploy your application to a servlet container or application server, logging performed with the Java Util Logging API is not routed into your application’s logs.
This prevents logging performed by the container or other applications that have been deployed to it from appearing in your application’s logs.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.1. Log Format + +The default log output from Spring Boot resembles the following example: + +``` +2019-03-05 10:57:51.112 INFO 45469 --- [ main] org.apache.catalina.core.StandardEngine : Starting Servlet Engine: Apache Tomcat/7.0.52 +2019-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext +2019-03-05 10:57:51.253 INFO 45469 --- [ost-startStop-1] o.s.web.context.ContextLoader : Root WebApplicationContext: initialization completed in 1358 ms +2019-03-05 10:57:51.698 INFO 45469 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean : Mapping servlet: 'dispatcherServlet' to [/] +2019-03-05 10:57:51.702 INFO 45469 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'hiddenHttpMethodFilter' to: [/*] +``` + +The following items are output: + +* Date and Time: Millisecond precision and easily sortable. + +* Log Level: `ERROR`, `WARN`, `INFO`, `DEBUG`, or `TRACE`. + +* Process ID. + +* A `---` separator to distinguish the start of actual log messages. + +* Thread name: Enclosed in square brackets (may be truncated for console output). + +* Logger name: This is usually the source class name (often abbreviated). + +* The log message. + +| |Logback does not have a `FATAL` level.
It is mapped to `ERROR`.| +|---|-------------------------------------------------------------------| + +### 4.2. Console Output + +The default log configuration echoes messages to the console as they are written. +By default, `ERROR`-level, `WARN`-level, and `INFO`-level messages are logged. +You can also enable a “debug” mode by starting your application with a `--debug` flag. + +``` +$ java -jar myapp.jar --debug +``` + +| |You can also specify `debug=true` in your `application.properties`.| +|---|-------------------------------------------------------------------| + +When the debug mode is enabled, a selection of core loggers (embedded container, Hibernate, and Spring Boot) are configured to output more information. +Enabling the debug mode does *not* configure your application to log all messages with `DEBUG` level. + +Alternatively, you can enable a “trace” mode by starting your application with a `--trace` flag (or `trace=true` in your `application.properties`). +Doing so enables trace logging for a selection of core loggers (embedded container, Hibernate schema generation, and the whole Spring portfolio). + +#### 4.2.1. Color-coded Output + +If your terminal supports ANSI, color output is used to aid readability. +You can set `spring.output.ansi.enabled` to a [supported value](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/ansi/AnsiOutput.Enabled.html) to override the auto-detection. + +Color coding is configured by using the `%clr` conversion word. +In its simplest form, the converter colors the output according to the log level, as shown in the following example: + +``` +%clr(%5p) +``` + +The following table describes the mapping of log levels to colors: + +| Level |Color | +|-------|------| +|`FATAL`| Red | +|`ERROR`| Red | +|`WARN` |Yellow| +|`INFO` |Green | +|`DEBUG`|Green | +|`TRACE`|Green | + +Alternatively, you can specify the color or style that should be used by providing it as an option to the conversion. 
+For example, to make the text yellow, use the following setting: + +``` +%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){yellow} +``` + +The following colors and styles are supported: + +* `blue` + +* `cyan` + +* `faint` + +* `green` + +* `magenta` + +* `red` + +* `yellow` + +### 4.3. File Output + +By default, Spring Boot logs only to the console and does not write log files. +If you want to write log files in addition to the console output, you need to set a `logging.file.name` or `logging.file.path` property (for example, in your `application.properties`). + +The following table shows how the `logging.*` properties can be used together: + +|`logging.file.name`|`logging.file.path`| Example | Description | +|-------------------|-------------------|----------|------------------------------------------------------------------------------------------------------------------------| +| *(none)* | *(none)* | | Console only logging. | +| Specific file | *(none)* | `my.log` | Writes to the specified log file.
Names can be an exact location or relative to the current directory. | +| *(none)* |Specific directory |`/var/log`|Writes `spring.log` to the specified directory.
Names can be an exact location or relative to the current directory.| + +Log files rotate when they reach 10 MB and, as with console output, `ERROR`-level, `WARN`-level, and `INFO`-level messages are logged by default. + +| |Logging properties are independent of the actual logging infrastructure.
As a result, specific configuration keys (such as `logback.configurationFile` for Logback) are not managed by Spring Boot.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.4. File Rotation + +If you are using Logback, it is possible to fine-tune log rotation settings using your `application.properties` or `application.yaml` file. +For all other logging systems, you will need to configure rotation settings directly yourself (for example, if you use Log4J2 then you could add a `log4j2.xml` or `log4j2-spring.xml` file). + +The following rotation policy properties are supported: + +| Name | Description | +|------------------------------------------------------|----------------------------------------------------------------------| +| `logging.logback.rollingpolicy.file-name-pattern` | The filename pattern used to create log archives. | +|`logging.logback.rollingpolicy.clean-history-on-start`| If log archive cleanup should occur when the application starts. | +| `logging.logback.rollingpolicy.max-file-size` | The maximum size of log file before it is archived. | +| `logging.logback.rollingpolicy.total-size-cap` |The maximum amount of size log archives can take before being deleted.| +| `logging.logback.rollingpolicy.max-history` | The maximum number of archive log files to keep (defaults to 7). | + +### 4.5. Log Levels + +All the supported logging systems can have the logger levels set in the Spring `Environment` (for example, in `application.properties`) by using `logging.level.<logger-name>=<level>` where `level` is one of TRACE, DEBUG, INFO, WARN, ERROR, FATAL, or OFF. +The `root` logger can be configured by using `logging.level.root`. 
+ +The following example shows potential logging settings in `application.properties`: + +Properties + +``` +logging.level.root=warn +logging.level.org.springframework.web=debug +logging.level.org.hibernate=error +``` + +Yaml + +``` +logging: + level: + root: "warn" + org.springframework.web: "debug" + org.hibernate: "error" +``` + +It is also possible to set logging levels using environment variables. +For example, `LOGGING_LEVEL_ORG_SPRINGFRAMEWORK_WEB=DEBUG` will set `org.springframework.web` to `DEBUG`. + +| |The above approach will only work for package level logging.
Since relaxed binding always converts environment variables to lowercase, it is not possible to configure logging for an individual class in this way.
If you need to configure logging for a class, you can use [the `SPRING_APPLICATION_JSON`](#features.external-config.application-json) variable.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.6. Log Groups + +It is often useful to be able to group related loggers together so that they can all be configured at the same time. +For example, you might commonly change the logging levels for *all* Tomcat related loggers, but you can not easily remember top level packages. + +To help with this, Spring Boot allows you to define logging groups in your Spring `Environment`. +For example, here is how you could define a “tomcat” group by adding it to your `application.properties`: + +Properties + +``` +logging.group.tomcat=org.apache.catalina,org.apache.coyote,org.apache.tomcat +``` + +Yaml + +``` +logging: + group: + tomcat: "org.apache.catalina,org.apache.coyote,org.apache.tomcat" +``` + +Once defined, you can change the level for all the loggers in the group with a single line: + +Properties + +``` +logging.level.tomcat=trace +``` + +Yaml + +``` +logging: + level: + tomcat: "trace" +``` + +Spring Boot includes the following pre-defined logging groups that can be used out-of-the-box: + +|Name| Loggers | +|----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|web |`org.springframework.core.codec`, `org.springframework.http`, `org.springframework.web`, `org.springframework.boot.actuate.endpoint.web`, `org.springframework.boot.web.servlet.ServletContextInitializerBeans`| +|sql | 
`org.springframework.jdbc.core`, `org.hibernate.SQL`, `org.jooq.tools.LoggerListener` | + +### 4.7. Using a Log Shutdown Hook + +In order to release logging resources when your application terminates, a shutdown hook that will trigger log system cleanup when the JVM exits is provided. +This shutdown hook is registered automatically unless your application is deployed as a war file. +If your application has complex context hierarchies the shutdown hook may not meet your needs. +If it does not, disable the shutdown hook and investigate the options provided directly by the underlying logging system. +For example, Logback offers [context selectors](http://logback.qos.ch/manual/loggingSeparation.html) which allow each Logger to be created in its own context. +You can use the `logging.register-shutdown-hook` property to disable the shutdown hook. +Setting it to `false` will disable the registration. +You can set the property in your `application.properties` or `application.yaml` file: + +Properties + +``` +logging.register-shutdown-hook=false +``` + +Yaml + +``` +logging: + register-shutdown-hook: false +``` + +### 4.8. Custom Log Configuration + +The various logging systems can be activated by including the appropriate libraries on the classpath and can be further customized by providing a suitable configuration file in the root of the classpath or in a location specified by the following Spring `Environment` property: `logging.config`. + +You can force Spring Boot to use a particular logging system by using the `org.springframework.boot.logging.LoggingSystem` system property. +The value should be the fully qualified class name of a `LoggingSystem` implementation. +You can also disable Spring Boot’s logging configuration entirely by using a value of `none`. + +| |Since logging is initialized **before** the `ApplicationContext` is created, it is not possible to control logging from `@PropertySources` in Spring `@Configuration` files.
The only way to change the logging system or disable it entirely is through System properties.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Depending on your logging system, the following files are loaded: + +| Logging System | Customization | +|-----------------------|---------------------------------------------------------------------------------| +| Logback |`logback-spring.xml`, `logback-spring.groovy`, `logback.xml`, or `logback.groovy`| +| Log4j2 | `log4j2-spring.xml` or `log4j2.xml` | +|JDK (Java Util Logging)| `logging.properties` | + +| |When possible, we recommend that you use the `-spring` variants for your logging configuration (for example, `logback-spring.xml` rather than `logback.xml`).
If you use standard configuration locations, Spring cannot completely control log initialization.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |There are known classloading issues with Java Util Logging that cause problems when running from an 'executable jar'.
We recommend that you avoid it when running from an 'executable jar' if at all possible.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To help with the customization, some other properties are transferred from the Spring `Environment` to System properties, as described in the following table: + +| Spring Environment | System Property | Comments | +|-----------------------------------|-------------------------------|-----------------------------------------------------------------------------------------------------------| +|`logging.exception-conversion-word`|`LOG_EXCEPTION_CONVERSION_WORD`| The conversion word used when logging exceptions. | +| `logging.file.name` | `LOG_FILE` | If defined, it is used in the default log configuration. | +| `logging.file.path` | `LOG_PATH` | If defined, it is used in the default log configuration. | +| `logging.pattern.console` | `CONSOLE_LOG_PATTERN` | The log pattern to use on the console (stdout). | +| `logging.pattern.dateformat` | `LOG_DATEFORMAT_PATTERN` | Appender pattern for log date format. | +| `logging.charset.console` | `CONSOLE_LOG_CHARSET` | The charset to use for console logging. | +| `logging.pattern.file` | `FILE_LOG_PATTERN` | The log pattern to use in a file (if `LOG_FILE` is enabled). | +| `logging.charset.file` | `FILE_LOG_CHARSET` | The charset to use for file logging (if `LOG_FILE` is enabled). | +| `logging.pattern.level` | `LOG_LEVEL_PATTERN` | The format to use when rendering the log level (default `%5p`). 
| +| `PID` | `PID` |The current process ID (discovered if possible and when not already defined as an OS environment variable).| + +If you use Logback, the following properties are also transferred: + +| Spring Environment | System Property | Comments | +|------------------------------------------------------|----------------------------------------------|------------------------------------------------------------------------------------| +| `logging.logback.rollingpolicy.file-name-pattern` | `LOGBACK_ROLLINGPOLICY_FILE_NAME_PATTERN` |Pattern for rolled-over log file names (default `${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz`).| +|`logging.logback.rollingpolicy.clean-history-on-start`|`LOGBACK_ROLLINGPOLICY_CLEAN_HISTORY_ON_START`| Whether to clean the archive log files on startup. | +| `logging.logback.rollingpolicy.max-file-size` | `LOGBACK_ROLLINGPOLICY_MAX_FILE_SIZE` | Maximum log file size. | +| `logging.logback.rollingpolicy.total-size-cap` | `LOGBACK_ROLLINGPOLICY_TOTAL_SIZE_CAP` | Total size of log backups to be kept. | +| `logging.logback.rollingpolicy.max-history` | `LOGBACK_ROLLINGPOLICY_MAX_HISTORY` | Maximum number of archive log files to keep. | + +All the supported logging systems can consult System properties when parsing their configuration files. 
+See the default configurations in `spring-boot.jar` for examples: + +* [Logback](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/resources/org/springframework/boot/logging/logback/defaults.xml) + +* [Log4j 2](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/resources/org/springframework/boot/logging/log4j2/log4j2.xml) + +* [Java Util logging](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/resources/org/springframework/boot/logging/java/logging-file.properties) + +| |If you want to use a placeholder in a logging property, you should use [Spring Boot’s syntax](#features.external-config.files.property-placeholders) and not the syntax of the underlying framework.
Notably, if you use Logback, you should use `:` as the delimiter between a property name and its default value and not use `:-`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |You can add MDC and other ad-hoc content to log lines by overriding only the `LOG_LEVEL_PATTERN` (or `logging.pattern.level` with Logback).
For example, if you use `logging.pattern.level=user:%X{user} %5p`, then the default log format contains an MDC entry for "user", if it exists, as shown in the following example.

```
2019-08-30 12:30:04.031 user:someone INFO 22174 --- [ nio-8080-exec-0] demo.Controller
Handling authenticated request
```| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.9. Logback Extensions + +Spring Boot includes a number of extensions to Logback that can help with advanced configuration. +You can use these extensions in your `logback-spring.xml` configuration file. + +| |Because the standard `logback.xml` configuration file is loaded too early, you cannot use extensions in it.
You need to either use `logback-spring.xml` or define a `logging.config` property.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The extensions cannot be used with Logback’s [configuration scanning](https://logback.qos.ch/manual/configuration.html#autoScan).
If you attempt to do so, making changes to the configuration file results in an error similar to one of the following being logged:| +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +ERROR in ch.qos.logback.core.joran.spi.Interpreter@4:71 - no applicable action for [springProperty], current ElementPath is [[configuration][springProperty]] +ERROR in ch.qos.logback.core.joran.spi.Interpreter@4:71 - no applicable action for [springProfile], current ElementPath is [[configuration][springProfile]] +``` + +#### 4.9.1. Profile-specific Configuration + +The `<springProfile>` tag lets you optionally include or exclude sections of configuration based on the active Spring profiles. +Profile sections are supported anywhere within the `<configuration>` element. +Use the `name` attribute to specify which profile accepts the configuration. +The `<springProfile>` tag can contain a profile name (for example `staging`) or a profile expression. +A profile expression allows for more complicated profile logic to be expressed, for example `production & (eu-central | eu-west)`. +Check the [reference guide](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/core.html#beans-definition-profiles-java) for more details. +The following listing shows three sample profiles: + +``` +<springProfile name="staging"> + <!-- configuration to be enabled when the "staging" profile is active --> +</springProfile> + +<springProfile name="dev | staging"> + <!-- configuration to be enabled when the "dev" or "staging" profiles are active --> +</springProfile> + +<springProfile name="!production"> + <!-- configuration to be enabled when the "production" profile is not active --> +</springProfile> +``` + +#### 4.9.2. Environment Properties + +The `<springProperty>` tag lets you expose properties from the Spring `Environment` for use within Logback. +Doing so can be useful if you want to access values from your `application.properties` file in your Logback configuration. +The tag works in a similar way to Logback’s standard `<property>` tag. +However, rather than specifying a direct `value`, you specify the `source` of the property (from the `Environment`). 
+If you need to store the property somewhere other than in `local` scope, you can use the `scope` attribute. +If you need a fallback value (in case the property is not set in the `Environment`), you can use the `defaultValue` attribute. +The following example shows how to expose properties for use within Logback: + +``` +<springProperty scope="context" name="fluentHost" source="myapp.fluentd.host" defaultValue="localhost"/> +<appender name="FLUENT" class="ch.qos.logback.more.appenders.DataFluentAppender"> + <remoteHost>${fluentHost}</remoteHost> + ... +</appender> +``` + +| |The `source` must be specified in kebab case (such as `my.property-name`).
However, properties can be added to the `Environment` by using the relaxed rules.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 5. Internationalization + +Spring Boot supports localized messages so that your application can cater to users of different language preferences. +By default, Spring Boot looks for the presence of a `messages` resource bundle at the root of the classpath. + +| |The auto-configuration applies when the default properties file for the configured resource bundle is available (`messages.properties` by default).
If your resource bundle contains only language-specific properties files, you are required to add the default.
If no properties file is found that matches any of the configured base names, there will be no auto-configured `MessageSource`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The basename of the resource bundle as well as several other attributes can be configured using the `spring.messages` namespace, as shown in the following example: + +Properties + +``` +spring.messages.basename=messages,config.i18n.messages +spring.messages.fallback-to-system-locale=false +``` + +Yaml + +``` +spring: + messages: + basename: "messages,config.i18n.messages" + fallback-to-system-locale: false +``` + +| |`spring.messages.basename` supports comma-separated list of locations, either a package qualifier or a resource resolved from the classpath root.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------| + +See [`MessageSourceProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/context/MessageSourceProperties.java) for more supported options. + +## 6. JSON + +Spring Boot provides integration with three JSON mapping libraries: + +* Gson + +* Jackson + +* JSON-B + +Jackson is the preferred and default library. + +### 6.1. Jackson + +Auto-configuration for Jackson is provided and Jackson is part of `spring-boot-starter-json`. +When Jackson is on the classpath an `ObjectMapper` bean is automatically configured. 
+Several configuration properties are provided for [customizing the configuration of the `ObjectMapper`](howto.html#howto.spring-mvc.customize-jackson-objectmapper). + +### 6.2. Gson + +Auto-configuration for Gson is provided. +When Gson is on the classpath a `Gson` bean is automatically configured. +Several `spring.gson.*` configuration properties are provided for customizing the configuration. +To take more control, one or more `GsonBuilderCustomizer` beans can be used. + +### 6.3. JSON-B + +Auto-configuration for JSON-B is provided. +When the JSON-B API and an implementation are on the classpath a `Jsonb` bean will be automatically configured. +The preferred JSON-B implementation is Apache Johnzon for which dependency management is provided. + +## 7. Task Execution and Scheduling + +In the absence of an `Executor` bean in the context, Spring Boot auto-configures a `ThreadPoolTaskExecutor` with sensible defaults that can be automatically associated to asynchronous task execution (`@EnableAsync`) and Spring MVC asynchronous request processing. + +| |If you have defined a custom `Executor` in the context, regular task execution (that is `@EnableAsync`) will use it transparently but the Spring MVC support will not be configured as it requires an `AsyncTaskExecutor` implementation (named `applicationTaskExecutor`).
Depending on your target arrangement, you could change your `Executor` into a `ThreadPoolTaskExecutor` or define both a `ThreadPoolTaskExecutor` and an `AsyncConfigurer` wrapping your custom `Executor`.

The auto-configured `TaskExecutorBuilder` allows you to easily create instances that reproduce what the auto-configuration does by default.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The thread pool uses 8 core threads that can grow and shrink according to the load. +Those default settings can be fine-tuned using the `spring.task.execution` namespace, as shown in the following example: + +Properties + +``` +spring.task.execution.pool.max-size=16 +spring.task.execution.pool.queue-capacity=100 +spring.task.execution.pool.keep-alive=10s +``` + +Yaml + +``` +spring: + task: + execution: + pool: + max-size: 16 + queue-capacity: 100 + keep-alive: "10s" +``` + +This changes the thread pool to use a bounded queue so that when the queue is full (100 tasks), the thread pool increases to maximum 16 threads. +Shrinking of the pool is more aggressive as threads are reclaimed when they are idle for 10 seconds (rather than 60 seconds by default). + +A `ThreadPoolTaskScheduler` can also be auto-configured if need to be associated to scheduled task execution (using `@EnableScheduling` for instance). 
+The thread pool uses one thread by default and its settings can be fine-tuned using the `spring.task.scheduling` namespace, as shown in the following example: + +Properties + +``` +spring.task.scheduling.thread-name-prefix=scheduling- +spring.task.scheduling.pool.size=2 +``` + +Yaml + +``` +spring: + task: + scheduling: + thread-name-prefix: "scheduling-" + pool: + size: 2 +``` + +Both a `TaskExecutorBuilder` bean and a `TaskSchedulerBuilder` bean are made available in the context if a custom executor or scheduler needs to be created. + +## 8. Testing + +Spring Boot provides a number of utilities and annotations to help when testing your application. +Test support is provided by two modules: `spring-boot-test` contains core items, and `spring-boot-test-autoconfigure` supports auto-configuration for tests. + +Most developers use the `spring-boot-starter-test` “Starter”, which imports both Spring Boot test modules as well as JUnit Jupiter, AssertJ, Hamcrest, and a number of other useful libraries. + +| |If you have tests that use JUnit 4, JUnit 5’s vintage engine can be used to run them.
To use the vintage engine, add a dependency on `junit-vintage-engine`, as shown in the following example:

```
<dependency>
    <groupId>org.junit.vintage</groupId>
    <artifactId>junit-vintage-engine</artifactId>
    <scope>test</scope>
    <exclusions>
        <exclusion>
            <groupId>org.hamcrest</groupId>
            <artifactId>hamcrest-core</artifactId>
        </exclusion>
    </exclusions>
</dependency>
```| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +`hamcrest-core` is excluded in favor of `org.hamcrest:hamcrest` that is part of `spring-boot-starter-test`. + +### 8.1. Test Scope Dependencies + +The `spring-boot-starter-test` “Starter” (in the `test` `scope`) contains the following provided libraries: + +* [JUnit 5](https://junit.org/junit5/): The de-facto standard for unit testing Java applications. + +* [Spring Test](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#integration-testing) & Spring Boot Test: Utilities and integration test support for Spring Boot applications. + +* [AssertJ](https://assertj.github.io/doc/): A fluent assertion library. + +* [Hamcrest](https://github.com/hamcrest/JavaHamcrest): A library of matcher objects (also known as constraints or predicates). + +* [Mockito](https://site.mockito.org/): A Java mocking framework. + +* [JSONassert](https://github.com/skyscreamer/JSONassert): An assertion library for JSON. + +* [JsonPath](https://github.com/jayway/JsonPath): XPath for JSON. + +We generally find these common libraries to be useful when writing tests. +If these libraries do not suit your needs, you can add additional test dependencies of your own. + +### 8.2. Testing Spring Applications + +One of the major advantages of dependency injection is that it should make your code easier to unit test. 
+You can instantiate objects by using the `new` operator without even involving Spring. +You can also use *mock objects* instead of real dependencies. + +Often, you need to move beyond unit testing and start integration testing (with a Spring `ApplicationContext`). +It is useful to be able to perform integration testing without requiring deployment of your application or needing to connect to other infrastructure. + +The Spring Framework includes a dedicated test module for such integration testing. +You can declare a dependency directly to `org.springframework:spring-test` or use the `spring-boot-starter-test` “Starter” to pull it in transitively. + +If you have not used the `spring-test` module before, you should start by reading the [relevant section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#testing) of the Spring Framework reference documentation. + +### 8.3. Testing Spring Boot Applications + +A Spring Boot application is a Spring `ApplicationContext`, so nothing very special has to be done to test it beyond what you would normally do with a vanilla Spring context. + +| |External properties, logging, and other features of Spring Boot are installed in the context by default only if you use `SpringApplication` to create it.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Boot provides a `@SpringBootTest` annotation, which can be used as an alternative to the standard `spring-test` `@ContextConfiguration` annotation when you need Spring Boot features. +The annotation works by [creating the `ApplicationContext` used in your tests through `SpringApplication`](#features.testing.spring-boot-applications.detecting-configuration). 
+In addition to `@SpringBootTest` a number of other annotations are also provided for [testing more specific slices](#features.testing.spring-boot-applications.autoconfigured-tests) of an application. + +| |If you are using JUnit 4, do not forget to also add `@RunWith(SpringRunner.class)` to your test, otherwise the annotations will be ignored.
If you are using JUnit 5, there is no need to add the equivalent `@ExtendWith(SpringExtension.class)` as `@SpringBootTest` and the other `@…​Test` annotations are already annotated with it.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, `@SpringBootTest` will not start a server. +You can use the `webEnvironment` attribute of `@SpringBootTest` to further refine how your tests run: + +* `MOCK`(Default) : Loads a web `ApplicationContext` and provides a mock web environment. + Embedded servers are not started when using this annotation. + If a web environment is not available on your classpath, this mode transparently falls back to creating a regular non-web `ApplicationContext`. + It can be used in conjunction with [`@AutoConfigureMockMvc` or `@AutoConfigureWebTestClient`](#features.testing.spring-boot-applications.with-mock-environment) for mock-based testing of your web application. + +* `RANDOM_PORT`: Loads a `WebServerApplicationContext` and provides a real web environment. + Embedded servers are started and listen on a random port. + +* `DEFINED_PORT`: Loads a `WebServerApplicationContext` and provides a real web environment. + Embedded servers are started and listen on a defined port (from your `application.properties`) or on the default port of `8080`. + +* `NONE`: Loads an `ApplicationContext` by using `SpringApplication` but does not provide *any* web environment (mock or otherwise). + +| |If your test is `@Transactional`, it rolls back the transaction at the end of each test method by default.
However, as using this arrangement with either `RANDOM_PORT` or `DEFINED_PORT` implicitly provides a real servlet environment, the HTTP client and server run in separate threads and, thus, in separate transactions.
Any transaction initiated on the server does not roll back in this case.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`@SpringBootTest` with `webEnvironment = WebEnvironment.RANDOM_PORT` will also start the management server on a separate random port if your application uses a different port for the management server.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.1. Detecting Web Application Type + +If Spring MVC is available, a regular MVC-based application context is configured. +If you have only Spring WebFlux, we will detect that and configure a WebFlux-based application context instead. + +If both are present, Spring MVC takes precedence. +If you want to test a reactive web application in this scenario, you must set the `spring.main.web-application-type` property: + +``` +import org.springframework.boot.test.context.SpringBootTest; + +@SpringBootTest(properties = "spring.main.web-application-type=reactive") +class MyWebFluxTests { + + // ... + +} + +``` + +#### 8.3.2. Detecting Test Configuration + +If you are familiar with the Spring Test Framework, you may be used to using `@ContextConfiguration(classes=…​)` in order to specify which Spring `@Configuration` to load. +Alternatively, you might have often used nested `@Configuration` classes within your test. + +When testing Spring Boot applications, this is often not required. 
+Spring Boot’s `@*Test` annotations search for your primary configuration automatically whenever you do not explicitly define one. + +The search algorithm works up from the package that contains the test until it finds a class annotated with `@SpringBootApplication` or `@SpringBootConfiguration`. +As long as you [structured your code](using.html#using.structuring-your-code) in a sensible way, your main configuration is usually found. + +| |If you use a [test annotation to test a more specific slice of your application](#features.testing.spring-boot-applications.autoconfigured-tests), you should avoid adding configuration settings that are specific to a particular area on the [main method’s application class](#features.testing.spring-boot-applications.user-configuration-and-slicing).

The underlying component scan configuration of `@SpringBootApplication` defines exclude filters that are used to make sure slicing works as expected.
If you are using an explicit `@ComponentScan` directive on your `@SpringBootApplication`-annotated class, be aware that those filters will be disabled.
If you are using slicing, you should define them again.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you want to customize the primary configuration, you can use a nested `@TestConfiguration` class. +Unlike a nested `@Configuration` class, which would be used instead of your application’s primary configuration, a nested `@TestConfiguration` class is used in addition to your application’s primary configuration. + +| |Spring’s test framework caches application contexts between tests.
Therefore, as long as your tests share the same configuration (no matter how it is discovered), the potentially time-consuming process of loading the context happens only once.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.3. Excluding Test Configuration + +If your application uses component scanning (for example, if you use `@SpringBootApplication` or `@ComponentScan`), you may find top-level configuration classes that you created only for specific tests accidentally get picked up everywhere. + +As we [have seen earlier](#features.testing.spring-boot-applications.detecting-configuration), `@TestConfiguration` can be used on an inner class of a test to customize the primary configuration. +When placed on a top-level class, `@TestConfiguration` indicates that classes in `src/test/java` should not be picked up by scanning. +You can then import that class explicitly where it is required, as shown in the following example: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.context.annotation.Import; + +@SpringBootTest +@Import(MyTestsConfiguration.class) +class MyTests { + + @Test + void exampleTest() { + // ... + } + +} + +``` + +| |If you directly use `@ComponentScan` (that is, not through `@SpringBootApplication`) you need to register the `TypeExcludeFilter` with it.
See [the Javadoc](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/context/TypeExcludeFilter.html) for details.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.4. Using Application Arguments + +If your application expects [arguments](#features.spring-application.application-arguments), you can +have `@SpringBootTest` inject them using the `args` attribute. + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.ApplicationArguments; +import org.springframework.boot.test.context.SpringBootTest; + +import static org.assertj.core.api.Assertions.assertThat; + +@SpringBootTest(args = "--app.test=one") +class MyApplicationArgumentTests { + + @Test + void applicationArgumentsPopulated(@Autowired ApplicationArguments args) { + assertThat(args.getOptionNames()).containsOnly("app.test"); + assertThat(args.getOptionValues("app.test")).containsOnly("one"); + } + +} + +``` + +#### 8.3.5. Testing with a mock environment + +By default, `@SpringBootTest` does not start the server but instead sets up a mock environment for testing web endpoints. 
+ +With Spring MVC, we can query our web endpoints using [`MockMvc`](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#spring-mvc-test-framework) or `WebTestClient`, as shown in the following example: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureMockMvc; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.web.reactive.server.WebTestClient; +import org.springframework.test.web.servlet.MockMvc; + +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; + +@SpringBootTest +@AutoConfigureMockMvc +class MyMockMvcTests { + + @Test + void testWithMockMvc(@Autowired MockMvc mvc) throws Exception { + mvc.perform(get("/")).andExpect(status().isOk()).andExpect(content().string("Hello World")); + } + + // If Spring WebFlux is on the classpath, you can drive MVC tests with a WebTestClient + @Test + void testWithWebTestClient(@Autowired WebTestClient webClient) { + webClient + .get().uri("/") + .exchange() + .expectStatus().isOk() + .expectBody(String.class).isEqualTo("Hello World"); + } + +} + +``` + +| |If you want to focus only on the web layer and not start a complete `ApplicationContext`, consider [using `@WebMvcTest` instead](#features.testing.spring-boot-applications.spring-mvc-tests).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +With Spring WebFlux endpoints, you can use [`WebTestClient`](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#webtestclient-tests) as shown in 
the following example: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.web.reactive.AutoConfigureWebTestClient; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.web.reactive.server.WebTestClient; + +@SpringBootTest +@AutoConfigureWebTestClient +class MyMockWebTestClientTests { + + @Test + void exampleTest(@Autowired WebTestClient webClient) { + webClient + .get().uri("/") + .exchange() + .expectStatus().isOk() + .expectBody(String.class).isEqualTo("Hello World"); + } + +} + +``` + +| |Testing within a mocked environment is usually faster than running with a full servlet container.
However, since mocking occurs at the Spring MVC layer, code that relies on lower-level servlet container behavior cannot be directly tested with MockMvc.

For example, Spring Boot’s error handling is based on the “error page” support provided by the servlet container.
This means that, whilst you can test your MVC layer throws and handles exceptions as expected, you cannot directly test that a specific [custom error page](web.html#web.servlet.spring-mvc.error-handling.error-pages) is rendered.
If you need to test these lower-level concerns, you can start a fully running server as described in the next section.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.6. Testing with a running server + +If you need to start a full running server, we recommend that you use random ports. +If you use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)`, an available port is picked at random each time your test runs. + +The `@LocalServerPort` annotation can be used to [inject the actual port used](howto.html#howto.webserver.discover-port) into your test. 
+For convenience, tests that need to make REST calls to the started server can additionally `@Autowire` a [`WebTestClient`](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#webtestclient-tests), which resolves relative links to the running server and comes with a dedicated API for verifying responses, as shown in the following example: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.SpringBootTest.WebEnvironment; +import org.springframework.test.web.reactive.server.WebTestClient; + +@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT) +class MyRandomPortWebTestClientTests { + + @Test + void exampleTest(@Autowired WebTestClient webClient) { + webClient + .get().uri("/") + .exchange() + .expectStatus().isOk() + .expectBody(String.class).isEqualTo("Hello World"); + } + +} + +``` + +| |`WebTestClient` can be used against both live servers and [mock environments](#features.testing.spring-boot-applications.with-mock-environment).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------| + +This setup requires `spring-webflux` on the classpath. 
+If you can not or will not add webflux, Spring Boot also provides a `TestRestTemplate` facility: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.SpringBootTest.WebEnvironment; +import org.springframework.boot.test.web.client.TestRestTemplate; + +import static org.assertj.core.api.Assertions.assertThat; + +@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT) +class MyRandomPortTestRestTemplateTests { + + @Test + void exampleTest(@Autowired TestRestTemplate restTemplate) { + String body = restTemplate.getForObject("/", String.class); + assertThat(body).isEqualTo("Hello World"); + } + +} + +``` + +#### 8.3.7. Customizing WebTestClient + +To customize the `WebTestClient` bean, configure a `WebTestClientBuilderCustomizer` bean. +Any such beans are called with the `WebTestClient.Builder` that is used to create the `WebTestClient`. + +#### 8.3.8. Using JMX + +As the test context framework caches context, JMX is disabled by default to prevent identical components to register on the same domain. 
+If such test needs access to an `MBeanServer`, consider marking it dirty as well: + +``` +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.annotation.DirtiesContext; +import org.springframework.test.context.junit.jupiter.SpringExtension; + +import static org.assertj.core.api.Assertions.assertThat; + +@ExtendWith(SpringExtension.class) +@SpringBootTest(properties = "spring.jmx.enabled=true") +@DirtiesContext +class MyJmxTests { + + @Autowired + private MBeanServer mBeanServer; + + @Test + void exampleTest() throws MalformedObjectNameException { + assertThat(this.mBeanServer.getDomains()).contains("java.lang"); + // ... + } + +} + +``` + +#### 8.3.9. Using Metrics + +Regardless of your classpath, meter registries, except the in-memory backed, are not auto-configured when using `@SpringBootTest`. + +If you need to export metrics to a different backend as part of an integration test, annotate it with `@AutoConfigureMetrics`. + +#### 8.3.10. Mocking and Spying Beans + +When running tests, it is sometimes necessary to mock certain components within your application context. +For example, you may have a facade over some remote service that is unavailable during development. +Mocking can also be useful when you want to simulate failures that might be hard to trigger in a real environment. + +Spring Boot includes a `@MockBean` annotation that can be used to define a Mockito mock for a bean inside your `ApplicationContext`. +You can use the annotation to add new beans or replace a single existing bean definition. +The annotation can be used directly on test classes, on fields within your test, or on `@Configuration` classes and fields. 
+When used on a field, the instance of the created mock is also injected. +Mock beans are automatically reset after each test method. + +| |If your test uses one of Spring Boot’s test annotations (such as `@SpringBootTest`), this feature is automatically enabled.
To use this feature with a different arrangement, listeners must be explicitly added, as shown in the following example:

```
import org.springframework.boot.test.mock.mockito.MockitoTestExecutionListener;
import org.springframework.boot.test.mock.mockito.ResetMocksTestExecutionListener;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestExecutionListeners;

@ContextConfiguration(classes = MyConfig.class)
@TestExecutionListeners({ MockitoTestExecutionListener.class, ResetMocksTestExecutionListener.class })
class MyTests {

// ...

}

```| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example replaces an existing `RemoteService` bean with a mock implementation: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.mock.mockito.MockBean; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.given; + +@SpringBootTest +class MyTests { + + @Autowired + private Reverser reverser; + + @MockBean + private RemoteService remoteService; + + @Test + void exampleTest() { + given(this.remoteService.getValue()).willReturn("spring"); + String reverse = this.reverser.getReverseValue(); // Calls injected RemoteService + assertThat(reverse).isEqualTo("gnirps"); + } + +} + +``` + +| |`@MockBean` cannot be used to mock the behavior of a bean that is exercised during application context refresh.
By the time the test is executed, the application context refresh has completed and it is too late to configure the mocked behavior.
We recommend using a `@Bean` method to create and configure the mock in this situation.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Additionally, you can use `@SpyBean` to wrap any existing bean with a Mockito `spy`. +See the [Javadoc](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/test/mock/mockito/SpyBean.html) for full details. + +| |CGLib proxies, such as those created for scoped beans, declare the proxied methods as `final`.
This stops Mockito from functioning correctly as it cannot mock or spy on `final` methods in its default configuration.
If you want to mock or spy on such a bean, configure Mockito to use its inline mock maker by adding `org.mockito:mockito-inline` to your application’s test dependencies.
This allows Mockito to mock and spy on `final` methods.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |While Spring’s test framework caches application contexts between tests and reuses a context for tests sharing the same configuration, the use of `@MockBean` or `@SpyBean` influences the cache key, which will most likely increase the number of contexts.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you are using `@SpyBean` to spy on a bean with `@Cacheable` methods that refer to parameters by name, your application must be compiled with `-parameters`.
This ensures that the parameter names are available to the caching infrastructure once the bean has been spied upon.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When you are using `@SpyBean` to spy on a bean that is proxied by Spring, you may need to remove Spring’s proxy in some situations, for example when setting expectations using `given` or `when`.
Use `AopTestUtils.getTargetObject(yourProxiedSpy)` to do so.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.11. Auto-configured Tests + +Spring Boot’s auto-configuration system works well for applications but can sometimes be a little too much for tests. +It often helps to load only the parts of the configuration that are required to test a “slice” of your application. +For example, you might want to test that Spring MVC controllers are mapping URLs correctly, and you do not want to involve database calls in those tests, or you might want to test JPA entities, and you are not interested in the web layer when those tests run. + +The `spring-boot-test-autoconfigure` module includes a number of annotations that can be used to automatically configure such “slices”. +Each of them works in a similar way, providing a `@…​Test` annotation that loads the `ApplicationContext` and one or more `@AutoConfigure…​` annotations that can be used to customize auto-configuration settings. + +| |Each slice restricts component scan to appropriate components and loads a very restricted set of auto-configuration classes.
If you need to exclude one of them, most `@…Test` annotations provide an `excludeAutoConfiguration` attribute.
Alternatively, you can use `@ImportAutoConfiguration#exclude`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Including multiple “slices” by using several `@…​Test` annotations in one test is not supported.
If you need multiple “slices”, pick one of the `@…​Test` annotations and include the `@AutoConfigure…​` annotations of the other “slices” by hand.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |It is also possible to use the `@AutoConfigure…​` annotations with the standard `@SpringBootTest` annotation.
You can use this combination if you are not interested in “slicing” your application but you want some of the auto-configured test beans.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.12. Auto-configured JSON Tests + +To test that object JSON serialization and deserialization is working as expected, you can use the `@JsonTest` annotation.`@JsonTest` auto-configures the available supported JSON mapper, which can be one of the following libraries: + +* Jackson `ObjectMapper`, any `@JsonComponent` beans and any Jackson `Module`s + +* `Gson` + +* `Jsonb` + +| |A list of the auto-configurations that are enabled by `@JsonTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you need to configure elements of the auto-configuration, you can use the `@AutoConfigureJsonTesters` annotation. + +Spring Boot includes AssertJ-based helpers that work with the JSONAssert and JsonPath libraries to check that JSON appears as expected. +The `JacksonTester`, `GsonTester`, `JsonbTester`, and `BasicJsonTester` classes can be used for Jackson, Gson, Jsonb, and Strings respectively. +Any helper fields on the test class can be `@Autowired` when using `@JsonTest`. 
+The following example shows a test class for Jackson: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.json.JsonTest; +import org.springframework.boot.test.json.JacksonTester; + +import static org.assertj.core.api.Assertions.assertThat; + +@JsonTest +class MyJsonTests { + + @Autowired + private JacksonTester json; + + @Test + void serialize() throws Exception { + VehicleDetails details = new VehicleDetails("Honda", "Civic"); + // Assert against a `.json` file in the same package as the test + assertThat(this.json.write(details)).isEqualToJson("expected.json"); + // Or use JSON path based assertions + assertThat(this.json.write(details)).hasJsonPathStringValue("@.make"); + assertThat(this.json.write(details)).extractingJsonPathStringValue("@.make").isEqualTo("Honda"); + } + + @Test + void deserialize() throws Exception { + String content = "{\"make\":\"Ford\",\"model\":\"Focus\"}"; + assertThat(this.json.parse(content)).isEqualTo(new VehicleDetails("Ford", "Focus")); + assertThat(this.json.parseObject(content).getMake()).isEqualTo("Ford"); + } + +} + +``` + +| |JSON helper classes can also be used directly in standard unit tests.
To do so, call the `initFields` method of the helper in your `@Before` method if you do not use `@JsonTest`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you use Spring Boot’s AssertJ-based helpers to assert on a number value at a given JSON path, you might not be able to use `isEqualTo` depending on the type. +Instead, you can use AssertJ’s `satisfies` to assert that the value matches the given condition. +For instance, the following example asserts that the actual number is a float value close to `0.15` within an offset of `0.01`. + +``` +@Test +void someTest() throws Exception { + SomeObject value = new SomeObject(0.152f); + assertThat(this.json.write(value)).extractingJsonPathNumberValue("@.test.numberValue") + .satisfies((number) -> assertThat(number.floatValue()).isCloseTo(0.15f, within(0.01f))); +} + +``` + +#### 8.3.13. Auto-configured Spring MVC Tests + +To test whether Spring MVC controllers are working as expected, use the `@WebMvcTest` annotation.`@WebMvcTest` auto-configures the Spring MVC infrastructure and limits scanned beans to `@Controller`, `@ControllerAdvice`, `@JsonComponent`, `Converter`, `GenericConverter`, `Filter`, `HandlerInterceptor`, `WebMvcConfigurer`, `WebMvcRegistrations`, and `HandlerMethodArgumentResolver`. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@WebMvcTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. 
+ +| |A list of the auto-configuration settings that are enabled by `@WebMvcTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you need to register extra components, such as the Jackson `Module`, you can import additional configuration classes by using `@Import` on your test.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------| + +Often, `@WebMvcTest` is limited to a single controller and is used in combination with `@MockBean` to provide mock implementations for required collaborators. + +`@WebMvcTest` also auto-configures `MockMvc`. +Mock MVC offers a powerful way to quickly test MVC controllers without needing to start a full HTTP server. + +| |You can also auto-configure `MockMvc` in a non-`@WebMvcTest` (such as `@SpringBootTest`) by annotating it with `@AutoConfigureMockMvc`.
The following example uses `MockMvc`:| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest; +import org.springframework.boot.test.mock.mockito.MockBean; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; + +import static org.mockito.BDDMockito.given; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; + +@WebMvcTest(UserVehicleController.class) +class MyControllerTests { + + @Autowired + private MockMvc mvc; + + @MockBean + private UserVehicleService userVehicleService; + + @Test + void testExample() throws Exception { + given(this.userVehicleService.getVehicleDetails("sboot")) + .willReturn(new VehicleDetails("Honda", "Civic")); + this.mvc.perform(get("/sboot/vehicle").accept(MediaType.TEXT_PLAIN)) + .andExpect(status().isOk()) + .andExpect(content().string("Honda Civic")); + } + +} + +``` + +| |If you need to configure elements of the auto-configuration (for example, when servlet filters should be applied) you can use attributes in the `@AutoConfigureMockMvc` annotation.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you use HtmlUnit and Selenium, auto-configuration also provides an HtmlUnit `WebClient` bean and/or a Selenium `WebDriver` bean. 
+The following example uses HtmlUnit: + +``` +import com.gargoylesoftware.htmlunit.WebClient; +import com.gargoylesoftware.htmlunit.html.HtmlPage; +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest; +import org.springframework.boot.test.mock.mockito.MockBean; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.BDDMockito.given; + +@WebMvcTest(UserVehicleController.class) +class MyHtmlUnitTests { + + @Autowired + private WebClient webClient; + + @MockBean + private UserVehicleService userVehicleService; + + @Test + void testExample() throws Exception { + given(this.userVehicleService.getVehicleDetails("sboot")).willReturn(new VehicleDetails("Honda", "Civic")); + HtmlPage page = this.webClient.getPage("/sboot/vehicle.html"); + assertThat(page.getBody().getTextContent()).isEqualTo("Honda Civic"); + } + +} + +``` + +| |By default, Spring Boot puts `WebDriver` beans in a special “scope” to ensure that the driver exits after each test and that a new instance is injected.
If you do not want this behavior, you can add `@Scope("singleton")` to your `WebDriver` `@Bean` definition.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The `webDriver` scope created by Spring Boot will replace any user defined scope of the same name.
If you define your own `webDriver` scope you may find it stops working when you use `@WebMvcTest`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you have Spring Security on the classpath, `@WebMvcTest` will also scan `WebSecurityConfigurer` beans. +Instead of disabling security completely for such tests, you can use Spring Security’s test support. +More details on how to use Spring Security’s `MockMvc` support can be found in this *[howto.html](howto.html#howto.testing.with-spring-security)* how-to section. + +| |Sometimes writing Spring MVC tests is not enough; Spring Boot can help you run [full end-to-end tests with an actual server](#features.testing.spring-boot-applications.with-running-server).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.14. Auto-configured Spring WebFlux Tests + +To test that [Spring WebFlux](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web-reactive.html) controllers are working as expected, you can use the `@WebFluxTest` annotation. `@WebFluxTest` auto-configures the Spring WebFlux infrastructure and limits scanned beans to `@Controller`, `@ControllerAdvice`, `@JsonComponent`, `Converter`, `GenericConverter`, `WebFilter`, and `WebFluxConfigurer`. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@WebFluxTest` annotation is used. `@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. 
+ +| |A list of the auto-configurations that are enabled by `@WebFluxTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you need to register extra components, such as Jackson `Module`, you can import additional configuration classes using `@Import` on your test.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------| + +Often, `@WebFluxTest` is limited to a single controller and used in combination with the `@MockBean` annotation to provide mock implementations for required collaborators. + +`@WebFluxTest` also auto-configures [`WebTestClient`](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#webtestclient), which offers a powerful way to quickly test WebFlux controllers without needing to start a full HTTP server. + +| |You can also auto-configure `WebTestClient` in a non-`@WebFluxTest` (such as `@SpringBootTest`) by annotating it with `@AutoConfigureWebTestClient`.
The following example shows a class that uses both `@WebFluxTest` and a `WebTestClient`:| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.web.reactive.WebFluxTest; +import org.springframework.boot.test.mock.mockito.MockBean; +import org.springframework.http.MediaType; +import org.springframework.test.web.reactive.server.WebTestClient; + +import static org.mockito.BDDMockito.given; + +@WebFluxTest(UserVehicleController.class) +class MyControllerTests { + + @Autowired + private WebTestClient webClient; + + @MockBean + private UserVehicleService userVehicleService; + + @Test + void testExample() throws Exception { + given(this.userVehicleService.getVehicleDetails("sboot")) + .willReturn(new VehicleDetails("Honda", "Civic")); + this.webClient.get().uri("/sboot/vehicle").accept(MediaType.TEXT_PLAIN).exchange() + .expectStatus().isOk() + .expectBody(String.class).isEqualTo("Honda Civic"); + } + +} + +``` + +| |This setup is only supported by WebFlux applications as using `WebTestClient` in a mocked web application only works with WebFlux at the moment.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`@WebFluxTest` cannot detect routes registered through the functional web framework.
For testing `RouterFunction` beans in the context, consider importing your `RouterFunction` yourself by using `@Import` or by using `@SpringBootTest`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`@WebFluxTest` cannot detect custom security configuration registered as a `@Bean` of type `SecurityWebFilterChain`.
To include that in your test, you will need to import the configuration that registers the bean by using `@Import` or by using `@SpringBootTest`.| +|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Sometimes writing Spring WebFlux tests is not enough; Spring Boot can help you run [full end-to-end tests with an actual server](#features.testing.spring-boot-applications.with-running-server).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.15. Auto-configured Data Cassandra Tests + +You can use `@DataCassandraTest` to test Cassandra applications. +By default, it configures a `CassandraTemplate`, scans for `@Table` classes, and configures Spring Data Cassandra repositories. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@DataCassandraTest` annotation is used. `@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. +(For more about using Cassandra with Spring Boot, see "[data.html](data.html#data.nosql.cassandra)", earlier in this chapter.) 
+ +| |A list of the auto-configuration settings that are enabled by `@DataCassandraTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows a typical setup for using Cassandra tests in Spring Boot: + +``` +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.data.cassandra.DataCassandraTest; + +@DataCassandraTest +class MyDataCassandraTests { + + @Autowired + private SomeRepository repository; + +} + +``` + +#### 8.3.16. Auto-configured Data JPA Tests + +You can use the `@DataJpaTest` annotation to test JPA applications. +By default, it scans for `@Entity` classes and configures Spring Data JPA repositories. +If an embedded database is available on the classpath, it configures one as well. +SQL queries are logged by default by setting the `spring.jpa.show-sql` property to `true`. +This can be disabled using the `showSql()` attribute of the annotation. + +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@DataJpaTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. + +| |A list of the auto-configuration settings that are enabled by `@DataJpaTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, data JPA tests are transactional and roll back at the end of each test. 
+See the [relevant section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#testcontext-tx-enabling-transactions) in the Spring Framework Reference Documentation for more details. +If that is not what you want, you can disable transaction management for a test or for the whole class as follows: + +``` +import org.springframework.boot.test.autoconfigure.orm.jpa.DataJpaTest; +import org.springframework.transaction.annotation.Propagation; +import org.springframework.transaction.annotation.Transactional; + +@DataJpaTest +@Transactional(propagation = Propagation.NOT_SUPPORTED) +class MyNonTransactionalTests { + + // ... + +} + +``` + +Data JPA tests may also inject a [`TestEntityManager`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-test-autoconfigure/src/main/java/org/springframework/boot/test/autoconfigure/orm/jpa/TestEntityManager.java) bean, which provides an alternative to the standard JPA `EntityManager` that is specifically designed for tests. + +| |`TestEntityManager` can also be auto-configured to any of your Spring-based test class by adding `@AutoConfigureTestEntityManager`.
When doing so, make sure that your test is running in a transaction, for instance by adding `@Transactional` on your test class or method.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +A `JdbcTemplate` is also available if you need that. +The following example shows the `@DataJpaTest` annotation in use: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.orm.jpa.DataJpaTest; +import org.springframework.boot.test.autoconfigure.orm.jpa.TestEntityManager; + +import static org.assertj.core.api.Assertions.assertThat; + +@DataJpaTest +class MyRepositoryTests { + + @Autowired + private TestEntityManager entityManager; + + @Autowired + private UserRepository repository; + + @Test + void testExample() throws Exception { + this.entityManager.persist(new User("sboot", "1234")); + User user = this.repository.findByUsername("sboot"); + assertThat(user.getUsername()).isEqualTo("sboot"); + assertThat(user.getEmployeeNumber()).isEqualTo("1234"); + } + +} + +``` + +In-memory embedded databases generally work well for tests, since they are fast and do not require any installation. +If, however, you prefer to run tests against a real database you can use the `@AutoConfigureTestDatabase` annotation, as shown in the following example: + +``` +import org.springframework.boot.test.autoconfigure.jdbc.AutoConfigureTestDatabase; +import org.springframework.boot.test.autoconfigure.jdbc.AutoConfigureTestDatabase.Replace; +import org.springframework.boot.test.autoconfigure.orm.jpa.DataJpaTest; + +@DataJpaTest +@AutoConfigureTestDatabase(replace = Replace.NONE) +class MyRepositoryTests { + + // ... + +} + +``` + +#### 8.3.17. 
Auto-configured JDBC Tests + +`@JdbcTest` is similar to `@DataJpaTest` but is for tests that only require a `DataSource` and do not use Spring Data JDBC. +By default, it configures an in-memory embedded database and a `JdbcTemplate`. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@JdbcTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. + +| |A list of the auto-configurations that are enabled by `@JdbcTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, JDBC tests are transactional and roll back at the end of each test. +See the [relevant section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#testcontext-tx-enabling-transactions) in the Spring Framework Reference Documentation for more details. +If that is not what you want, you can disable transaction management for a test or for the whole class, as follows: + +``` +import org.springframework.boot.test.autoconfigure.jdbc.JdbcTest; +import org.springframework.transaction.annotation.Propagation; +import org.springframework.transaction.annotation.Transactional; + +@JdbcTest +@Transactional(propagation = Propagation.NOT_SUPPORTED) +class MyTransactionalTests { + +} + +``` + +If you prefer your test to run against a real database, you can use the `@AutoConfigureTestDatabase` annotation in the same way as for `DataJpaTest`. +(See "[Auto-configured Data JPA Tests](#features.testing.spring-boot-applications.autoconfigured-spring-data-jpa)".) + +#### 8.3.18. Auto-configured Data JDBC Tests + +`@DataJdbcTest` is similar to `@JdbcTest` but is for tests that use Spring Data JDBC repositories. 
+By default, it configures an in-memory embedded database, a `JdbcTemplate`, and Spring Data JDBC repositories. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@DataJdbcTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. + +| |A list of the auto-configurations that are enabled by `@DataJdbcTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, Data JDBC tests are transactional and roll back at the end of each test. +See the [relevant section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#testcontext-tx-enabling-transactions) in the Spring Framework Reference Documentation for more details. +If that is not what you want, you can disable transaction management for a test or for the whole test class as [shown in the JDBC example](#features.testing.spring-boot-applications.autoconfigured-jdbc). + +If you prefer your test to run against a real database, you can use the `@AutoConfigureTestDatabase` annotation in the same way as for `DataJpaTest`. +(See "[Auto-configured Data JPA Tests](#features.testing.spring-boot-applications.autoconfigured-spring-data-jpa)".) + +#### 8.3.19. Auto-configured jOOQ Tests + +You can use `@JooqTest` in a similar fashion as `@JdbcTest` but for jOOQ-related tests. +As jOOQ relies heavily on a Java-based schema that corresponds with the database schema, the existing `DataSource` is used. +If you want to replace it with an in-memory database, you can use `@AutoConfigureTestDatabase` to override those settings. +(For more about using jOOQ with Spring Boot, see "[data.html](data.html#data.sql.jooq)", earlier in this chapter.) 
+Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@JooqTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. + +| |A list of the auto-configurations that are enabled by `@JooqTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +`@JooqTest` configures a `DSLContext`. +The following example shows the `@JooqTest` annotation in use: + +``` +import org.jooq.DSLContext; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.jooq.JooqTest; + +@JooqTest +class MyJooqTests { + + @Autowired + private DSLContext dslContext; + + // ... + +} + +``` + +JOOQ tests are transactional and roll back at the end of each test by default. +If that is not what you want, you can disable transaction management for a test or for the whole test class as [shown in the JDBC example](#features.testing.spring-boot-applications.autoconfigured-jdbc). + +#### 8.3.20. Auto-configured Data MongoDB Tests + +You can use `@DataMongoTest` to test MongoDB applications. +By default, it configures an in-memory embedded MongoDB (if available), configures a `MongoTemplate`, scans for `@Document` classes, and configures Spring Data MongoDB repositories. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@DataMongoTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. +(For more about using MongoDB with Spring Boot, see "[data.html](data.html#data.nosql.mongodb)", earlier in this chapter.) 
+ +| |A list of the auto-configuration settings that are enabled by `@DataMongoTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following class shows the `@DataMongoTest` annotation in use: + +``` +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.data.mongo.DataMongoTest; +import org.springframework.data.mongodb.core.MongoTemplate; + +@DataMongoTest +class MyDataMongoDbTests { + + @Autowired + private MongoTemplate mongoTemplate; + + // ... + +} + +``` + +In-memory embedded MongoDB generally works well for tests, since it is fast and does not require any developer installation. +If, however, you prefer to run tests against a real MongoDB server, you should exclude the embedded MongoDB auto-configuration, as shown in the following example: + +``` +import org.springframework.boot.autoconfigure.mongo.embedded.EmbeddedMongoAutoConfiguration; +import org.springframework.boot.test.autoconfigure.data.mongo.DataMongoTest; + +@DataMongoTest(excludeAutoConfiguration = EmbeddedMongoAutoConfiguration.class) +class MyDataMongoDbTests { + + // ... + +} + +``` + +#### 8.3.21. Auto-configured Data Neo4j Tests + +You can use `@DataNeo4jTest` to test Neo4j applications. +By default, it scans for `@Node` classes, and configures Spring Data Neo4j repositories. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@DataNeo4jTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. +(For more about using Neo4J with Spring Boot, see "[data.html](data.html#data.nosql.neo4j)", earlier in this chapter.) 
+ +| |A list of the auto-configuration settings that are enabled by `@DataNeo4jTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows a typical setup for using Neo4J tests in Spring Boot: + +``` +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.data.neo4j.DataNeo4jTest; + +@DataNeo4jTest +class MyDataNeo4jTests { + + @Autowired + private SomeRepository repository; + + // ... + +} + +``` + +By default, Data Neo4j tests are transactional and roll back at the end of each test. +See the [relevant section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/testing.html#testcontext-tx-enabling-transactions) in the Spring Framework Reference Documentation for more details. +If that is not what you want, you can disable transaction management for a test or for the whole class, as follows: + +``` +import org.springframework.boot.test.autoconfigure.data.neo4j.DataNeo4jTest; +import org.springframework.transaction.annotation.Propagation; +import org.springframework.transaction.annotation.Transactional; + +@DataNeo4jTest +@Transactional(propagation = Propagation.NOT_SUPPORTED) +class MyDataNeo4jTests { + +} + +``` + +| |Transactional tests are not supported with reactive access.
If you are using this style, you must configure `@DataNeo4jTest` tests as described above.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.22. Auto-configured Data Redis Tests + +You can use `@DataRedisTest` to test Redis applications. +By default, it scans for `@RedisHash` classes and configures Spring Data Redis repositories. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@DataRedisTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. +(For more about using Redis with Spring Boot, see "[data.html](data.html#data.nosql.redis)", earlier in this chapter.) + +| |A list of the auto-configuration settings that are enabled by `@DataRedisTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows the `@DataRedisTest` annotation in use: + +``` +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.data.redis.DataRedisTest; + +@DataRedisTest +class MyDataRedisTests { + + @Autowired + private SomeRepository repository; + + // ... + +} + +``` + +#### 8.3.23. Auto-configured Data LDAP Tests + +You can use `@DataLdapTest` to test LDAP applications. +By default, it configures an in-memory embedded LDAP (if available), configures an `LdapTemplate`, scans for `@Entry` classes, and configures Spring Data LDAP repositories. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@DataLdapTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. 
+(For more about using LDAP with Spring Boot, see "[data.html](data.html#data.nosql.ldap)", earlier in this chapter.) + +| |A list of the auto-configuration settings that are enabled by `@DataLdapTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows the `@DataLdapTest` annotation in use: + +``` +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.data.ldap.DataLdapTest; +import org.springframework.ldap.core.LdapTemplate; + +@DataLdapTest +class MyDataLdapTests { + + @Autowired + private LdapTemplate ldapTemplate; + + // ... + +} + +``` + +In-memory embedded LDAP generally works well for tests, since it is fast and does not require any developer installation. +If, however, you prefer to run tests against a real LDAP server, you should exclude the embedded LDAP auto-configuration, as shown in the following example: + +``` +import org.springframework.boot.autoconfigure.ldap.embedded.EmbeddedLdapAutoConfiguration; +import org.springframework.boot.test.autoconfigure.data.ldap.DataLdapTest; + +@DataLdapTest(excludeAutoConfiguration = EmbeddedLdapAutoConfiguration.class) +class MyDataLdapTests { + + // ... + +} + +``` + +#### 8.3.24. Auto-configured REST Clients + +You can use the `@RestClientTest` annotation to test REST clients. +By default, it auto-configures Jackson, GSON, and Jsonb support, configures a `RestTemplateBuilder`, and adds support for `MockRestServiceServer`. +Regular `@Component` and `@ConfigurationProperties` beans are not scanned when the `@RestClientTest` annotation is used.`@EnableConfigurationProperties` can be used to include `@ConfigurationProperties` beans. 
+ +| |A list of the auto-configuration settings that are enabled by `@RestClientTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The specific beans that you want to test should be specified by using the `value` or `components` attribute of `@RestClientTest`, as shown in the following example: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.web.client.RestClientTest; +import org.springframework.http.MediaType; +import org.springframework.test.web.client.MockRestServiceServer; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.springframework.test.web.client.match.MockRestRequestMatchers.requestTo; +import static org.springframework.test.web.client.response.MockRestResponseCreators.withSuccess; + +@RestClientTest(RemoteVehicleDetailsService.class) +class MyRestClientTests { + + @Autowired + private RemoteVehicleDetailsService service; + + @Autowired + private MockRestServiceServer server; + + @Test + void getVehicleDetailsWhenResultIsSuccessShouldReturnDetails() throws Exception { + this.server.expect(requestTo("/greet/details")).andRespond(withSuccess("hello", MediaType.TEXT_PLAIN)); + String greeting = this.service.callRestService(); + assertThat(greeting).isEqualTo("hello"); + } + +} + +``` + +#### 8.3.25. Auto-configured Spring REST Docs Tests + +You can use the `@AutoConfigureRestDocs` annotation to use [Spring REST Docs](https://spring.io/projects/spring-restdocs) in your tests with Mock MVC, REST Assured, or WebTestClient. +It removes the need for the JUnit extension in Spring REST Docs. 
+ +`@AutoConfigureRestDocs` can be used to override the default output directory (`target/generated-snippets` if you are using Maven or `build/generated-snippets` if you are using Gradle). +It can also be used to configure the host, scheme, and port that appears in any documented URIs. + +##### Auto-configured Spring REST Docs Tests with Mock MVC + +`@AutoConfigureRestDocs` customizes the `MockMvc` bean to use Spring REST Docs when testing servlet-based web applications. +You can inject it by using `@Autowired` and use it in your tests as you normally would when using Mock MVC and Spring REST Docs, as shown in the following example: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.restdocs.AutoConfigureRestDocs; +import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest; +import org.springframework.http.MediaType; +import org.springframework.test.web.servlet.MockMvc; + +import static org.springframework.restdocs.mockmvc.MockMvcRestDocumentation.document; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; + +@WebMvcTest(UserController.class) +@AutoConfigureRestDocs +class MyUserDocumentationTests { + + @Autowired + private MockMvc mvc; + + @Test + void listUsers() throws Exception { + this.mvc.perform(get("/users").accept(MediaType.TEXT_PLAIN)) + .andExpect(status().isOk()) + .andDo(document("list-users")); + } + +} + +``` + +If you require more control over Spring REST Docs configuration than offered by the attributes of `@AutoConfigureRestDocs`, you can use a `RestDocsMockMvcConfigurationCustomizer` bean, as shown in the following example: + +``` +import org.springframework.boot.test.autoconfigure.restdocs.RestDocsMockMvcConfigurationCustomizer; +import org.springframework.boot.test.context.TestConfiguration; 
+import org.springframework.restdocs.mockmvc.MockMvcRestDocumentationConfigurer; +import org.springframework.restdocs.templates.TemplateFormats; + +@TestConfiguration(proxyBeanMethods = false) +public class MyRestDocsConfiguration implements RestDocsMockMvcConfigurationCustomizer { + + @Override + public void customize(MockMvcRestDocumentationConfigurer configurer) { + configurer.snippets().withTemplateFormat(TemplateFormats.markdown()); + } + +} + +``` + +If you want to make use of Spring REST Docs support for a parameterized output directory, you can create a `RestDocumentationResultHandler` bean. +The auto-configuration calls `alwaysDo` with this result handler, thereby causing each `MockMvc` call to automatically generate the default snippets. +The following example shows a `RestDocumentationResultHandler` being defined: + +``` +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.restdocs.mockmvc.MockMvcRestDocumentation; +import org.springframework.restdocs.mockmvc.RestDocumentationResultHandler; + +@TestConfiguration(proxyBeanMethods = false) +public class MyResultHandlerConfiguration { + + @Bean + public RestDocumentationResultHandler restDocumentation() { + return MockMvcRestDocumentation.document("{method-name}"); + } + +} + +``` + +##### Auto-configured Spring REST Docs Tests with WebTestClient + +`@AutoConfigureRestDocs` can also be used with `WebTestClient` when testing reactive web applications. 
+You can inject it by using `@Autowired` and use it in your tests as you normally would when using `@WebFluxTest` and Spring REST Docs, as shown in the following example: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.restdocs.AutoConfigureRestDocs; +import org.springframework.boot.test.autoconfigure.web.reactive.WebFluxTest; +import org.springframework.test.web.reactive.server.WebTestClient; + +import static org.springframework.restdocs.webtestclient.WebTestClientRestDocumentation.document; + +@WebFluxTest +@AutoConfigureRestDocs +class MyUsersDocumentationTests { + + @Autowired + private WebTestClient webTestClient; + + @Test + void listUsers() { + this.webTestClient + .get().uri("/") + .exchange() + .expectStatus() + .isOk() + .expectBody() + .consumeWith(document("list-users")); + } + +} + +``` + +If you require more control over Spring REST Docs configuration than offered by the attributes of `@AutoConfigureRestDocs`, you can use a `RestDocsWebTestClientConfigurationCustomizer` bean, as shown in the following example: + +``` +import org.springframework.boot.test.autoconfigure.restdocs.RestDocsWebTestClientConfigurationCustomizer; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.restdocs.webtestclient.WebTestClientRestDocumentationConfigurer; + +@TestConfiguration(proxyBeanMethods = false) +public class MyRestDocsConfiguration implements RestDocsWebTestClientConfigurationCustomizer { + + @Override + public void customize(WebTestClientRestDocumentationConfigurer configurer) { + configurer.snippets().withEncoding("UTF-8"); + } + +} + +``` + +If you want to make use of Spring REST Docs support for a parameterized output directory, you can use a `WebTestClientBuilderCustomizer` to configure a consumer for every entity exchange result. 
+The following example shows such a `WebTestClientBuilderCustomizer` being defined: + +``` +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.boot.test.web.reactive.server.WebTestClientBuilderCustomizer; +import org.springframework.context.annotation.Bean; + +import static org.springframework.restdocs.webtestclient.WebTestClientRestDocumentation.document; + +@TestConfiguration(proxyBeanMethods = false) +public class MyWebTestClientBuilderCustomizerConfiguration { + + @Bean + public WebTestClientBuilderCustomizer restDocumentation() { + return (builder) -> builder.entityExchangeResultConsumer(document("{method-name}")); + } + +} + +``` + +##### Auto-configured Spring REST Docs Tests with REST Assured + +`@AutoConfigureRestDocs` makes a `RequestSpecification` bean, preconfigured to use Spring REST Docs, available to your tests. +You can inject it by using `@Autowired` and use it in your tests as you normally would when using REST Assured and Spring REST Docs, as shown in the following example: + +``` +import io.restassured.specification.RequestSpecification; +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.restdocs.AutoConfigureRestDocs; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.SpringBootTest.WebEnvironment; +import org.springframework.boot.web.server.LocalServerPort; + +import static io.restassured.RestAssured.given; +import static org.hamcrest.Matchers.is; +import static org.springframework.restdocs.restassured3.RestAssuredRestDocumentation.document; + +@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT) +@AutoConfigureRestDocs +class MyUserDocumentationTests { + + @Test + void listUsers(@Autowired RequestSpecification documentationSpec, @LocalServerPort int port) { + given(documentationSpec) + .filter(document("list-users")) + .when() + 
.port(port) + .get("/") + .then().assertThat() + .statusCode(is(200)); + } + +} + +``` + +If you require more control over Spring REST Docs configuration than offered by the attributes of `@AutoConfigureRestDocs`, a `RestDocsRestAssuredConfigurationCustomizer` bean can be used, as shown in the following example: + +``` +import org.springframework.boot.test.autoconfigure.restdocs.RestDocsRestAssuredConfigurationCustomizer; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.restdocs.restassured3.RestAssuredRestDocumentationConfigurer; +import org.springframework.restdocs.templates.TemplateFormats; + +@TestConfiguration(proxyBeanMethods = false) +public class MyRestDocsConfiguration implements RestDocsRestAssuredConfigurationCustomizer { + + @Override + public void customize(RestAssuredRestDocumentationConfigurer configurer) { + configurer.snippets().withTemplateFormat(TemplateFormats.markdown()); + } + +} + +``` + +#### 8.3.26. Auto-configured Spring Web Services Tests + +##### Auto-configured Spring Web Services Client Tests + +You can use `@WebServiceClientTest` to test applications that call web services using the Spring Web Services project. +By default, it configures a mock `WebServiceServer` bean and automatically customizes your `WebServiceTemplateBuilder`. +(For more about using Web Services with Spring Boot, see "[io.html](io.html#io.webservices)", earlier in this chapter.) 
+ +| |A list of the auto-configuration settings that are enabled by `@WebServiceClientTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows the `@WebServiceClientTest` annotation in use: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.webservices.client.WebServiceClientTest; +import org.springframework.ws.test.client.MockWebServiceServer; +import org.springframework.xml.transform.StringSource; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.springframework.ws.test.client.RequestMatchers.payload; +import static org.springframework.ws.test.client.ResponseCreators.withPayload; + +@WebServiceClientTest(SomeWebService.class) +class MyWebServiceClientTests { + + @Autowired + private MockWebServiceServer server; + + @Autowired + private SomeWebService someWebService; + + @Test + void mockServerCall() { + this.server + .expect(payload(new StringSource(""))) + .andRespond(withPayload(new StringSource("200"))); + assertThat(this.someWebService.test()) + .extracting(Response::getStatus) + .isEqualTo(200); + } + +} + +``` + +##### Auto-configured Spring Web Services Server Tests + +You can use `@WebServiceServerTest` to test applications that implement web services using the Spring Web Services project. +By default, it configures a `MockWebServiceClient` bean that can be used to call your web service endpoints. +(For more about using Web Services with Spring Boot, see "[io.html](io.html#io.webservices)", earlier in this chapter.) 
+ +| |A list of the auto-configuration settings that are enabled by `@WebServiceServerTest` can be [found in the appendix](test-auto-configuration.html#appendix.test-auto-configuration).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows the `@WebServiceServerTest` annotation in use: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.webservices.server.WebServiceServerTest; +import org.springframework.ws.test.server.MockWebServiceClient; +import org.springframework.ws.test.server.RequestCreators; +import org.springframework.ws.test.server.ResponseMatchers; +import org.springframework.xml.transform.StringSource; + +@WebServiceServerTest(ExampleEndpoint.class) +class MyWebServiceServerTests { + + @Autowired + private MockWebServiceClient client; + + @Test + void mockServerCall() { + this.client + .sendRequest(RequestCreators.withPayload(new StringSource(""))) + .andExpect(ResponseMatchers.payload(new StringSource("42"))); + } + +} + +``` + +#### 8.3.27. Additional Auto-configuration and Slicing + +Each slice provides one or more `@AutoConfigure…​` annotations that define the auto-configurations that should be included as part of a slice.
+Additional auto-configurations can be added on a test-by-test basis by creating a custom `@AutoConfigure…​` annotation or by adding `@ImportAutoConfiguration` to the test as shown in the following example: + +``` +import org.springframework.boot.autoconfigure.ImportAutoConfiguration; +import org.springframework.boot.autoconfigure.integration.IntegrationAutoConfiguration; +import org.springframework.boot.test.autoconfigure.jdbc.JdbcTest; + +@JdbcTest +@ImportAutoConfiguration(IntegrationAutoConfiguration.class) +class MyJdbcTests { + +} + +``` + +| |Make sure to not use the regular `@Import` annotation to import auto-configurations as they are handled in a specific way by Spring Boot.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------| + +Alternatively, additional auto-configurations can be added for any use of a slice annotation by registering them in `META-INF/spring.factories` as shown in the following example: + +``` +org.springframework.boot.test.autoconfigure.jdbc.JdbcTest=com.example.IntegrationAutoConfiguration +``` + +| |A slice or `@AutoConfigure…​` annotation can be customized this way as long as it is meta-annotated with `@ImportAutoConfiguration`.| +|---|------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.28. User Configuration and Slicing + +If you [structure your code](using.html#using.structuring-your-code) in a sensible way, your `@SpringBootApplication` class is [used by default](#features.testing.spring-boot-applications.detecting-configuration) as the configuration of your tests. + +It then becomes important not to litter the application’s main class with configuration settings that are specific to a particular area of its functionality. + +Assume that you are using Spring Batch and you rely on the auto-configuration for it. 
+You could define your `@SpringBootApplication` as follows: + +``` +import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +@EnableBatchProcessing +public class MyApplication { + + // ... + +} + +``` + +Because this class is the source configuration for the test, any slice test actually tries to start Spring Batch, which is definitely not what you want to do. +A recommended approach is to move that area-specific configuration to a separate `@Configuration` class at the same level as your application, as shown in the following example: + +``` +import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +@EnableBatchProcessing +public class MyBatchConfiguration { + + // ... + +} + +``` + +| |Depending on the complexity of your application, you may either have a single `@Configuration` class for your customizations or one class per domain area.
The latter approach lets you enable it in one of your tests, if necessary, with the `@Import` annotation.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Test slices exclude `@Configuration` classes from scanning. +For example, for a `@WebMvcTest`, the following configuration will not include the given `WebMvcConfigurer` bean in the application context loaded by the test slice: + +``` +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; + +@Configuration(proxyBeanMethods = false) +public class MyWebConfiguration { + + @Bean + public WebMvcConfigurer testConfigurer() { + return new WebMvcConfigurer() { + // ... + }; + } + +} + +``` + +The configuration below will, however, cause the custom `WebMvcConfigurer` to be loaded by the test slice. + +``` +import org.springframework.stereotype.Component; +import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; + +@Component +public class MyWebMvcConfigurer implements WebMvcConfigurer { + + // ... + +} + +``` + +Another source of confusion is classpath scanning. +Assume that, while you structured your code in a sensible way, you need to scan an additional package. +Your application may resemble the following code: + +``` +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.context.annotation.ComponentScan; + +@SpringBootApplication +@ComponentScan({ "com.example.app", "com.example.another" }) +public class MyApplication { + + // ... + +} + +``` + +Doing so effectively overrides the default component scan directive with the side effect of scanning those two packages regardless of the slice that you chose. 
+For instance, a `@DataJpaTest` will suddenly scan components and user configurations of your application. +Again, moving the custom directive to a separate class is a good way to fix this issue. + +| |If this is not an option for you, you can create a `@SpringBootConfiguration` somewhere in the hierarchy of your test so that it is used instead.
Alternatively, you can specify a source for your test, which disables the behavior of finding a default one.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.29. Using Spock to Test Spring Boot Applications + +Spock 2.x can be used to test a Spring Boot application. +To do so, add a dependency on Spock’s `spock-spring` module to your application’s build. `spock-spring` integrates Spring’s test framework into Spock. +See [the documentation for Spock’s Spring module](https://spockframework.org/spock/docs/2.0/modules.html#_spring_module) for further details. + +### 8.4. Test Utilities + +A few test utility classes that are generally useful when testing your application are packaged as part of `spring-boot`. + +#### 8.4.1. ConfigDataApplicationContextInitializer + +`ConfigDataApplicationContextInitializer` is an `ApplicationContextInitializer` that you can apply to your tests to load Spring Boot `application.properties` files. +You can use it when you do not need the full set of features provided by `@SpringBootTest`, as shown in the following example: + +``` +import org.springframework.boot.test.context.ConfigDataApplicationContextInitializer; +import org.springframework.test.context.ContextConfiguration; + +@ContextConfiguration(classes = Config.class, initializers = ConfigDataApplicationContextInitializer.class) +class MyConfigFileTests { + + // ... + +} + +``` + +| |Using `ConfigDataApplicationContextInitializer` alone does not provide support for `@Value("${…​}")` injection.
Its only job is to ensure that `application.properties` files are loaded into Spring’s `Environment`.
For `@Value` support, you need to either additionally configure a `PropertySourcesPlaceholderConfigurer` or use `@SpringBootTest`, which auto-configures one for you.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.4.2. TestPropertyValues + +`TestPropertyValues` lets you quickly add properties to a `ConfigurableEnvironment` or `ConfigurableApplicationContext`. +You can call it with `key=value` strings, as follows: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.boot.test.util.TestPropertyValues; +import org.springframework.mock.env.MockEnvironment; + +import static org.assertj.core.api.Assertions.assertThat; + +class MyEnvironmentTests { + + @Test + void testPropertySources() { + MockEnvironment environment = new MockEnvironment(); + TestPropertyValues.of("org=Spring", "name=Boot").applyTo(environment); + assertThat(environment.getProperty("name")).isEqualTo("Boot"); + } + +} + +``` + +#### 8.4.3. OutputCapture + +`OutputCapture` is a JUnit `Extension` that you can use to capture `System.out` and `System.err` output. 
+To use, add `@ExtendWith(OutputCaptureExtension.class)` and inject `CapturedOutput` as an argument to your test class constructor or test method as follows: + +``` +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; + +import org.springframework.boot.test.system.CapturedOutput; +import org.springframework.boot.test.system.OutputCaptureExtension; + +import static org.assertj.core.api.Assertions.assertThat; + +@ExtendWith(OutputCaptureExtension.class) +class MyOutputCaptureTests { + + @Test + void testName(CapturedOutput output) { + System.out.println("Hello World!"); + assertThat(output).contains("World"); + } + +} + +``` + +#### 8.4.4. TestRestTemplate + +`TestRestTemplate` is a convenience alternative to Spring’s `RestTemplate` that is useful in integration tests. +You can get a vanilla template or one that sends Basic HTTP authentication (with a username and password). +In either case, the template is fault tolerant. +This means that it behaves in a test-friendly way by not throwing exceptions on 4xx and 5xx errors. +Instead, such errors can be detected through the returned `ResponseEntity` and its status code. + +| |Spring Framework 5.0 provides a new `WebTestClient` that works for [WebFlux integration tests](#features.testing.spring-boot-applications.spring-webflux-tests) and both [WebFlux and MVC end-to-end testing](#features.testing.spring-boot-applications.with-running-server).
It provides a fluent API for assertions, unlike `TestRestTemplate`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +It is recommended, but not mandatory, to use the Apache HTTP Client (version 4.3.2 or better). +If you have that on your classpath, the `TestRestTemplate` responds by configuring the client appropriately. +If you do use Apache’s HTTP client, some additional test-friendly features are enabled: + +* Redirects are not followed (so you can assert the response location). + +* Cookies are ignored (so the template is stateless). + +`TestRestTemplate` can be instantiated directly in your integration tests, as shown in the following example: + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.boot.test.web.client.TestRestTemplate; +import org.springframework.http.ResponseEntity; + +import static org.assertj.core.api.Assertions.assertThat; + +class MyTests { + + private TestRestTemplate template = new TestRestTemplate(); + + @Test + void testRequest() throws Exception { + ResponseEntity headers = this.template.getForEntity("https://myhost.example.com/example", String.class); + assertThat(headers.getHeaders().getLocation()).hasHost("other.example.com"); + } + +} + +``` + +Alternatively, if you use the `@SpringBootTest` annotation with `WebEnvironment.RANDOM_PORT` or `WebEnvironment.DEFINED_PORT`, you can inject a fully configured `TestRestTemplate` and start using it. +If necessary, additional customizations can be applied through the `RestTemplateBuilder` bean. 
+Any URLs that do not specify a host and port automatically connect to the embedded server, as shown in the following example: + +``` +import java.time.Duration; + +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.SpringBootTest.WebEnvironment; +import org.springframework.boot.test.context.TestConfiguration; +import org.springframework.boot.test.web.client.TestRestTemplate; +import org.springframework.boot.web.client.RestTemplateBuilder; +import org.springframework.context.annotation.Bean; +import org.springframework.http.HttpHeaders; + +import static org.assertj.core.api.Assertions.assertThat; + +@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT) +class MySpringBootTests { + + @Autowired + private TestRestTemplate template; + + @Test + void testRequest() { + HttpHeaders headers = this.template.getForEntity("/example", String.class).getHeaders(); + assertThat(headers.getLocation()).hasHost("other.example.com"); + } + + @TestConfiguration(proxyBeanMethods = false) + static class RestTemplateBuilderConfiguration { + + @Bean + RestTemplateBuilder restTemplateBuilder() { + return new RestTemplateBuilder().setConnectTimeout(Duration.ofSeconds(1)) + .setReadTimeout(Duration.ofSeconds(1)); + } + + } + +} + +``` + +## 9. Creating Your Own Auto-configuration + +If you work in a company that develops shared libraries, or if you work on an open-source or commercial library, you might want to develop your own auto-configuration. +Auto-configuration classes can be bundled in external jars and still be picked-up by Spring Boot. + +Auto-configuration can be associated to a “starter” that provides the auto-configuration code as well as the typical libraries that you would use with it. 
+We first cover what you need to know to build your own auto-configuration and then we move on to the [typical steps required to create a custom starter](#features.developing-auto-configuration.custom-starter). + +| |A [demo project](https://github.com/snicoll-demos/spring-boot-master-auto-configuration) is available to showcase how you can create a starter step-by-step.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 9.1. Understanding Auto-configured Beans + +Under the hood, auto-configuration is implemented with standard `@Configuration` classes. +Additional `@Conditional` annotations are used to constrain when the auto-configuration should apply. +Usually, auto-configuration classes use `@ConditionalOnClass` and `@ConditionalOnMissingBean` annotations. +This ensures that auto-configuration applies only when relevant classes are found and when you have not declared your own `@Configuration`. + +You can browse the source code of [`spring-boot-autoconfigure`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure) to see the `@Configuration` classes that Spring provides (see the [`META-INF/spring.factories`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/resources/META-INF/spring.factories) file). + +### 9.2. Locating Auto-configuration Candidates + +Spring Boot checks for the presence of a `META-INF/spring.factories` file within your published jar. 
+The file should list your configuration classes under the `EnableAutoConfiguration` key, as shown in the following example: + +``` +org.springframework.boot.autoconfigure.EnableAutoConfiguration=\ +com.mycorp.libx.autoconfigure.LibXAutoConfiguration,\ +com.mycorp.libx.autoconfigure.LibXWebAutoConfiguration +``` + +| |Auto-configurations must be loaded that way *only*.
Make sure that they are defined in a specific package space and that they are never the target of component scanning.
Furthermore, auto-configuration classes should not enable component scanning to find additional components.
Specific `@Import`s should be used instead.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can use the [`@AutoConfigureAfter`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigureAfter.java) or [`@AutoConfigureBefore`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigureBefore.java) annotations if your configuration needs to be applied in a specific order. +For example, if you provide web-specific configuration, your class may need to be applied after `WebMvcAutoConfiguration`. + +If you want to order certain auto-configurations that should not have any direct knowledge of each other, you can also use `@AutoConfigureOrder`. +That annotation has the same semantic as the regular `@Order` annotation but provides a dedicated order for auto-configuration classes. + +As with standard `@Configuration` classes, the order in which auto-configuration classes are applied only affects the order in which their beans are defined. +The order in which those beans are subsequently created is unaffected and is determined by each bean’s dependencies and any `@DependsOn` relationships. + +### 9.3. Condition Annotations + +You almost always want to include one or more `@Conditional` annotations on your auto-configuration class. +The `@ConditionalOnMissingBean` annotation is one common example that is used to allow developers to override auto-configuration if they are not happy with your defaults. 
+ +Spring Boot includes a number of `@Conditional` annotations that you can reuse in your own code by annotating `@Configuration` classes or individual `@Bean` methods. +These annotations include: + +* [Class Conditions](#features.developing-auto-configuration.condition-annotations.class-conditions) + +* [Bean Conditions](#features.developing-auto-configuration.condition-annotations.bean-conditions) + +* [Property Conditions](#features.developing-auto-configuration.condition-annotations.property-conditions) + +* [Resource Conditions](#features.developing-auto-configuration.condition-annotations.resource-conditions) + +* [Web Application Conditions](#features.developing-auto-configuration.condition-annotations.web-application-conditions) + +* [SpEL Expression Conditions](#features.developing-auto-configuration.condition-annotations.spel-conditions) + +#### 9.3.1. Class Conditions + +The `@ConditionalOnClass` and `@ConditionalOnMissingClass` annotations let `@Configuration` classes be included based on the presence or absence of specific classes. +Due to the fact that annotation metadata is parsed by using [ASM](https://asm.ow2.io/), you can use the `value` attribute to refer to the real class, even though that class might not actually appear on the running application classpath. +You can also use the `name` attribute if you prefer to specify the class name by using a `String` value. + +This mechanism does not apply the same way to `@Bean` methods where typically the return type is the target of the condition: before the condition on the method applies, the JVM will have loaded the class and potentially processed method references which will fail if the class is not present. 
+ +To handle this scenario, a separate `@Configuration` class can be used to isolate the condition, as shown in the following example: + +``` +import org.springframework.boot.autoconfigure.condition.ConditionalOnClass; +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +// Some conditions ... +public class MyAutoConfiguration { + + // Auto-configured beans ... + + @Configuration(proxyBeanMethods = false) + @ConditionalOnClass(SomeService.class) + public static class SomeServiceConfiguration { + + @Bean + @ConditionalOnMissingBean + public SomeService someService() { + return new SomeService(); + } + + } + +} + +``` + +| |If you use `@ConditionalOnClass` or `@ConditionalOnMissingClass` as a part of a meta-annotation to compose your own composed annotations, you must use `name` as referring to the class in such a case is not handled.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 9.3.2. Bean Conditions + +The `@ConditionalOnBean` and `@ConditionalOnMissingBean` annotations let a bean be included based on the presence or absence of specific beans. +You can use the `value` attribute to specify beans by type or `name` to specify beans by name. +The `search` attribute lets you limit the `ApplicationContext` hierarchy that should be considered when searching for beans. 
+ +When placed on a `@Bean` method, the target type defaults to the return type of the method, as shown in the following example: + +``` +import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyAutoConfiguration { + + @Bean + @ConditionalOnMissingBean + public SomeService someService() { + return new SomeService(); + } + +} + +``` + +In the preceding example, the `someService` bean is going to be created if no bean of type `SomeService` is already contained in the `ApplicationContext`. + +| |You need to be very careful about the order in which bean definitions are added, as these conditions are evaluated based on what has been processed so far.
For this reason, we recommend using only `@ConditionalOnBean` and `@ConditionalOnMissingBean` annotations on auto-configuration classes (since these are guaranteed to load after any user-defined bean definitions have been added).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`@ConditionalOnBean` and `@ConditionalOnMissingBean` do not prevent `@Configuration` classes from being created.
The only difference between using these conditions at the class level and marking each contained `@Bean` method with the annotation is that the former prevents registration of the `@Configuration` class as a bean if the condition does not match.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When declaring a `@Bean` method, provide as much type information as possible in the method’s return type.
For example, if your bean’s concrete class implements an interface, the bean method’s return type should be the concrete class and not the interface.
Providing as much type information as possible in `@Bean` methods is particularly important when using bean conditions as their evaluation can only rely upon the type information that is available in the method signature.|
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 9.3.3. Property Conditions
+
+The `@ConditionalOnProperty` annotation lets configuration be included based on a Spring Environment property.
+Use the `prefix` and `name` attributes to specify the property that should be checked.
+By default, any property that exists and is not equal to `false` is matched.
+You can also create more advanced checks by using the `havingValue` and `matchIfMissing` attributes.
+
+#### 9.3.4. Resource Conditions
+
+The `@ConditionalOnResource` annotation lets configuration be included only when a specific resource is present.
+Resources can be specified by using the usual Spring conventions, as shown in the following example: `file:/home/user/test.dat`.
+
+#### 9.3.5. Web Application Conditions
+
+The `@ConditionalOnWebApplication` and `@ConditionalOnNotWebApplication` annotations let configuration be included depending on whether the application is a “web application”.
+A servlet-based web application is any application that uses a Spring `WebApplicationContext`, defines a `session` scope, or has a `ConfigurableWebEnvironment`.
+A reactive web application is any application that uses a `ReactiveWebApplicationContext`, or has a `ConfigurableReactiveWebEnvironment`. 
+ +The `@ConditionalOnWarDeployment` annotation lets configuration be included depending on whether the application is a traditional WAR application that is deployed to a container. +This condition will not match for applications that are run with an embedded server. + +#### 9.3.6. SpEL Expression Conditions + +The `@ConditionalOnExpression` annotation lets configuration be included based on the result of a [SpEL expression](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/core.html#expressions). + +| |Referencing a bean in the expression will cause that bean to be initialized very early in context refresh processing.
As a result, the bean won’t be eligible for post-processing (such as configuration properties binding) and its state may be incomplete.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 9.4. Testing your Auto-configuration
+
+An auto-configuration can be affected by many factors: user configuration (`@Bean` definition and `Environment` customization), condition evaluation (presence of a particular library), and others.
+Concretely, each test should create a well defined `ApplicationContext` that represents a combination of those customizations. `ApplicationContextRunner` provides a great way to achieve that.
+
+`ApplicationContextRunner` is usually defined as a field of the test class to gather the base, common configuration.
+The following example makes sure that `MyServiceAutoConfiguration` is always invoked:
+
+```
+private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
+        .withConfiguration(AutoConfigurations.of(MyServiceAutoConfiguration.class));
+
+```
+
+| |If multiple auto-configurations have to be defined, there is no need to order their declarations as they are invoked in the exact same order as when running the application.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+Each test can use the runner to represent a particular use case.
+For instance, the sample below invokes a user configuration (`UserConfiguration`) and checks that the auto-configuration backs off properly.
+Invoking `run` provides a callback context that can be used with `AssertJ`. 
+ +``` +@Test +void defaultServiceBacksOff() { + this.contextRunner.withUserConfiguration(UserConfiguration.class).run((context) -> { + assertThat(context).hasSingleBean(MyService.class); + assertThat(context).getBean("myCustomService").isSameAs(context.getBean(MyService.class)); + }); +} + +@Configuration(proxyBeanMethods = false) +static class UserConfiguration { + + @Bean + MyService myCustomService() { + return new MyService("mine"); + } + +} + +``` + +It is also possible to easily customize the `Environment`, as shown in the following example: + +``` +@Test +void serviceNameCanBeConfigured() { + this.contextRunner.withPropertyValues("user.name=test123").run((context) -> { + assertThat(context).hasSingleBean(MyService.class); + assertThat(context.getBean(MyService.class).getName()).isEqualTo("test123"); + }); +} + +``` + +The runner can also be used to display the `ConditionEvaluationReport`. +The report can be printed at `INFO` or `DEBUG` level. +The following example shows how to use the `ConditionEvaluationReportLoggingListener` to print the report in auto-configuration tests. + +``` +import org.junit.jupiter.api.Test; + +import org.springframework.boot.autoconfigure.logging.ConditionEvaluationReportLoggingListener; +import org.springframework.boot.logging.LogLevel; +import org.springframework.boot.test.context.runner.ApplicationContextRunner; + +class MyConditionEvaluationReportingTests { + + @Test + void autoConfigTest() { + new ApplicationContextRunner() + .withInitializer(new ConditionEvaluationReportLoggingListener(LogLevel.INFO)) + .run((context) -> { + // Test something... + }); + } + +} + +``` + +#### 9.4.1. Simulating a Web Context + +If you need to test an auto-configuration that only operates in a servlet or reactive web application context, use the `WebApplicationContextRunner` or `ReactiveWebApplicationContextRunner` respectively. + +#### 9.4.2. 
Overriding the Classpath + +It is also possible to test what happens when a particular class and/or package is not present at runtime. +Spring Boot ships with a `FilteredClassLoader` that can easily be used by the runner. +In the following example, we assert that if `MyService` is not present, the auto-configuration is properly disabled: + +``` +@Test +void serviceIsIgnoredIfLibraryIsNotPresent() { + this.contextRunner.withClassLoader(new FilteredClassLoader(MyService.class)) + .run((context) -> assertThat(context).doesNotHaveBean("myService")); +} + +``` + +### 9.5. Creating Your Own Starter + +A typical Spring Boot starter contains code to auto-configure and customize the infrastructure of a given technology, let’s call that "acme". +To make it easily extensible, a number of configuration keys in a dedicated namespace can be exposed to the environment. +Finally, a single "starter" dependency is provided to help users get started as easily as possible. + +Concretely, a custom starter can contain the following: + +* The `autoconfigure` module that contains the auto-configuration code for "acme". + +* The `starter` module that provides a dependency to the `autoconfigure` module as well as "acme" and any additional dependencies that are typically useful. + In a nutshell, adding the starter should provide everything needed to start using that library. + +This separation in two modules is in no way necessary. +If "acme" has several flavors, options or optional features, then it is better to separate the auto-configuration as you can clearly express the fact some features are optional. +Besides, you have the ability to craft a starter that provides an opinion about those optional dependencies. +At the same time, others can rely only on the `autoconfigure` module and craft their own starter with different opinions. + +If the auto-configuration is relatively straightforward and does not have optional feature, merging the two modules in the starter is definitely an option. 
+ +#### 9.5.1. Naming + +You should make sure to provide a proper namespace for your starter. +Do not start your module names with `spring-boot`, even if you use a different Maven `groupId`. +We may offer official support for the thing you auto-configure in the future. + +As a rule of thumb, you should name a combined module after the starter. +For example, assume that you are creating a starter for "acme" and that you name the auto-configure module `acme-spring-boot` and the starter `acme-spring-boot-starter`. +If you only have one module that combines the two, name it `acme-spring-boot-starter`. + +#### 9.5.2. Configuration keys + +If your starter provides configuration keys, use a unique namespace for them. +In particular, do not include your keys in the namespaces that Spring Boot uses (such as `server`, `management`, `spring`, and so on). +If you use the same namespace, we may modify these namespaces in the future in ways that break your modules. +As a rule of thumb, prefix all your keys with a namespace that you own (for example `acme`). + +Make sure that configuration keys are documented by adding field javadoc for each property, as shown in the following example: + +``` +import java.time.Duration; + +import org.springframework.boot.context.properties.ConfigurationProperties; + +@ConfigurationProperties("acme") +public class AcmeProperties { + + /** + * Whether to check the location of acme resources. + */ + private boolean checkLocation = true; + + /** + * Timeout for establishing a connection to the acme server. + */ + private Duration loginTimeout = Duration.ofSeconds(3); + + // getters/setters ... 
+ + public boolean isCheckLocation() { + return this.checkLocation; + } + + public void setCheckLocation(boolean checkLocation) { + this.checkLocation = checkLocation; + } + + public Duration getLoginTimeout() { + return this.loginTimeout; + } + + public void setLoginTimeout(Duration loginTimeout) { + this.loginTimeout = loginTimeout; + } + +} + +``` + +| |You should only use plain text with `@ConfigurationProperties` field Javadoc, since they are not processed before being added to the JSON.| +|---|------------------------------------------------------------------------------------------------------------------------------------------| + +Here are some rules we follow internally to make sure descriptions are consistent: + +* Do not start the description by "The" or "A". + +* For `boolean` types, start the description with "Whether" or "Enable". + +* For collection-based types, start the description with "Comma-separated list" + +* Use `java.time.Duration` rather than `long` and describe the default unit if it differs from milliseconds, such as "If a duration suffix is not specified, seconds will be used". + +* Do not provide the default value in the description unless it has to be determined at runtime. + +Make sure to [trigger meta-data generation](configuration-metadata.html#appendix.configuration-metadata.annotation-processor) so that IDE assistance is available for your keys as well. +You may want to review the generated metadata (`META-INF/spring-configuration-metadata.json`) to make sure your keys are properly documented. +Using your own starter in a compatible IDE is also a good idea to validate that quality of the metadata. + +#### 9.5.3. The “autoconfigure” Module + +The `autoconfigure` module contains everything that is necessary to get started with the library. +It may also contain configuration key definitions (such as `@ConfigurationProperties`) and any callback interface that can be used to further customize how the components are initialized. 
+ +| |You should mark the dependencies to the library as optional so that you can include the `autoconfigure` module in your projects more easily.
If you do it that way, the library is not provided and, by default, Spring Boot backs off.|
+|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+Spring Boot uses an annotation processor to collect the conditions on auto-configurations in a metadata file (`META-INF/spring-autoconfigure-metadata.properties`).
+If that file is present, it is used to eagerly filter auto-configurations that do not match, which will improve startup time.
+It is recommended to add the following dependency in a module that contains auto-configurations:
+
+```
+<dependency>
+    <groupId>org.springframework.boot</groupId>
+    <artifactId>spring-boot-autoconfigure-processor</artifactId>
+    <optional>true</optional>
+</dependency>
+```
+
+If you have defined auto-configurations directly in your application, make sure to configure the `spring-boot-maven-plugin` to prevent the `repackage` goal from adding the dependency into the fat jar:
+
+```
+<project>
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-maven-plugin</artifactId>
+                <configuration>
+                    <excludes>
+                        <exclude>
+                            <groupId>org.springframework.boot</groupId>
+                            <artifactId>spring-boot-autoconfigure-processor</artifactId>
+                        </exclude>
+                    </excludes>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
+```
+
+With Gradle 4.5 and earlier, the dependency should be declared in the `compileOnly` configuration, as shown in the following example:
+
+```
+dependencies {
+    compileOnly "org.springframework.boot:spring-boot-autoconfigure-processor"
+}
+```
+
+With Gradle 4.6 and later, the dependency should be declared in the `annotationProcessor` configuration, as shown in the following example:
+
+```
+dependencies {
+    annotationProcessor "org.springframework.boot:spring-boot-autoconfigure-processor"
+}
+```
+
+#### 9.5.4. Starter Module
+
+The starter is really an empty jar.
+Its only purpose is to provide the necessary dependencies to work with the library.
+You can think of it as an opinionated view of what is required to get started.
+
+Do not make assumptions about the project in which your starter is added. 
+If the library you are auto-configuring typically requires other starters, mention them as well. +Providing a proper set of *default* dependencies may be hard if the number of optional dependencies is high, as you should avoid including dependencies that are unnecessary for a typical usage of the library. +In other words, you should not include optional dependencies. + +| |Either way, your starter must reference the core Spring Boot starter (`spring-boot-starter`) directly or indirectly (there is no need to add it if your starter relies on another starter).
If a project is created with only your custom starter, Spring Boot’s core features will be honoured by the presence of the core starter.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 10. Kotlin support + +[Kotlin](https://kotlinlang.org) is a statically-typed language targeting the JVM (and other platforms) which allows writing concise and elegant code while providing [interoperability](https://kotlinlang.org/docs/reference/java-interop.html) with existing libraries written in Java. + +Spring Boot provides Kotlin support by leveraging the support in other Spring projects such as Spring Framework, Spring Data, and Reactor. +See the [Spring Framework Kotlin support documentation](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/languages.html#kotlin) for more information. + +The easiest way to start with Spring Boot and Kotlin is to follow [this comprehensive tutorial](https://spring.io/guides/tutorials/spring-boot-kotlin/). +You can create new Kotlin projects by using [start.spring.io](https://start.spring.io/#!language=kotlin). +Feel free to join the #spring channel of [Kotlin Slack](https://slack.kotlinlang.org/) or ask a question with the `spring` and `kotlin` tags on [Stack Overflow](https://stackoverflow.com/questions/tagged/spring+kotlin) if you need support. + +### 10.1. Requirements + +Spring Boot requires at least Kotlin 1.3.x and manages a suitable Kotlin version through dependency management. +To use Kotlin, `org.jetbrains.kotlin:kotlin-stdlib` and `org.jetbrains.kotlin:kotlin-reflect` must be present on the classpath. +The `kotlin-stdlib` variants `kotlin-stdlib-jdk7` and `kotlin-stdlib-jdk8` can also be used. 
+ +Since [Kotlin classes are final by default](https://discuss.kotlinlang.org/t/classes-final-by-default/166), you are likely to want to configure [kotlin-spring](https://kotlinlang.org/docs/reference/compiler-plugins.html#spring-support) plugin in order to automatically open Spring-annotated classes so that they can be proxied. + +[Jackson’s Kotlin module](https://github.com/FasterXML/jackson-module-kotlin) is required for serializing / deserializing JSON data in Kotlin. +It is automatically registered when found on the classpath. +A warning message is logged if Jackson and Kotlin are present but the Jackson Kotlin module is not. + +| |These dependencies and plugins are provided by default if one bootstraps a Kotlin project on [start.spring.io](https://start.spring.io/#!language=kotlin).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 10.2. Null-safety + +One of Kotlin’s key features is [null-safety](https://kotlinlang.org/docs/reference/null-safety.html). +It deals with `null` values at compile time rather than deferring the problem to runtime and encountering a `NullPointerException`. +This helps to eliminate a common source of bugs without paying the cost of wrappers like `Optional`. +Kotlin also allows using functional constructs with nullable values as described in this [comprehensive guide to null-safety in Kotlin](https://www.baeldung.com/kotlin-null-safety). + +Although Java does not allow one to express null-safety in its type system, Spring Framework, Spring Data, and Reactor now provide null-safety of their API through tooling-friendly annotations. 
+By default, types from Java APIs used in Kotlin are recognized as [platform types](https://kotlinlang.org/docs/reference/java-interop.html#null-safety-and-platform-types) for which null-checks are relaxed. [Kotlin’s support for JSR 305 annotations](https://kotlinlang.org/docs/reference/java-interop.html#jsr-305-support) combined with nullability annotations provide null-safety for the related Spring API in Kotlin.
+
+The JSR 305 checks can be configured by adding the `-Xjsr305` compiler flag with the following options: `-Xjsr305={strict|warn|ignore}`.
+The default behavior is the same as `-Xjsr305=warn`.
+The `strict` value is required to have null-safety taken into account in Kotlin types inferred from Spring API but should be used with the knowledge that Spring API nullability declaration could evolve even between minor releases and more checks may be added in the future.
+
+| |Generic type arguments, varargs and array elements nullability are not yet supported.
See [SPR-15942](https://jira.spring.io/browse/SPR-15942) for up-to-date information.
Also be aware that Spring Boot’s own API is [not yet annotated](https://github.com/spring-projects/spring-boot/issues/10712).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 10.3. Kotlin API + +#### 10.3.1. runApplication + +Spring Boot provides an idiomatic way to run an application with `runApplication(*args)` as shown in the following example: + +``` +import org.springframework.boot.autoconfigure.SpringBootApplication +import org.springframework.boot.runApplication + +@SpringBootApplication +class MyApplication + +fun main(args: Array) { + runApplication(*args) +} + +``` + +This is a drop-in replacement for `SpringApplication.run(MyApplication::class.java, *args)`. +It also allows customization of the application as shown in the following example: + +``` +runApplication(*args) { + setBannerMode(OFF) +} + +``` + +#### 10.3.2. Extensions + +Kotlin [extensions](https://kotlinlang.org/docs/reference/extensions.html) provide the ability to extend existing classes with additional functionality. +The Spring Boot Kotlin API makes use of these extensions to add new Kotlin specific conveniences to existing APIs. + +`TestRestTemplate` extensions, similar to those provided by Spring Framework for `RestOperations` in Spring Framework, are provided. +Among other things, the extensions make it possible to take advantage of Kotlin reified type parameters. + +### 10.4. Dependency management + +In order to avoid mixing different versions of Kotlin dependencies on the classpath, Spring Boot imports the Kotlin BOM. + +With Maven, the Kotlin version can be customized by setting the `kotlin.version` property and plugin management is provided for `kotlin-maven-plugin`. 
+With Gradle, the Spring Boot plugin automatically aligns the `kotlin.version` with the version of the Kotlin plugin. + +Spring Boot also manages the version of Coroutines dependencies by importing the Kotlin Coroutines BOM. +The version can be customized by setting the `kotlin-coroutines.version` property. + +| |`org.jetbrains.kotlinx:kotlinx-coroutines-reactor` dependency is provided by default if one bootstraps a Kotlin project with at least one reactive dependency on [start.spring.io](https://start.spring.io/#!language=kotlin).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 10.5. @ConfigurationProperties + +`@ConfigurationProperties` when used in combination with [`@ConstructorBinding`](#features.external-config.typesafe-configuration-properties.constructor-binding) supports classes with immutable `val` properties as shown in the following example: + +``` +@ConstructorBinding +@ConfigurationProperties("example.kotlin") +data class KotlinExampleProperties( + val name: String, + val description: String, + val myService: MyService) { + + data class MyService( + val apiToken: String, + val uri: URI + ) +} + +``` + +| |To generate [your own metadata](configuration-metadata.html#appendix.configuration-metadata.annotation-processor) using the annotation processor, [`kapt` should be configured](https://kotlinlang.org/docs/reference/kapt.html) with the `spring-boot-configuration-processor` dependency.
Note that some features (such as detecting the default value or deprecated items) are not working due to limitations in the model kapt provides.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 10.6. Testing + +While it is possible to use JUnit 4 to test Kotlin code, JUnit 5 is provided by default and is recommended. +JUnit 5 enables a test class to be instantiated once and reused for all of the class’s tests. +This makes it possible to use `@BeforeAll` and `@AfterAll` annotations on non-static methods, which is a good fit for Kotlin. + +To mock Kotlin classes, [MockK](https://mockk.io/) is recommended. +If you need the `Mockk` equivalent of the Mockito specific [`@MockBean` and `@SpyBean` annotations](#features.testing.spring-boot-applications.mocking-beans), you can use [SpringMockK](https://github.com/Ninja-Squad/springmockk) which provides similar `@MockkBean` and `@SpykBean` annotations. + +### 10.7. Resources + +#### 10.7.1. 
Further reading + +* [Kotlin language reference](https://kotlinlang.org/docs/reference/) + +* [Kotlin Slack](https://kotlinlang.slack.com/) (with a dedicated #spring channel) + +* [Stackoverflow with `spring` and `kotlin` tags](https://stackoverflow.com/questions/tagged/spring+kotlin) + +* [Try Kotlin in your browser](https://try.kotlinlang.org/) + +* [Kotlin blog](https://blog.jetbrains.com/kotlin/) + +* [Awesome Kotlin](https://kotlin.link/) + +* [Tutorial: building web applications with Spring Boot and Kotlin](https://spring.io/guides/tutorials/spring-boot-kotlin/) + +* [Developing Spring Boot applications with Kotlin](https://spring.io/blog/2016/02/15/developing-spring-boot-applications-with-kotlin) + +* [A Geospatial Messenger with Kotlin, Spring Boot and PostgreSQL](https://spring.io/blog/2016/03/20/a-geospatial-messenger-with-kotlin-spring-boot-and-postgresql) + +* [Introducing Kotlin support in Spring Framework 5.0](https://spring.io/blog/2017/01/04/introducing-kotlin-support-in-spring-framework-5-0) + +* [Spring Framework 5 Kotlin APIs, the functional way](https://spring.io/blog/2017/08/01/spring-framework-5-kotlin-apis-the-functional-way) + +#### 10.7.2. 
Examples + +* [spring-boot-kotlin-demo](https://github.com/sdeleuze/spring-boot-kotlin-demo): regular Spring Boot + Spring Data JPA project + +* [mixit](https://github.com/mixitconf/mixit): Spring Boot 2 + WebFlux + Reactive Spring Data MongoDB + +* [spring-kotlin-fullstack](https://github.com/sdeleuze/spring-kotlin-fullstack): WebFlux Kotlin fullstack example with Kotlin2js for frontend instead of JavaScript or TypeScript + +* [spring-petclinic-kotlin](https://github.com/spring-petclinic/spring-petclinic-kotlin): Kotlin version of the Spring PetClinic Sample Application + +* [spring-kotlin-deepdive](https://github.com/sdeleuze/spring-kotlin-deepdive): a step by step migration for Boot 1.0 + Java to Boot 2.0 + Kotlin + +* [spring-boot-coroutines-demo](https://github.com/sdeleuze/spring-boot-coroutines-demo): Coroutines sample project + +## 11. What to Read Next + +If you want to learn more about any of the classes discussed in this section, see the [Spring Boot API documentation](https://docs.spring.io/spring-boot/docs/2.6.4/api/) or you can browse the [source code directly](https://github.com/spring-projects/spring-boot/tree/v2.6.4). +If you have specific questions, see the [how-to](howto.html#howto) section. + +If you are comfortable with Spring Boot’s core features, you can continue on and read about [production-ready features](actuator.html#actuator). + diff --git a/docs/en/spring-boot/getting-help.md b/docs/en/spring-boot/getting-help.md new file mode 100644 index 0000000000000000000000000000000000000000..c433bcdc81fa138a9625ec5b1ad1fd53aa821300 --- /dev/null +++ b/docs/en/spring-boot/getting-help.md @@ -0,0 +1,20 @@ +# Getting Help + +If you have trouble with Spring Boot, we would like to help. + +* Try the [How-to documents](howto.html#howto). + They provide solutions to the most common questions. + +* Learn the Spring basics. + Spring Boot builds on many other Spring projects. 
+ Check the [spring.io](https://spring.io) web-site for a wealth of reference documentation. + If you are starting out with Spring, try one of the [guides](https://spring.io/guides). + +* Ask a question. + We monitor [stackoverflow.com](https://stackoverflow.com) for questions tagged with [`spring-boot`](https://stackoverflow.com/tags/spring-boot). + +* Report bugs with Spring Boot at [github.com/spring-projects/spring-boot/issues](https://github.com/spring-projects/spring-boot/issues). + +Note: + +All of Spring Boot is open source, including the documentation. If you find problems with the docs or if you want to improve them, please [get involved](https://github.com/spring-projects/spring-boot/tree/v2.6.4). \ No newline at end of file diff --git a/docs/en/spring-boot/getting-started.md b/docs/en/spring-boot/getting-started.md new file mode 100644 index 0000000000000000000000000000000000000000..2ae725b45a965454644aadfff501017ba0d4c444 --- /dev/null +++ b/docs/en/spring-boot/getting-started.md @@ -0,0 +1,532 @@ +# Getting Started + +If you are getting started with Spring Boot, or “Spring” in general, start by reading this section. +It answers the basic “what?”, “how?” and “why?” questions. +It includes an introduction to Spring Boot, along with installation instructions. +We then walk you through building your first Spring Boot application, discussing some core principles as we go. + +## 1. Introducing Spring Boot + +Spring Boot helps you to create stand-alone, production-grade Spring-based applications that you can run. +We take an opinionated view of the Spring platform and third-party libraries, so that you can get started with minimum fuss. +Most Spring Boot applications need very little Spring configuration. + +You can use Spring Boot to create Java applications that can be started by using `java -jar` or more traditional war deployments. +We also provide a command line tool that runs “spring scripts”. 
+ +Our primary goals are: + +* Provide a radically faster and widely accessible getting-started experience for all Spring development. + +* Be opinionated out of the box but get out of the way quickly as requirements start to diverge from the defaults. + +* Provide a range of non-functional features that are common to large classes of projects (such as embedded servers, security, metrics, health checks, and externalized configuration). + +* Absolutely no code generation and no requirement for XML configuration. + +## 2. System Requirements + +Spring Boot 2.6.4 requires [Java 8](https://www.java.com) and is compatible up to and including Java 17.[Spring Framework 5.3.16](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/) or above is also required. + +Explicit build support is provided for the following build tools: + +|Build Tool| Version | +|----------|---------------------| +| Maven | 3.5+ | +| Gradle |6.8.x, 6.9.x, and 7.x| + +### 2.1. Servlet Containers + +Spring Boot supports the following embedded servlet containers: + +| Name |Servlet Version| +|------------|---------------| +| Tomcat 9.0 | 4.0 | +| Jetty 9.4 | 3.1 | +| Jetty 10.0 | 4.0 | +|Undertow 2.0| 4.0 | + +You can also deploy Spring Boot applications to any servlet 3.1+ compatible container. + +## 3. Installing Spring Boot + +Spring Boot can be used with “classic” Java development tools or installed as a command line tool. +Either way, you need [Java SDK v1.8](https://www.java.com) or higher. +Before you begin, you should check your current Java installation by using the following command: + +``` +$ java -version +``` + +If you are new to Java development or if you want to experiment with Spring Boot, you might want to try the [Spring Boot CLI](#getting-started.installing.cli) (Command Line Interface) first. +Otherwise, read on for “classic” installation instructions. + +### 3.1. 
Installation Instructions for the Java Developer + +You can use Spring Boot in the same way as any standard Java library. +To do so, include the appropriate `spring-boot-*.jar` files on your classpath. +Spring Boot does not require any special tools integration, so you can use any IDE or text editor. +Also, there is nothing special about a Spring Boot application, so you can run and debug a Spring Boot application as you would any other Java program. + +Although you *could* copy Spring Boot jars, we generally recommend that you use a build tool that supports dependency management (such as Maven or Gradle). + +#### 3.1.1. Maven Installation + +Spring Boot is compatible with Apache Maven 3.3 or above. +If you do not already have Maven installed, you can follow the instructions at [maven.apache.org](https://maven.apache.org). + +| |On many operating systems, Maven can be installed with a package manager.
If you use OSX Homebrew, try `brew install maven`.
Ubuntu users can run `sudo apt-get install maven`.
Windows users with [Chocolatey](https://chocolatey.org/) can run `choco install maven` from an elevated (administrator) prompt.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Boot dependencies use the `org.springframework.boot` `groupId`. +Typically, your Maven POM file inherits from the `spring-boot-starter-parent` project and declares dependencies to one or more [“Starters”](using.html#using.build-systems.starters). +Spring Boot also provides an optional [Maven plugin](build-tool-plugins.html#build-tool-plugins.maven) to create executable jars. + +More details on getting started with Spring Boot and Maven can be found in the [Getting Started section](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#getting-started) of the Maven plugin’s reference guide. + +#### 3.1.2. Gradle Installation + +Spring Boot is compatible with Gradle 6.8, 6.9, and 7.x. +If you do not already have Gradle installed, you can follow the instructions at [gradle.org](https://gradle.org). + +Spring Boot dependencies can be declared by using the `org.springframework.boot` `group`. +Typically, your project declares dependencies to one or more [“Starters”](using.html#using.build-systems.starters). +Spring Boot provides a useful [Gradle plugin](build-tool-plugins.html#build-tool-plugins.gradle) that can be used to simplify dependency declarations and to create executable jars. + +Gradle Wrapper + +The Gradle Wrapper provides a nice way of “obtaining” Gradle when you need to build a project. +It is a small script and library that you commit alongside your code to bootstrap the build process. 
+See [docs.gradle.org/current/userguide/gradle\_wrapper.html](https://docs.gradle.org/current/userguide/gradle_wrapper.html) for details. + +More details on getting started with Spring Boot and Gradle can be found in the [Getting Started section](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/#getting-started) of the Gradle plugin’s reference guide. + +### 3.2. Installing the Spring Boot CLI + +The Spring Boot CLI (Command Line Interface) is a command line tool that you can use to quickly prototype with Spring. +It lets you run [Groovy](https://groovy-lang.org/) scripts, which means that you have a familiar Java-like syntax without so much boilerplate code. + +You do not need to use the CLI to work with Spring Boot, but it is a quick way to get a Spring application off the ground without an IDE. + +#### 3.2.1. Manual Installation + +You can download the Spring CLI distribution from the Spring software repository: + +* [spring-boot-cli-2.6.4-bin.zip](https://repo.spring.io/release/org/springframework/boot/spring-boot-cli/2.6.4/spring-boot-cli-2.6.4-bin.zip) + +* [spring-boot-cli-2.6.4-bin.tar.gz](https://repo.spring.io/release/org/springframework/boot/spring-boot-cli/2.6.4/spring-boot-cli-2.6.4-bin.tar.gz) + +Cutting edge[snapshot distributions](https://repo.spring.io/snapshot/org/springframework/boot/spring-boot-cli/) are also available. + +Once downloaded, follow the [INSTALL.txt](https://raw.githubusercontent.com/spring-projects/spring-boot/v2.6.4/spring-boot-project/spring-boot-cli/src/main/content/INSTALL.txt) instructions from the unpacked archive. +In summary, there is a `spring` script (`spring.bat` for Windows) in a `bin/` directory in the `.zip` file. +Alternatively, you can use `java -jar` with the `.jar` file (the script helps you to be sure that the classpath is set correctly). + +#### 3.2.2. Installation with SDKMAN! + +SDKMAN! 
(The Software Development Kit Manager) can be used for managing multiple versions of various binary SDKs, including Groovy and the Spring Boot CLI. +Get SDKMAN! from [sdkman.io](https://sdkman.io) and install Spring Boot by using the following commands: + +``` +$ sdk install springboot +$ spring --version +Spring CLI v2.6.4 +``` + +If you develop features for the CLI and want access to the version you built, use the following commands: + +``` +$ sdk install springboot dev /path/to/spring-boot/spring-boot-cli/target/spring-boot-cli-2.6.4-bin/spring-2.6.4/ +$ sdk default springboot dev +$ spring --version +Spring CLI v2.6.4 +``` + +The preceding instructions install a local instance of `spring` called the `dev` instance. +It points at your target build location, so every time you rebuild Spring Boot, `spring` is up-to-date. + +You can see it by running the following command: + +``` +$ sdk ls springboot + +================================================================================ +Available Springboot Versions +================================================================================ +> + dev +* 2.6.4 + +================================================================================ ++ - local version +* - installed +> - currently in use +================================================================================ +``` + +#### 3.2.3. OSX Homebrew Installation + +If you are on a Mac and use [Homebrew](https://brew.sh/), you can install the Spring Boot CLI by using the following commands: + +``` +$ brew tap spring-io/tap +$ brew install spring-boot +``` + +Homebrew installs `spring` to `/usr/local/bin`. + +| |If you do not see the formula, your installation of brew might be out-of-date.
In that case, run `brew update` and try again.| +|---|---------------------------------------------------------------------------------------------------------------------------------| + +#### 3.2.4. MacPorts Installation + +If you are on a Mac and use [MacPorts](https://www.macports.org/), you can install the Spring Boot CLI by using the following command: + +``` +$ sudo port install spring-boot-cli +``` + +#### 3.2.5. Command-line Completion + +The Spring Boot CLI includes scripts that provide command completion for the [BASH](https://en.wikipedia.org/wiki/Bash_%28Unix_shell%29) and [zsh](https://en.wikipedia.org/wiki/Z_shell) shells. +You can `source` the script (also named `spring`) in any shell or put it in your personal or system-wide bash completion initialization. +On a Debian system, the system-wide scripts are in `/shell-completion/bash` and all scripts in that directory are executed when a new shell starts. +For example, to run the script manually if you have installed by using SDKMAN!, use the following commands: + +``` +$ . ~/.sdkman/candidates/springboot/current/shell-completion/bash/spring +$ spring + grab help jar run test version +``` + +| |If you install the Spring Boot CLI by using Homebrew or MacPorts, the command-line completion scripts are automatically registered with your shell.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.2.6. Windows Scoop Installation + +If you are on a Windows and use [Scoop](https://scoop.sh/), you can install the Spring Boot CLI by using the following commands: + +``` +> scoop bucket add extras +> scoop install springboot +``` + +Scoop installs `spring` to `~/scoop/apps/springboot/current/bin`. + +| |If you do not see the app manifest, your installation of scoop might be out-of-date.
In that case, run `scoop update` and try again.| +|---|----------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.2.7. Quick-start Spring CLI Example + +You can use the following web application to test your installation. +To start, create a file called `app.groovy`, as follows: + +``` +@RestController +class ThisWillActuallyRun { + + @RequestMapping("/") + String home() { + "Hello World!" + } + +} + +``` + +Then run it from a shell, as follows: + +``` +$ spring run app.groovy +``` + +| |The first run of your application is slow, as dependencies are downloaded.
Subsequent runs are much quicker.|
+|---|----------------------------------------------------------------------------------------------------------------|
+
+Open [localhost:8080](http://localhost:8080) in your favorite web browser.
+You should see the following output:
+
+```
+Hello World!
+```
+
+## 4. Developing Your First Spring Boot Application
+
+This section describes how to develop a small “Hello World!” web application that highlights some of Spring Boot’s key features.
+We use Maven to build this project, since most IDEs support it.
+
+| |The [spring.io](https://spring.io) web site contains many “Getting Started” [guides](https://spring.io/guides) that use Spring Boot.
If you need to solve a specific problem, check there first.

You can shortcut the steps below by going to [start.spring.io](https://start.spring.io) and choosing the "Web" starter from the dependencies searcher.
Doing so generates a new project structure so that you can [start coding right away](#getting-started.first-application.code).
Check the [start.spring.io user guide](https://github.com/spring-io/start.spring.io/blob/main/USING.adoc) for more details.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Before we begin, open a terminal and run the following commands to ensure that you have valid versions of Java and Maven installed: + +``` +$ java -version +java version "1.8.0_102" +Java(TM) SE Runtime Environment (build 1.8.0_102-b14) +Java HotSpot(TM) 64-Bit Server VM (build 25.102-b14, mixed mode) +``` + +``` +$ mvn -v +Apache Maven 3.5.4 (1edded0938998edf8bf061f1ceb3cfdeccf443fe; 2018-06-17T14:33:14-04:00) +Maven home: /usr/local/Cellar/maven/3.3.9/libexec +Java version: 1.8.0_102, vendor: Oracle Corporation +``` + +| |This sample needs to be created in its own directory.
Subsequent instructions assume that you have created a suitable directory and that it is your current directory.|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 4.1. Creating the POM
+
+We need to start by creating a Maven `pom.xml` file.
+The `pom.xml` is the recipe that is used to build your project.
+Open your favorite text editor and add the following:
+
+```
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>com.example</groupId>
+    <artifactId>myproject</artifactId>
+    <version>0.0.1-SNAPSHOT</version>
+
+    <parent>
+        <groupId>org.springframework.boot</groupId>
+        <artifactId>spring-boot-starter-parent</artifactId>
+        <version>2.6.4</version>
+    </parent>
+
+    <!-- Additional lines to be added here... -->
+
+</project>
+```
+
+The preceding listing should give you a working build.
+You can test it by running `mvn package` (for now, you can ignore the “jar will be empty - no content was marked for inclusion!” warning).
+
+| |At this point, you could import the project into an IDE (most modern Java IDEs include built-in support for Maven).
For simplicity, we continue to use a plain text editor for this example.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 4.2. Adding Classpath Dependencies
+
+Spring Boot provides a number of “Starters” that let you add jars to your classpath.
+Our applications for smoke tests use the `spring-boot-starter-parent` in the `parent` section of the POM.
+The `spring-boot-starter-parent` is a special starter that provides useful Maven defaults.
+It also provides a [`dependency-management`](using.html#using.build-systems.dependency-management) section so that you can omit `version` tags for “blessed” dependencies.
+
+Other “Starters” provide dependencies that you are likely to need when developing a specific type of application.
+Since we are developing a web application, we add a `spring-boot-starter-web` dependency.
+Before that, we can look at what we currently have by running the following command:
+
+```
+$ mvn dependency:tree
+
+[INFO] com.example:myproject:jar:0.0.1-SNAPSHOT
+```
+
+The `mvn dependency:tree` command prints a tree representation of your project dependencies.
+You can see that `spring-boot-starter-parent` provides no dependencies by itself.
+To add the necessary dependencies, edit your `pom.xml` and add the `spring-boot-starter-web` dependency immediately below the `parent` section:
+
+```
+<dependencies>
+    <dependency>
+        <groupId>org.springframework.boot</groupId>
+        <artifactId>spring-boot-starter-web</artifactId>
+    </dependency>
+</dependencies>
+```
+
+If you run `mvn dependency:tree` again, you see that there are now a number of additional dependencies, including the Tomcat web server and Spring Boot itself.
+
+### 4.3. Writing the Code
+
+To finish our application, we need to create a single Java file.
+By default, Maven compiles sources from `src/main/java`, so you need to create that directory structure and then add a file named `src/main/java/MyApplication.java` to contain the following code: + +``` +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.EnableAutoConfiguration; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@EnableAutoConfiguration +public class MyApplication { + + @RequestMapping("/") + String home() { + return "Hello World!"; + } + + public static void main(String[] args) { + SpringApplication.run(MyApplication.class, args); + } + +} + +``` + +Although there is not much code here, quite a lot is going on. +We step through the important parts in the next few sections. + +#### 4.3.1. The @RestController and @RequestMapping Annotations #### + +The first annotation on our `MyApplication` class is `@RestController`. +This is known as a *stereotype* annotation. +It provides hints for people reading the code and for Spring that the class plays a specific role. +In this case, our class is a web `@Controller`, so Spring considers it when handling incoming web requests. + +The `@RequestMapping` annotation provides “routing” information. +It tells Spring that any HTTP request with the `/` path should be mapped to the `home` method. +The `@RestController` annotation tells Spring to render the resulting string directly back to the caller. + +| |The `@RestController` and `@RequestMapping` annotations are Spring MVC annotations (they are not specific to Spring Boot).
See the [MVC section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc) in the Spring Reference Documentation for more details.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 4.3.2. The @EnableAutoConfiguration Annotation ####
+
+The second class-level annotation is `@EnableAutoConfiguration`.
+This annotation tells Spring Boot to “guess” how you want to configure Spring, based on the jar dependencies that you have added.
+Since `spring-boot-starter-web` added Tomcat and Spring MVC, the auto-configuration assumes that you are developing a web application and sets up Spring accordingly.
+
+Starters and Auto-configuration
+
+Auto-configuration is designed to work well with “Starters”, but the two concepts are not directly tied.
+You are free to pick and choose jar dependencies outside of the starters.
+Spring Boot still does its best to auto-configure your application.
+
+#### 4.3.3. The “main” Method
+
+The final part of our application is the `main` method.
+This is a standard method that follows the Java convention for an application entry point.
+Our main method delegates to Spring Boot’s `SpringApplication` class by calling `run`. `SpringApplication` bootstraps our application, starting Spring, which, in turn, starts the auto-configured Tomcat web server.
+We need to pass `MyApplication.class` as an argument to the `run` method to tell `SpringApplication` which is the primary Spring component.
+The `args` array is also passed through to expose any command-line arguments.
+
+### 4.4. Running the Example
+
+At this point, your application should work.
+Since you used the `spring-boot-starter-parent` POM, you have a useful `run` goal that you can use to start the application.
+Type `mvn spring-boot:run` from the root project directory to start the application. +You should see output similar to the following: + +``` +$ mvn spring-boot:run + + . ____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v2.6.4) +....... . . . +....... . . . (log output here) +....... . . . +........ Started MyApplication in 2.222 seconds (JVM running for 6.514) +``` + +If you open a web browser to `[localhost:8080](http://localhost:8080)`, you should see the following output: + +``` +Hello World! +``` + +To gracefully exit the application, press `ctrl-c`. + +### 4.5. Creating an Executable Jar + +We finish our example by creating a completely self-contained executable jar file that we could run in production. +Executable jars (sometimes called “fat jars”) are archives containing your compiled classes along with all of the jar dependencies that your code needs to run. + +Executable jars and Java + +Java does not provide a standard way to load nested jar files (jar files that are themselves contained within a jar). +This can be problematic if you are looking to distribute a self-contained application. + +To solve this problem, many developers use “uber” jars. +An uber jar packages all the classes from all the application’s dependencies into a single archive. +The problem with this approach is that it becomes hard to see which libraries are in your application. +It can also be problematic if the same filename is used (but with different content) in multiple jars. + +Spring Boot takes a [different approach](executable-jar.html#appendix.executable-jar) and lets you actually nest jars directly. + +To create an executable jar, we need to add the `spring-boot-maven-plugin` to our `pom.xml`. 
+To do so, insert the following lines just below the `dependencies` section:
+
+```
+<build>
+    <plugins>
+        <plugin>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-maven-plugin</artifactId>
+        </plugin>
+    </plugins>
+</build>
+```
+
+| |The `spring-boot-starter-parent` POM includes `<executions>` configuration to bind the `repackage` goal.
If you do not use the parent POM, you need to declare this configuration yourself.
See the [plugin documentation](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#getting-started) for details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Save your `pom.xml` and run `mvn package` from the command line, as follows: + +``` +$ mvn package + +[INFO] Scanning for projects... +[INFO] +[INFO] ------------------------------------------------------------------------ +[INFO] Building myproject 0.0.1-SNAPSHOT +[INFO] ------------------------------------------------------------------------ +[INFO] .... .. +[INFO] --- maven-jar-plugin:2.4:jar (default-jar) @ myproject --- +[INFO] Building jar: /Users/developer/example/spring-boot-example/target/myproject-0.0.1-SNAPSHOT.jar +[INFO] +[INFO] --- spring-boot-maven-plugin:2.6.4:repackage (default) @ myproject --- +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +``` + +If you look in the `target` directory, you should see `myproject-0.0.1-SNAPSHOT.jar`. +The file should be around 10 MB in size. +If you want to peek inside, you can use `jar tvf`, as follows: + +``` +$ jar tvf target/myproject-0.0.1-SNAPSHOT.jar +``` + +You should also see a much smaller file named `myproject-0.0.1-SNAPSHOT.jar.original` in the `target` directory. +This is the original jar file that Maven created before it was repackaged by Spring Boot. + +To run that application, use the `java -jar` command, as follows: + +``` +$ java -jar target/myproject-0.0.1-SNAPSHOT.jar + + . 
____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v2.6.4) +....... . . . +....... . . . (log output here) +....... . . . +........ Started MyApplication in 2.536 seconds (JVM running for 2.864) +``` + +As before, to exit the application, press `ctrl-c`. + +## 5. What to Read Next + +Hopefully, this section provided some of the Spring Boot basics and got you on your way to writing your own applications. +If you are a task-oriented type of developer, you might want to jump over to [spring.io](https://spring.io) and follow some of the [getting started](https://spring.io/guides/) guides that solve specific “How do I do that with Spring?” problems. +We also have Spring Boot-specific “[How-to](howto.html#howto)” reference documentation. + +Otherwise, the next logical step is to read *[using.html](using.html#using)*. +If you are really impatient, you could also jump ahead and read about *[Spring Boot features](features.html#features)*. diff --git a/docs/en/spring-boot/howto.md b/docs/en/spring-boot/howto.md new file mode 100644 index 0000000000000000000000000000000000000000..4e8e036be2f8e7dfc0d568c8ad06ce9bb4ad1a40 --- /dev/null +++ b/docs/en/spring-boot/howto.md @@ -0,0 +1,3523 @@ +# “How-to” Guides + +This section provides answers to some common ‘how do I do that…​’ questions that often arise when using Spring Boot. +Its coverage is not exhaustive, but it does cover quite a lot. + +If you have a specific problem that we do not cover here, you might want to check [stackoverflow.com](https://stackoverflow.com/tags/spring-boot) to see if someone has already provided an answer. +This is also a great place to ask new questions (please use the `spring-boot` tag). + +We are also more than happy to extend this section. 
+If you want to add a ‘how-to’, send us a [pull request](https://github.com/spring-projects/spring-boot/tree/v2.6.4). + +## 1. Spring Boot Application + +This section includes topics relating directly to Spring Boot applications. + +### 1.1. Create Your Own FailureAnalyzer + +[`FailureAnalyzer`](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/diagnostics/FailureAnalyzer.html) is a great way to intercept an exception on startup and turn it into a human-readable message, wrapped in a [`FailureAnalysis`](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/diagnostics/FailureAnalysis.html). +Spring Boot provides such an analyzer for application-context-related exceptions, JSR-303 validations, and more. +You can also create your own. + +`AbstractFailureAnalyzer` is a convenient extension of `FailureAnalyzer` that checks the presence of a specified exception type in the exception to handle. +You can extend from that so that your implementation gets a chance to handle the exception only when it is actually present. +If, for whatever reason, you cannot handle the exception, return `null` to give another implementation a chance to handle the exception. + +`FailureAnalyzer` implementations must be registered in `META-INF/spring.factories`. +The following example registers `ProjectConstraintViolationFailureAnalyzer`: + +``` +org.springframework.boot.diagnostics.FailureAnalyzer=\ +com.example.ProjectConstraintViolationFailureAnalyzer +``` + +| |If you need access to the `BeanFactory` or the `Environment`, your `FailureAnalyzer` can implement `BeanFactoryAware` or `EnvironmentAware` respectively.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.2. 
Troubleshoot Auto-configuration + +The Spring Boot auto-configuration tries its best to “do the right thing”, but sometimes things fail, and it can be hard to tell why. + +There is a really useful `ConditionEvaluationReport` available in any Spring Boot `ApplicationContext`. +You can see it if you enable `DEBUG` logging output. +If you use the `spring-boot-actuator` (see [the Actuator chapter](actuator.html#actuator)), there is also a `conditions` endpoint that renders the report in JSON. +Use that endpoint to debug the application and see what features have been added (and which have not been added) by Spring Boot at runtime. + +Many more questions can be answered by looking at the source code and the Javadoc. +When reading the code, remember the following rules of thumb: + +* Look for classes called `*AutoConfiguration` and read their sources. + Pay special attention to the `@Conditional*` annotations to find out what features they enable and when. + Add `--debug` to the command line or a System property `-Ddebug` to get a log on the console of all the auto-configuration decisions that were made in your app. + In a running application with actuator enabled, look at the `conditions` endpoint (`/actuator/conditions` or the JMX equivalent) for the same information. + +* Look for classes that are `@ConfigurationProperties` (such as [`ServerProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/ServerProperties.java)) and read from there the available external configuration options. + The `@ConfigurationProperties` annotation has a `name` attribute that acts as a prefix to external properties. + Thus, `ServerProperties` has `prefix="server"` and its configuration properties are `server.port`, `server.address`, and others. + In a running application with actuator enabled, look at the `configprops` endpoint. 
+ +* Look for uses of the `bind` method on the `Binder` to pull configuration values explicitly out of the `Environment` in a relaxed manner. + It is often used with a prefix. + +* Look for `@Value` annotations that bind directly to the `Environment`. + +* Look for `@ConditionalOnExpression` annotations that switch features on and off in response to SpEL expressions, normally evaluated with placeholders resolved from the `Environment`. + +### 1.3. Customize the Environment or ApplicationContext Before It Starts ### + +A `SpringApplication` has `ApplicationListeners` and `ApplicationContextInitializers` that are used to apply customizations to the context or environment. +Spring Boot loads a number of such customizations for use internally from `META-INF/spring.factories`. +There is more than one way to register additional customizations: + +* Programmatically, per application, by calling the `addListeners` and `addInitializers` methods on `SpringApplication` before you run it. + +* Declaratively, per application, by setting the `context.initializer.classes` or `context.listener.classes` properties. + +* Declaratively, for all applications, by adding a `META-INF/spring.factories` and packaging a jar file that the applications all use as a library. + +The `SpringApplication` sends some special `ApplicationEvents` to the listeners (some even before the context is created) and then registers the listeners for events published by the `ApplicationContext` as well. +See “[Application Events and Listeners](features.html#features.spring-application.application-events-and-listeners)” in the ‘Spring Boot features’ section for a complete list. + +It is also possible to customize the `Environment` before the application context is refreshed by using `EnvironmentPostProcessor`. 
+Each implementation should be registered in `META-INF/spring.factories`, as shown in the following example: + +``` +org.springframework.boot.env.EnvironmentPostProcessor=com.example.YourEnvironmentPostProcessor +``` + +The implementation can load arbitrary files and add them to the `Environment`. +For instance, the following example loads a YAML configuration file from the classpath: + +``` +import java.io.IOException; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.env.EnvironmentPostProcessor; +import org.springframework.boot.env.YamlPropertySourceLoader; +import org.springframework.core.env.ConfigurableEnvironment; +import org.springframework.core.env.PropertySource; +import org.springframework.core.io.ClassPathResource; +import org.springframework.core.io.Resource; +import org.springframework.util.Assert; + +public class MyEnvironmentPostProcessor implements EnvironmentPostProcessor { + + private final YamlPropertySourceLoader loader = new YamlPropertySourceLoader(); + + @Override + public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) { + Resource path = new ClassPathResource("com/example/myapp/config.yml"); + PropertySource propertySource = loadYaml(path); + environment.getPropertySources().addLast(propertySource); + } + + private PropertySource loadYaml(Resource path) { + Assert.isTrue(path.exists(), () -> "Resource " + path + " does not exist"); + try { + return this.loader.load("custom-resource", path).get(0); + } + catch (IOException ex) { + throw new IllegalStateException("Failed to load yaml configuration from " + path, ex); + } + } + +} + +``` + +| |The `Environment` has already been prepared with all the usual property sources that Spring Boot loads by default.
It is therefore possible to get the location of the file from the environment.
The preceding example adds the `custom-resource` property source at the end of the list so that a key defined in any of the usual other locations takes precedence.
A custom implementation may define another order.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |While using `@PropertySource` on your `@SpringBootApplication` may seem to be a convenient way to load a custom resource in the `Environment`, we do not recommend it.
Such property sources are not added to the `Environment` until the application context is being refreshed.
This is too late to configure certain properties such as `logging.*` and `spring.main.*` which are read before refresh begins.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.4. Build an ApplicationContext Hierarchy (Adding a Parent or Root Context) ### + +You can use the `ApplicationBuilder` class to create parent/child `ApplicationContext` hierarchies. +See “[features.html](features.html#features.spring-application.fluent-builder-api)” in the ‘Spring Boot features’ section for more information. + +### 1.5. Create a Non-web Application + +Not all Spring applications have to be web applications (or web services). +If you want to execute some code in a `main` method but also bootstrap a Spring application to set up the infrastructure to use, you can use the `SpringApplication` features of Spring Boot. +A `SpringApplication` changes its `ApplicationContext` class, depending on whether it thinks it needs a web application or not. +The first thing you can do to help it is to leave server-related dependencies (such as the servlet API) off the classpath. +If you cannot do that (for example, you run two applications from the same code base) then you can explicitly call `setWebApplicationType(WebApplicationType.NONE)` on your `SpringApplication` instance or set the `applicationContextClass` property (through the Java API or with external properties). +Application code that you want to run as your business logic can be implemented as a `CommandLineRunner` and dropped into the context as a `@Bean` definition. + +## 2. 
Properties and Configuration + +This section includes topics about setting and reading properties and configuration settings and their interaction with Spring Boot applications. + +### 2.1. Automatically Expand Properties at Build Time + +Rather than hardcoding some properties that are also specified in your project’s build configuration, you can automatically expand them by instead using the existing build configuration. +This is possible in both Maven and Gradle. + +#### 2.1.1. Automatic Property Expansion Using Maven #### + +You can automatically expand properties from the Maven project by using resource filtering. +If you use the `spring-boot-starter-parent`, you can then refer to your Maven ‘project properties’ with `@..@` placeholders (for example, `@project.version@`), as shown in the following example: + +Properties + +``` +app.encoding=@project.build.sourceEncoding@ +app.java.version=@java.version@ +``` + +Yaml + +``` +app: + encoding: "@project.build.sourceEncoding@" + java: + version: "@java.version@" +``` + +| |Only production configuration is filtered that way (in other words, no filtering is applied on `src/test/resources`).| +|---|---------------------------------------------------------------------------------------------------------------------| + +| |If you enable the `addResources` flag, the `spring-boot:run` goal can add `src/main/resources` directly to the classpath (for hot reloading purposes).
Doing so circumvents the resource filtering and this feature.
Instead, you can use the `exec:java` goal or customize the plugin’s configuration.
See the [plugin usage page](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#getting-started) for more details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you do not use the starter parent, you need to include the following element inside the `<build>` element of your `pom.xml`: + +``` +<resources> + <resource> + <directory>src/main/resources</directory> + <filtering>true</filtering> + </resource> +</resources> +``` + +You also need to include the following element inside `<plugins>`: + +``` +<plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-resources-plugin</artifactId> + <version>2.7</version> + <configuration> + <delimiters> + <delimiter>@</delimiter> + </delimiters> + <useDefaultDelimiters>false</useDefaultDelimiters> + </configuration> +</plugin> +``` + +| |The `useDefaultDelimiters` property is important if you use standard Spring placeholders (such as `${placeholder}`) in your configuration.
If that property is not set to `false`, these may be expanded by the build.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.2. Automatic Property Expansion Using Gradle #### + +You can automatically expand properties from the Gradle project by configuring the Java plugin’s `processResources` task to do so, as shown in the following example: + +``` +processResources { + expand(project.properties) +} +``` + +You can then refer to your Gradle project’s properties by using placeholders, as shown in the following example: + +Properties + +``` +app.name=${name} +app.description=${description} +``` + +Yaml + +``` +app: + name: "${name}" + description: "${description}" +``` + +| |Gradle’s `expand` method uses Groovy’s `SimpleTemplateEngine`, which transforms `${..}` tokens.
The `${..}` style conflicts with Spring’s own property placeholder mechanism.
To use Spring property placeholders together with automatic expansion, escape the Spring property placeholders as follows: `\${..}`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.2. Externalize the Configuration of SpringApplication ### + +A `SpringApplication` has bean property setters, so you can use its Java API as you create the application to modify its behavior. +Alternatively, you can externalize the configuration by setting properties in `spring.main.*`. +For example, in `application.properties`, you might have the following settings: + +Properties + +``` +spring.main.web-application-type=none +spring.main.banner-mode=off +``` + +Yaml + +``` +spring: + main: + web-application-type: "none" + banner-mode: "off" +``` + +Then the Spring Boot banner is not printed on startup, and the application is not starting an embedded web server. + +Properties defined in external configuration override and replace the values specified with the Java API, with the notable exception of the primary sources. 
+Primary sources are those provided to the `SpringApplication` constructor: + +``` +import org.springframework.boot.Banner; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class MyApplication { + + public static void main(String[] args) { + SpringApplication application = new SpringApplication(MyApplication.class); + application.setBannerMode(Banner.Mode.OFF); + application.run(args); + } + +} + +``` + +Or to `sources(…​)` method of a `SpringApplicationBuilder`: + +``` +import org.springframework.boot.Banner; +import org.springframework.boot.builder.SpringApplicationBuilder; + +public class MyApplication { + + public static void main(String[] args) { + new SpringApplicationBuilder() + .bannerMode(Banner.Mode.OFF) + .sources(MyApplication.class) + .run(args); + } + +} + +``` + +Given the examples above, if we have the following configuration: + +Properties + +``` +spring.main.sources=com.example.MyDatabaseConfig,com.example.MyJmsConfig +spring.main.banner-mode=console +``` + +Yaml + +``` +spring: + main: + sources: "com.example.MyDatabaseConfig,com.example.MyJmsConfig" + banner-mode: "console" +``` + +The actual application will show the banner (as overridden by configuration) and uses three sources for the `ApplicationContext`. +The application sources are: + +1. `MyApplication` (from the code) + +2. `MyDatabaseConfig` (from the external config) + +3. `MyJmsConfig`(from the external config) + +### 2.3. Change the Location of External Properties of an Application ### + +By default, properties from different sources are added to the Spring `Environment` in a defined order (see “[features.html](features.html#features.external-config)” in the ‘Spring Boot features’ section for the exact order). 
+ +You can also provide the following System properties (or environment variables) to change the behavior: + +* `spring.config.name` (`SPRING_CONFIG_NAME`): Defaults to `application` as the root of the file name. + +* `spring.config.location` (`SPRING_CONFIG_LOCATION`): The file to load (such as a classpath resource or a URL). + A separate `Environment` property source is set up for this document and it can be overridden by system properties, environment variables, or the command line. + +No matter what you set in the environment, Spring Boot always loads `application.properties` as described above. +By default, if YAML is used, then files with the ‘.yml’ extension are also added to the list. + +Spring Boot logs the configuration files that are loaded at the `DEBUG` level and the candidates it has not found at `TRACE` level. + +See [`ConfigFileApplicationListener`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigFileApplicationListener.java) for more detail. + +### 2.4. Use ‘Short’ Command Line Arguments + +Some people like to use (for example) `--port=9000` instead of `--server.port=9000` to set configuration properties on the command line. +You can enable this behavior by using placeholders in `application.properties`, as shown in the following example: + +Properties + +``` +server.port=${port:8080} +``` + +Yaml + +``` +server: + port: "${port:8080}" +``` + +| |If you inherit from the `spring-boot-starter-parent` POM, the default filter token of the `maven-resources-plugin` has been changed from `${*}` to `@` (that is, `@maven.token@` instead of `${maven.token}`) to prevent conflicts with Spring-style placeholders.
If you have enabled Maven filtering for the `application.properties` directly, you may want to also change the default filter token to use [other delimiters](https://maven.apache.org/plugins/maven-resources-plugin/resources-mojo.html#delimiters).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |In this specific case, the port binding works in a PaaS environment such as Heroku or Cloud Foundry.
In those two platforms, the `PORT` environment variable is set automatically and Spring can bind to capitalized synonyms for `Environment` properties.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.5. Use YAML for External Properties + +YAML is a superset of JSON and, as such, is a convenient syntax for storing external properties in a hierarchical format, as shown in the following example: + +``` +spring: + application: + name: "cruncher" + datasource: + driver-class-name: "com.mysql.jdbc.Driver" + url: "jdbc:mysql://localhost/test" +server: + port: 9000 +``` + +Create a file called `application.yml` and put it in the root of your classpath. +Then add `snakeyaml` to your dependencies (Maven coordinates `org.yaml:snakeyaml`, already included if you use the `spring-boot-starter`). +A YAML file is parsed to a Java `Map` (like a JSON object), and Spring Boot flattens the map so that it is one level deep and has period-separated keys, as many people are used to with `Properties` files in Java. + +The preceding example YAML corresponds to the following `application.properties` file: + +``` +spring.application.name=cruncher +spring.datasource.driver-class-name=com.mysql.jdbc.Driver +spring.datasource.url=jdbc:mysql://localhost/test +server.port=9000 +``` + +See “[features.html](features.html#features.external-config.yaml)” in the ‘Spring Boot features’ section for more information about YAML. + +### 2.6. Set the Active Spring Profiles + +The Spring `Environment` has an API for this, but you would normally set a System property (`spring.profiles.active`) or an OS environment variable (`SPRING_PROFILES_ACTIVE`). 
+Also, you can launch your application with a `-D` argument (remember to put it before the main class or jar archive), as follows: + +``` +$ java -jar -Dspring.profiles.active=production demo-0.0.1-SNAPSHOT.jar +``` + +In Spring Boot, you can also set the active profile in `application.properties`, as shown in the following example: + +Properties + +``` +spring.profiles.active=production +``` + +Yaml + +``` +spring: + profiles: + active: "production" +``` + +A value set this way is replaced by the System property or environment variable setting but not by the `SpringApplicationBuilder.profiles()` method. +Thus, the latter Java API can be used to augment the profiles without changing the defaults. + +See “[features.html](features.html#features.profiles)” in the “Spring Boot features” section for more information. + +### 2.7. Set the Default Profile Name + +The default profile is a profile that is enabled if no profile is active. +By default, the name of the default profile is `default`, but it could be changed using a System property (`spring.profiles.default`) or an OS environment variable (`SPRING_PROFILES_DEFAULT`). + +In Spring Boot, you can also set the default profile name in `application.properties`, as shown in the following example: + +Properties + +``` +spring.profiles.default=dev +``` + +Yaml + +``` +spring: + profiles: + default: "dev" +``` + +See “[features.html](features.html#features.profiles)” in the “Spring Boot features” section for more information. + +### 2.8. Change Configuration Depending on the Environment ### + +Spring Boot supports multi-document YAML and Properties files (see [features.html](features.html#features.external-config.files.multi-document) for details) which can be activated conditionally based on the active profiles. 
+ +If a document contains a `spring.config.activate.on-profile` key, then the profiles value (a comma-separated list of profiles or a profile expression) is fed into the Spring `Environment.acceptsProfiles()` method. +If the profile expression matches then that document is included in the final merge (otherwise, it is not), as shown in the following example: + +Properties + +``` +server.port=9000 +#--- +spring.config.activate.on-profile=development +server.port=9001 +#--- +spring.config.activate.on-profile=production +server.port=0 +``` + +Yaml + +``` +server: + port: 9000 +--- +spring: + config: + activate: + on-profile: "development" +server: + port: 9001 +--- +spring: + config: + activate: + on-profile: "production" +server: + port: 0 +``` + +In the preceding example, the default port is 9000. +However, if the Spring profile called ‘development’ is active, then the port is 9001. +If ‘production’ is active, then the port is 0. + +| |The documents are merged in the order in which they are encountered.
Later values override earlier values.| +|---|--------------------------------------------------------------------------------------------------------------| + +### 2.9. Discover Built-in Options for External Properties ### + +Spring Boot binds external properties from `application.properties` (or `.yml` files and other places) into an application at runtime. +There is not (and technically cannot be) an exhaustive list of all supported properties in a single location, because contributions can come from additional jar files on your classpath. + +A running application with the Actuator features has a `configprops` endpoint that shows all the bound and bindable properties available through `@ConfigurationProperties`. + +The appendix includes an [`application.properties`](application-properties.html#appendix.application-properties) example with a list of the most common properties supported by Spring Boot. +The definitive list comes from searching the source code for `@ConfigurationProperties` and `@Value` annotations as well as the occasional use of `Binder`. +For more about the exact ordering of loading properties, see "[features.html](features.html#features.external-config)". + +## 3. Embedded Web Servers + +Each Spring Boot web application includes an embedded web server. +This feature leads to a number of how-to questions, including how to change the embedded server and how to configure the embedded server. +This section answers those questions. + +### 3.1. Use Another Web Server + +Many Spring Boot starters include default embedded containers. + +* For servlet stack applications, the `spring-boot-starter-web` includes Tomcat by including `spring-boot-starter-tomcat`, but you can use `spring-boot-starter-jetty` or `spring-boot-starter-undertow` instead. 
+ +* For reactive stack applications, the `spring-boot-starter-webflux` includes Reactor Netty by including `spring-boot-starter-reactor-netty`, but you can use `spring-boot-starter-tomcat`, `spring-boot-starter-jetty`, or `spring-boot-starter-undertow` instead. + +When switching to a different HTTP server, you need to swap the default dependencies for those that you need instead. +To help with this process, Spring Boot provides a separate starter for each of the supported HTTP servers. + +The following Maven example shows how to exclude Tomcat and include Jetty for Spring MVC: + +``` +<properties> + <servlet-api.version>3.1.0</servlet-api.version> +</properties> +<dependency> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-web</artifactId> + <exclusions> + <!-- Exclude the Tomcat dependency --> + <exclusion> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-tomcat</artifactId> + </exclusion> + </exclusions> +</dependency> +<!-- Use Jetty instead --> +<dependency> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-jetty</artifactId> +</dependency> +``` + +| |The version of the servlet API has been overridden as, unlike Tomcat 9 and Undertow 2, Jetty 9.4 does not support servlet 4.0.
If you wish to use Jetty 10, which does support servlet 4.0, override the `jetty.version` property rather than the `servlet-api.version` property.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following Gradle example configures the necessary dependencies and a [module replacement](https://docs.gradle.org/current/userguide/resolution_rules.html#sec:module_replacement) to use Undertow in place of Reactor Netty for Spring WebFlux: + +``` +dependencies { + implementation "org.springframework.boot:spring-boot-starter-undertow" + implementation "org.springframework.boot:spring-boot-starter-webflux" + modules { + module("org.springframework.boot:spring-boot-starter-reactor-netty") { + replacedBy("org.springframework.boot:spring-boot-starter-undertow", "Use Undertow instead of Reactor Netty") + } + } +} +``` + +| |`spring-boot-starter-reactor-netty` is required to use the `WebClient` class, so you may need to keep a dependency on Netty even when you need to include a different HTTP server.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.2. Disabling the Web Server + +If your classpath contains the necessary bits to start a web server, Spring Boot will automatically start it. +To disable this behavior configure the `WebApplicationType` in your `application.properties`, as shown in the following example: + +Properties + +``` +spring.main.web-application-type=none +``` + +Yaml + +``` +spring: + main: + web-application-type: "none" +``` + +### 3.3. 
Change the HTTP Port + +In a standalone application, the main HTTP port defaults to `8080` but can be set with `server.port` (for example, in `application.properties` or as a System property). +Thanks to relaxed binding of `Environment` values, you can also use `SERVER_PORT` (for example, as an OS environment variable). + +To switch off the HTTP endpoints completely but still create a `WebApplicationContext`, use `server.port=-1` (doing so is sometimes useful for testing). + +For more details, see “[web.html](web.html#web.servlet.embedded-container.customizing)” in the ‘Spring Boot Features’ section, or the [`ServerProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/ServerProperties.java) source code. + +### 3.4. Use a Random Unassigned HTTP Port + +To scan for a free port (using OS natives to prevent clashes) use `server.port=0`. + +### 3.5. Discover the HTTP Port at Runtime + +You can access the port the server is running on from log output or from the `WebServerApplicationContext` through its `WebServer`. +The best way to get that and be sure it has been initialized is to add a `@Bean` of type `ApplicationListener` and pull the container out of the event when it is published. + +Tests that use `@SpringBootTest(webEnvironment=WebEnvironment.RANDOM_PORT)` can also inject the actual port into a field by using the `@LocalServerPort` annotation, as shown in the following example: + +``` +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.context.SpringBootTest.WebEnvironment; +import org.springframework.boot.web.server.LocalServerPort; + +@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT) +public class MyWebIntegrationTests { + + @LocalServerPort + int port; + + // ... + +} + +``` + +| |`@LocalServerPort` is a meta-annotation for `@Value("${local.server.port}")`.
Do not try to inject the port in a regular application.
As we just saw, the value is set only after the container has been initialized.
Contrary to a test, application code callbacks are processed early (before the value is actually available).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.6. Enable HTTP Response Compression + +HTTP response compression is supported by Jetty, Tomcat, and Undertow. +It can be enabled in `application.properties`, as follows: + +Properties + +``` +server.compression.enabled=true +``` + +Yaml + +``` +server: + compression: + enabled: true +``` + +By default, responses must be at least 2048 bytes in length for compression to be performed. +You can configure this behavior by setting the `server.compression.min-response-size` property. + +By default, responses are compressed only if their content type is one of the following: + +* `text/html` + +* `text/xml` + +* `text/plain` + +* `text/css` + +* `text/javascript` + +* `application/javascript` + +* `application/json` + +* `application/xml` + +You can configure this behavior by setting the `server.compression.mime-types` property. + +### 3.7. Configure SSL + +SSL can be configured declaratively by setting the various `server.ssl.*` properties, typically in `application.properties` or `application.yml`. 
+The following example shows setting SSL properties in `application.properties`: + +Properties + +``` +server.port=8443 +server.ssl.key-store=classpath:keystore.jks +server.ssl.key-store-password=secret +server.ssl.key-password=another-secret +``` + +Yaml + +``` +server: + port: 8443 + ssl: + key-store: "classpath:keystore.jks" + key-store-password: "secret" + key-password: "another-secret" +``` + +See [`Ssl`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/web/server/Ssl.java) for details of all of the supported properties. + +Using configuration such as the preceding example means the application no longer supports a plain HTTP connector at port 8080. +Spring Boot does not support the configuration of both an HTTP connector and an HTTPS connector through `application.properties`. +If you want to have both, you need to configure one of them programmatically. +We recommend using `application.properties` to configure HTTPS, as the HTTP connector is the easier of the two to configure programmatically. + +### 3.8. Configure HTTP/2 + +You can enable HTTP/2 support in your Spring Boot application with the `server.http2.enabled` configuration property. +Both `h2` (HTTP/2 over TLS) and `h2c` (HTTP/2 over TCP) are supported. +To use `h2`, SSL must also be enabled. +When SSL is not enabled, `h2c` will be used. +The details of the `h2` support depend on the chosen web server and the application environment, since that protocol is not supported out-of-the-box by all JDK 8 releases. + +#### 3.8.1. HTTP/2 with Tomcat + +Spring Boot ships by default with Tomcat 9.0.x which supports `h2c` out of the box and `h2` out of the box when using JDK 9 or later. +Alternatively, `h2` can be used on JDK 8 if the `libtcnative` library and its dependencies are installed on the host operating system. + +The library directory must be made available, if not already, to the JVM library path. 
+You can do so with a JVM argument such as `-Djava.library.path=/usr/local/opt/tomcat-native/lib`. +More on this in the [official Tomcat documentation](https://tomcat.apache.org/tomcat-9.0-doc/apr.html). + +Starting Tomcat 9.0.x on JDK 8 with HTTP/2 and SSL enabled but without that native support logs the following error: + +``` +ERROR 8787 --- [ main] o.a.coyote.http11.Http11NioProtocol : The upgrade handler [org.apache.coyote.http2.Http2Protocol] for [h2] only supports upgrade via ALPN but has been configured for the ["https-jsse-nio-8443"] connector that does not support ALPN. +``` + +This error is not fatal, and the application still starts with HTTP/1.1 SSL support. + +#### 3.8.2. HTTP/2 with Jetty + +For HTTP/2 support, Jetty requires the additional `org.eclipse.jetty.http2:http2-server` dependency. +To use `h2c` no other dependencies are required. +To use `h2`, you also need to choose one of the following dependencies, depending on your deployment: + +* `org.eclipse.jetty:jetty-alpn-java-server` for applications running on JDK9+ + +* `org.eclipse.jetty:jetty-alpn-openjdk8-server` for applications running on JDK8u252+ + +* `org.eclipse.jetty:jetty-alpn-conscrypt-server` and the [Conscrypt library](https://www.conscrypt.org/) with no JDK requirement + +#### 3.8.3. HTTP/2 with Reactor Netty + +The `spring-boot-webflux-starter` is using by default Reactor Netty as a server. +Reactor Netty supports `h2c` using JDK 8 or later with no additional dependencies. +Reactor Netty supports `h2` using the JDK support with JDK 9 or later. +For JDK 8 environments, or for optimal runtime performance, this server also supports `h2` with native libraries. +To enable that, your application needs to have an additional dependency. + +Spring Boot manages the version for the `io.netty:netty-tcnative-boringssl-static` "uber jar", containing native libraries for all platforms. 
+Developers can choose to import only the required dependencies using a classifier (see [the Netty official documentation](https://netty.io/wiki/forked-tomcat-native.html)). + +#### 3.8.4. HTTP/2 with Undertow + +As of Undertow 1.4.0+, both `h2` and `h2c` are supported on JDK 8 without any additional dependencies. + +### 3.9. Configure the Web Server + +Generally, you should first consider using one of the many available configuration keys and customize your web server by adding new entries in your `application.properties` or `application.yml` file. +See “[Discover Built-in Options for External Properties](#howto.properties-and-configuration.discover-build-in-options-for-external-properties)”). +The `server.*` namespace is quite useful here, and it includes namespaces like `server.tomcat.*`, `server.jetty.*` and others, for server-specific features. +See the list of [application-properties.html](application-properties.html#appendix.application-properties). + +The previous sections covered already many common use cases, such as compression, SSL or HTTP/2. +However, if a configuration key does not exist for your use case, you should then look at [`WebServerFactoryCustomizer`](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/web/server/WebServerFactoryCustomizer.html). +You can declare such a component and get access to the server factory relevant to your choice: you should select the variant for the chosen Server (Tomcat, Jetty, Reactor Netty, Undertow) and the chosen web stack (servlet or reactive). 
+ +The example below is for Tomcat with the `spring-boot-starter-web` (servlet stack): + +``` +import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory; +import org.springframework.boot.web.server.WebServerFactoryCustomizer; +import org.springframework.stereotype.Component; + +@Component +public class MyTomcatWebServerCustomizer implements WebServerFactoryCustomizer<TomcatServletWebServerFactory> { + + @Override + public void customize(TomcatServletWebServerFactory factory) { + // customize the factory here + } + +} + +``` + +| |Spring Boot uses that infrastructure internally to auto-configure the server.
Auto-configured `WebServerFactoryCustomizer` beans have an order of `0` and will be processed before any user-defined customizers, unless it has an explicit order that states otherwise.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Once you have got access to a `WebServerFactory` using the customizer, you can use it to configure specific parts, like connectors, server resources, or the server itself - all using server-specific APIs. + +In addition Spring Boot provides: + +| Server | Servlet stack | Reactive stack | +|--------|---------------------------------|----------------------------------| +| Tomcat | `TomcatServletWebServerFactory` | `TomcatReactiveWebServerFactory` | +| Jetty | `JettyServletWebServerFactory` | `JettyReactiveWebServerFactory` | +|Undertow|`UndertowServletWebServerFactory`|`UndertowReactiveWebServerFactory`| +|Reactor | N/A | `NettyReactiveWebServerFactory` | + +As a last resort, you can also declare your own `WebServerFactory` bean, which will override the one provided by Spring Boot. +When you do so, auto-configured customizers are still applied on your custom factory, so use that option carefully. + +### 3.10. Add a Servlet, Filter, or Listener to an Application + +In a servlet stack application, that is with the `spring-boot-starter-web`, there are two ways to add `Servlet`, `Filter`, `ServletContextListener`, and the other listeners supported by the Servlet API to your application: + +* [Add a Servlet, Filter, or Listener by Using a Spring Bean](#howto.webserver.add-servlet-filter-listener.spring-bean) + +* [Add Servlets, Filters, and Listeners by Using Classpath Scanning](#howto.webserver.add-servlet-filter-listener.using-scanning) + +#### 3.10.1. 
Add a Servlet, Filter, or Listener by Using a Spring Bean #### + +To add a `Servlet`, `Filter`, or servlet `*Listener` by using a Spring bean, you must provide a `@Bean` definition for it. +Doing so can be very useful when you want to inject configuration or dependencies. +However, you must be very careful that they do not cause eager initialization of too many other beans, because they have to be installed in the container very early in the application lifecycle. +(For example, it is not a good idea to have them depend on your `DataSource` or JPA configuration.) +You can work around such restrictions by initializing the beans lazily when first used instead of on initialization. + +In the case of filters and servlets, you can also add mappings and init parameters by adding a `FilterRegistrationBean` or a `ServletRegistrationBean` instead of or in addition to the underlying component. + +| |If no `dispatcherType` is specified on a filter registration, `REQUEST` is used.
This aligns with the servlet specification’s default dispatcher type.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------| + +Like any other Spring bean, you can define the order of servlet filter beans; please make sure to check the “[web.html](web.html#web.servlet.embedded-container.servlets-filters-listeners.beans)” section. + +##### Disable Registration of a Servlet or Filter ##### + +As [described earlier](#howto.webserver.add-servlet-filter-listener.spring-bean), any `Servlet` or `Filter` beans are registered with the servlet container automatically. +To disable registration of a particular `Filter` or `Servlet` bean, create a registration bean for it and mark it as disabled, as shown in the following example: + +``` +import org.springframework.boot.web.servlet.FilterRegistrationBean; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyFilterConfiguration { + + @Bean + public FilterRegistrationBean registration(MyFilter filter) { + FilterRegistrationBean registration = new FilterRegistrationBean<>(filter); + registration.setEnabled(false); + return registration; + } + +} + +``` + +#### 3.10.2. Add Servlets, Filters, and Listeners by Using Classpath Scanning #### + +`@WebServlet`, `@WebFilter`, and `@WebListener` annotated classes can be automatically registered with an embedded servlet container by annotating a `@Configuration` class with `@ServletComponentScan` and specifying the package(s) containing the components that you want to register. +By default, `@ServletComponentScan` scans from the package of the annotated class. + +### 3.11. Configure Access Logging + +Access logs can be configured for Tomcat, Undertow, and Jetty through their respective namespaces. 
+ +For instance, the following settings log access on Tomcat with a [custom pattern](https://tomcat.apache.org/tomcat-9.0-doc/config/valve.html#Access_Logging). + +Properties + +``` +server.tomcat.basedir=my-tomcat +server.tomcat.accesslog.enabled=true +server.tomcat.accesslog.pattern=%t %a %r %s (%D ms) +``` + +Yaml + +``` +server: + tomcat: + basedir: "my-tomcat" + accesslog: + enabled: true + pattern: "%t %a %r %s (%D ms)" +``` + +| |The default location for logs is a `logs` directory relative to the Tomcat base directory.
By default, the `logs` directory is a temporary directory, so you may want to fix Tomcat’s base directory or use an absolute path for the logs.
In the preceding example, the logs are available in `my-tomcat/logs` relative to the working directory of the application.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Access logging for Undertow can be configured in a similar fashion, as shown in the following example: + +Properties + +``` +server.undertow.accesslog.enabled=true +server.undertow.accesslog.pattern=%t %a %r %s (%D ms) +``` + +Yaml + +``` +server: + undertow: + accesslog: + enabled: true + pattern: "%t %a %r %s (%D ms)" +``` + +Logs are stored in a `logs` directory relative to the working directory of the application. +You can customize this location by setting the `server.undertow.accesslog.dir` property. + +Finally, access logging for Jetty can also be configured as follows: + +Properties + +``` +server.jetty.accesslog.enabled=true +server.jetty.accesslog.filename=/var/log/jetty-access.log +``` + +Yaml + +``` +server: + jetty: + accesslog: + enabled: true + filename: "/var/log/jetty-access.log" +``` + +By default, logs are redirected to `System.err`. +For more details, see the Jetty documentation. + +### 3.12. Running Behind a Front-end Proxy Server + +If your application is running behind a proxy, a load-balancer or in the cloud, the request information (like the host, port, scheme…​) might change along the way. +Your application may be running on `10.10.10.10:8080`, but HTTP clients should only see `example.org`. + +[RFC7239 "Forwarded Headers"](https://tools.ietf.org/html/rfc7239) defines the `Forwarded` HTTP header; proxies can use this header to provide information about the original request. 
+
+You can configure your application to read those headers and automatically use that information when creating links and sending them to clients in HTTP 302 responses, JSON documents or HTML pages.
+There are also non-standard headers, like `X-Forwarded-Host`, `X-Forwarded-Port`, `X-Forwarded-Proto`, `X-Forwarded-Ssl`, and `X-Forwarded-Prefix`.
+
+If the proxy adds the commonly used `X-Forwarded-For` and `X-Forwarded-Proto` headers, setting `server.forward-headers-strategy` to `NATIVE` is enough to support those.
+With this option, the Web servers themselves natively support this feature; you can check their specific documentation to learn about specific behavior.
+
+If this is not enough, Spring Framework provides a [ForwardedHeaderFilter](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#filters-forwarded-headers).
+You can register it as a servlet filter in your application by setting `server.forward-headers-strategy` to `FRAMEWORK`.
+
+| |If you are using Tomcat and terminating SSL at the proxy, `server.tomcat.redirect-context-root` should be set to `false`.
This allows the `X-Forwarded-Proto` header to be honored before any redirects are performed.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If your application runs in Cloud Foundry or Heroku, the `server.forward-headers-strategy` property defaults to `NATIVE`.
In all other instances, it defaults to `NONE`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.12.1. Customize Tomcat’s Proxy Configuration + +If you use Tomcat, you can additionally configure the names of the headers used to carry “forwarded” information, as shown in the following example: + +Properties + +``` +server.tomcat.remoteip.remote-ip-header=x-your-remote-ip-header +server.tomcat.remoteip.protocol-header=x-your-protocol-header +``` + +Yaml + +``` +server: + tomcat: + remoteip: + remote-ip-header: "x-your-remote-ip-header" + protocol-header: "x-your-protocol-header" +``` + +Tomcat is also configured with a default regular expression that matches internal proxies that are to be trusted. +By default, IP addresses in `10/8`, `192.168/16`, `169.254/16` and `127/8` are trusted. +You can customize the valve’s configuration by adding an entry to `application.properties`, as shown in the following example: + +Properties + +``` +server.tomcat.remoteip.internal-proxies=192\\.168\\.\\d{1,3}\\.\\d{1,3} +``` + +Yaml + +``` +server: + tomcat: + remoteip: + internal-proxies: "192\\.168\\.\\d{1,3}\\.\\d{1,3}" +``` + +| |You can trust all proxies by setting the `internal-proxies` to empty (but do not do so in production).| +|---|------------------------------------------------------------------------------------------------------| + +You can take complete control of the configuration of Tomcat’s `RemoteIpValve` by switching the automatic one off (to do so, set `server.forward-headers-strategy=NONE`) and adding a new valve instance using a `WebServerFactoryCustomizer` bean. + +### 3.13. 
Enable Multiple Connectors with Tomcat + +You can add an `org.apache.catalina.connector.Connector` to the `TomcatServletWebServerFactory`, which can allow multiple connectors, including HTTP and HTTPS connectors, as shown in the following example: + +``` +import java.io.IOException; +import java.net.URL; + +import org.apache.catalina.connector.Connector; +import org.apache.coyote.http11.Http11NioProtocol; + +import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory; +import org.springframework.boot.web.server.WebServerFactoryCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.util.ResourceUtils; + +@Configuration(proxyBeanMethods = false) +public class MyTomcatConfiguration { + + @Bean + public WebServerFactoryCustomizer sslConnectorCustomizer() { + return (tomcat) -> tomcat.addAdditionalTomcatConnectors(createSslConnector()); + } + + private Connector createSslConnector() { + Connector connector = new Connector("org.apache.coyote.http11.Http11NioProtocol"); + Http11NioProtocol protocol = (Http11NioProtocol) connector.getProtocolHandler(); + try { + URL keystore = ResourceUtils.getURL("keystore"); + URL truststore = ResourceUtils.getURL("truststore"); + connector.setScheme("https"); + connector.setSecure(true); + connector.setPort(8443); + protocol.setSSLEnabled(true); + protocol.setKeystoreFile(keystore.toString()); + protocol.setKeystorePass("changeit"); + protocol.setTruststoreFile(truststore.toString()); + protocol.setTruststorePass("changeit"); + protocol.setKeyAlias("apitester"); + return connector; + } + catch (IOException ex) { + throw new IllegalStateException("Fail to create ssl connector", ex); + } + } + +} + +``` + +### 3.14. 
Use Tomcat’s LegacyCookieProcessor + +By default, the embedded Tomcat used by Spring Boot does not support "Version 0" of the Cookie format, so you may see the following error: + +``` +java.lang.IllegalArgumentException: An invalid character [32] was present in the Cookie value +``` + +If at all possible, you should consider updating your code to only store values compliant with later Cookie specifications. +If, however, you cannot change the way that cookies are written, you can instead configure Tomcat to use a `LegacyCookieProcessor`. +To switch to the `LegacyCookieProcessor`, use an `WebServerFactoryCustomizer` bean that adds a `TomcatContextCustomizer`, as shown in the following example: + +``` +import org.apache.tomcat.util.http.LegacyCookieProcessor; + +import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory; +import org.springframework.boot.web.server.WebServerFactoryCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyLegacyCookieProcessorConfiguration { + + @Bean + public WebServerFactoryCustomizer cookieProcessorCustomizer() { + return (factory) -> factory + .addContextCustomizers((context) -> context.setCookieProcessor(new LegacyCookieProcessor())); + } + +} + +``` + +### 3.15. Enable Tomcat’s MBean Registry + +Embedded Tomcat’s MBean registry is disabled by default. +This minimizes Tomcat’s memory footprint. +If you want to use Tomcat’s MBeans, for example so that they can be used by Micrometer to expose metrics, you must use the `server.tomcat.mbeanregistry.enabled` property to do so, as shown in the following example: + +Properties + +``` +server.tomcat.mbeanregistry.enabled=true +``` + +Yaml + +``` +server: + tomcat: + mbeanregistry: + enabled: true +``` + +### 3.16. 
Enable Multiple Listeners with Undertow + +Add an `UndertowBuilderCustomizer` to the `UndertowServletWebServerFactory` and add a listener to the `Builder`, as shown in the following example: + +``` +import io.undertow.Undertow.Builder; + +import org.springframework.boot.web.embedded.undertow.UndertowServletWebServerFactory; +import org.springframework.boot.web.server.WebServerFactoryCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyUndertowConfiguration { + + @Bean + public WebServerFactoryCustomizer undertowListenerCustomizer() { + return (factory) -> factory.addBuilderCustomizers(this::addHttpListener); + } + + private Builder addHttpListener(Builder builder) { + return builder.addHttpListener(8080, "0.0.0.0"); + } + +} + +``` + +### 3.17. Create WebSocket Endpoints Using @ServerEndpoint ### + +If you want to use `@ServerEndpoint` in a Spring Boot application that used an embedded container, you must declare a single `ServerEndpointExporter` `@Bean`, as shown in the following example: + +``` +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.web.socket.server.standard.ServerEndpointExporter; + +@Configuration(proxyBeanMethods = false) +public class MyWebSocketConfiguration { + + @Bean + public ServerEndpointExporter serverEndpointExporter() { + return new ServerEndpointExporter(); + } + +} + +``` + +The bean shown in the preceding example registers any `@ServerEndpoint` annotated beans with the underlying WebSocket container. +When deployed to a standalone servlet container, this role is performed by a servlet container initializer, and the `ServerEndpointExporter` bean is not required. + +## 4. Spring MVC + +Spring Boot has a number of starters that include Spring MVC. 
+Note that some starters include a dependency on Spring MVC rather than include it directly. +This section answers common questions about Spring MVC and Spring Boot. + +### 4.1. Write a JSON REST Service + +Any Spring `@RestController` in a Spring Boot application should render JSON response by default as long as Jackson2 is on the classpath, as shown in the following example: + +``` +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class MyController { + + @RequestMapping("/thing") + public MyThing thing() { + return new MyThing(); + } + +} + +``` + +As long as `MyThing` can be serialized by Jackson2 (true for a normal POJO or Groovy object), then `[localhost:8080/thing](http://localhost:8080/thing)` serves a JSON representation of it by default. +Note that, in a browser, you might sometimes see XML responses, because browsers tend to send accept headers that prefer XML. + +### 4.2. Write an XML REST Service + +If you have the Jackson XML extension (`jackson-dataformat-xml`) on the classpath, you can use it to render XML responses. +The previous example that we used for JSON would work. +To use the Jackson XML renderer, add the following dependency to your project: + +``` + + com.fasterxml.jackson.dataformat + jackson-dataformat-xml + +``` + +If Jackson’s XML extension is not available and JAXB is available, XML can be rendered with the additional requirement of having `MyThing` annotated as `@XmlRootElement`, as shown in the following example: + +``` +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement +public class MyThing { + + private String name; + + // getters/setters ... + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + +} + +``` + +JAXB is only available out of the box with Java 8. 
+
+If you use a more recent Java generation, add the following dependency to your project:
+
+```
+<dependency>
+    <groupId>org.glassfish.jaxb</groupId>
+    <artifactId>jaxb-runtime</artifactId>
+</dependency>
+```
+
+| |To get the server to render XML instead of JSON, you might have to send an `Accept: text/xml` header (or use a browser).|
+|---|------------------------------------------------------------------------------------------------------------------------|
+
+### 4.3. Customize the Jackson ObjectMapper
+
+Spring MVC (client and server side) uses `HttpMessageConverters` to negotiate content conversion in an HTTP exchange.
+If Jackson is on the classpath, you already get the default converter(s) provided by `Jackson2ObjectMapperBuilder`, an instance of which is auto-configured for you.
+
+The `ObjectMapper` (or `XmlMapper` for Jackson XML converter) instance (created by default) has the following customized properties:
+
+* `MapperFeature.DEFAULT_VIEW_INCLUSION` is disabled
+
+* `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` is disabled
+
+* `SerializationFeature.WRITE_DATES_AS_TIMESTAMPS` is disabled
+
+Spring Boot also has some features to make it easier to customize this behavior.
+
+You can configure the `ObjectMapper` and `XmlMapper` instances by using the environment.
+Jackson provides an extensive suite of on/off features that can be used to configure various aspects of its processing.
+
+These features are described in six enums (in Jackson) that map onto properties in the environment:
+
+| Enum | Property | Values |
+|-------------------------------------------------------|-----------------------------------------------|--------------------------------------------------------------|
+|`com.fasterxml.jackson.databind.DeserializationFeature`|`spring.jackson.deserialization.<feature_name>`| `true`, `false` |
+| `com.fasterxml.jackson.core.JsonGenerator.Feature` | `spring.jackson.generator.<feature_name>` | `true`, `false` |
+| `com.fasterxml.jackson.databind.MapperFeature` | `spring.jackson.mapper.<feature_name>` | `true`, `false` |
+| `com.fasterxml.jackson.core.JsonParser.Feature` | `spring.jackson.parser.<feature_name>` | `true`, `false` |
+| `com.fasterxml.jackson.databind.SerializationFeature` | `spring.jackson.serialization.<feature_name>` | `true`, `false` |
+|`com.fasterxml.jackson.annotation.JsonInclude.Include` | `spring.jackson.default-property-inclusion` |`always`, `non_null`, `non_absent`, `non_default`, `non_empty`|
+
+For example, to enable pretty print, set `spring.jackson.serialization.indent_output=true`.
+Note that, thanks to the use of [relaxed binding](features.html#features.external-config.typesafe-configuration-properties.relaxed-binding), the case of `indent_output` does not have to match the case of the corresponding enum constant, which is `INDENT_OUTPUT`.
+
+This environment-based configuration is applied to the auto-configured `Jackson2ObjectMapperBuilder` bean and applies to any mappers created by using the builder, including the auto-configured `ObjectMapper` bean.
+
+The context’s `Jackson2ObjectMapperBuilder` can be customized by one or more `Jackson2ObjectMapperBuilderCustomizer` beans.
+Such customizer beans can be ordered (Boot’s own customizer has an order of 0), letting additional customization be applied both before and after Boot’s customization.
+ +Any beans of type `com.fasterxml.jackson.databind.Module` are automatically registered with the auto-configured `Jackson2ObjectMapperBuilder` and are applied to any `ObjectMapper` instances that it creates. +This provides a global mechanism for contributing custom modules when you add new features to your application. + +If you want to replace the default `ObjectMapper` completely, either define a `@Bean` of that type and mark it as `@Primary` or, if you prefer the builder-based approach, define a `Jackson2ObjectMapperBuilder` `@Bean`. +Note that, in either case, doing so disables all auto-configuration of the `ObjectMapper`. + +If you provide any `@Beans` of type `MappingJackson2HttpMessageConverter`, they replace the default value in the MVC configuration. +Also, a convenience bean of type `HttpMessageConverters` is provided (and is always available if you use the default MVC configuration). +It has some useful methods to access the default and user-enhanced message converters. + +See the “[Customize the @ResponseBody Rendering](#howto.spring-mvc.customize-responsebody-rendering)” section and the [`WebMvcAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/servlet/WebMvcAutoConfiguration.java) source code for more details. + +### 4.4. Customize the @ResponseBody Rendering + +Spring uses `HttpMessageConverters` to render `@ResponseBody` (or responses from `@RestController`). +You can contribute additional converters by adding beans of the appropriate type in a Spring Boot context. +If a bean you add is of a type that would have been included by default anyway (such as `MappingJackson2HttpMessageConverter` for JSON conversions), it replaces the default value. +A convenience bean of type `HttpMessageConverters` is provided and is always available if you use the default MVC configuration. 
+It has some useful methods to access the default and user-enhanced message converters (For example, it can be useful if you want to manually inject them into a custom `RestTemplate`). + +As in normal MVC usage, any `WebMvcConfigurer` beans that you provide can also contribute converters by overriding the `configureMessageConverters` method. +However, unlike with normal MVC, you can supply only additional converters that you need (because Spring Boot uses the same mechanism to contribute its defaults). +Finally, if you opt out of the Spring Boot default MVC configuration by providing your own `@EnableWebMvc` configuration, you can take control completely and do everything manually by using `getMessageConverters` from `WebMvcConfigurationSupport`. + +See the [`WebMvcAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/servlet/WebMvcAutoConfiguration.java) source code for more details. + +### 4.5. Handling Multipart File Uploads + +Spring Boot embraces the servlet 3 `javax.servlet.http.Part` API to support uploading files. +By default, Spring Boot configures Spring MVC with a maximum size of 1MB per file and a maximum of 10MB of file data in a single request. +You may override these values, the location to which intermediate data is stored (for example, to the `/tmp` directory), and the threshold past which data is flushed to disk by using the properties exposed in the `MultipartProperties` class. +For example, if you want to specify that files be unlimited, set the `spring.servlet.multipart.max-file-size` property to `-1`. + +The multipart support is helpful when you want to receive multipart encoded file data as a `@RequestParam`-annotated parameter of type `MultipartFile` in a Spring MVC controller handler method. 
+ +See the [`MultipartAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/servlet/MultipartAutoConfiguration.java) source for more details. + +| |It is recommended to use the container’s built-in support for multipart uploads rather than introducing an additional dependency such as Apache Commons File Upload.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.6. Switch Off the Spring MVC DispatcherServlet + +By default, all content is served from the root of your application (`/`). +If you would rather map to a different path, you can configure one as follows: + +Properties + +``` +spring.mvc.servlet.path=/mypath +``` + +Yaml + +``` +spring: + mvc: + servlet: + path: "/mypath" +``` + +If you have additional servlets you can declare a `@Bean` of type `Servlet` or `ServletRegistrationBean` for each and Spring Boot will register them transparently to the container. +Because servlets are registered that way, they can be mapped to a sub-context of the `DispatcherServlet` without invoking it. + +Configuring the `DispatcherServlet` yourself is unusual but if you really need to do it, a `@Bean` of type `DispatcherServletPath` must be provided as well to provide the path of your custom `DispatcherServlet`. + +### 4.7. Switch off the Default MVC Configuration + +The easiest way to take complete control over MVC configuration is to provide your own `@Configuration` with the `@EnableWebMvc` annotation. +Doing so leaves all MVC configuration in your hands. + +### 4.8. Customize ViewResolvers + +A `ViewResolver` is a core component of Spring MVC, translating view names in `@Controller` to actual `View` implementations. 
+Note that `ViewResolvers` are mainly used in UI applications, rather than REST-style services (a `View` is not used to render a `@ResponseBody`). +There are many implementations of `ViewResolver` to choose from, and Spring on its own is not opinionated about which ones you should use. +Spring Boot, on the other hand, installs one or two for you, depending on what it finds on the classpath and in the application context. +The `DispatcherServlet` uses all the resolvers it finds in the application context, trying each one in turn until it gets a result. +If you add your own, you have to be aware of the order and in which position your resolver is added. + +`WebMvcAutoConfiguration` adds the following `ViewResolvers` to your context: + +* An `InternalResourceViewResolver` named ‘defaultViewResolver’. + This one locates physical resources that can be rendered by using the `DefaultServlet` (including static resources and JSP pages, if you use those). + It applies a prefix and a suffix to the view name and then looks for a physical resource with that path in the servlet context (the defaults are both empty but are accessible for external configuration through `spring.mvc.view.prefix` and `spring.mvc.view.suffix`). + You can override it by providing a bean of the same type. + +* A `BeanNameViewResolver` named ‘beanNameViewResolver’. + This is a useful member of the view resolver chain and picks up any beans with the same name as the `View` being resolved. + It should not be necessary to override or replace it. + +* A `ContentNegotiatingViewResolver` named ‘viewResolver’ is added only if there **are** actually beans of type `View` present. + This is a composite resolver, delegating to all the others and attempting to find a match to the ‘Accept’ HTTP header sent by the client. 
+ There is a useful [blog about `ContentNegotiatingViewResolver`](https://spring.io/blog/2013/06/03/content-negotiation-using-views) that you might like to study to learn more, and you might also look at the source code for detail. + You can switch off the auto-configured `ContentNegotiatingViewResolver` by defining a bean named ‘viewResolver’. + +* If you use Thymeleaf, you also have a `ThymeleafViewResolver` named ‘thymeleafViewResolver’. + It looks for resources by surrounding the view name with a prefix and suffix. + The prefix is `spring.thymeleaf.prefix`, and the suffix is `spring.thymeleaf.suffix`. + The values of the prefix and suffix default to ‘classpath:/templates/’ and ‘.html’, respectively. + You can override `ThymeleafViewResolver` by providing a bean of the same name. + +* If you use FreeMarker, you also have a `FreeMarkerViewResolver` named ‘freeMarkerViewResolver’. + It looks for resources in a loader path (which is externalized to `spring.freemarker.templateLoaderPath` and has a default value of ‘classpath:/templates/’) by surrounding the view name with a prefix and a suffix. + The prefix is externalized to `spring.freemarker.prefix`, and the suffix is externalized to `spring.freemarker.suffix`. + The default values of the prefix and suffix are empty and ‘.ftlh’, respectively. + You can override `FreeMarkerViewResolver` by providing a bean of the same name. + +* If you use Groovy templates (actually, if `groovy-templates` is on your classpath), you also have a `GroovyMarkupViewResolver` named ‘groovyMarkupViewResolver’. + It looks for resources in a loader path by surrounding the view name with a prefix and suffix (externalized to `spring.groovy.template.prefix` and `spring.groovy.template.suffix`). + The prefix and suffix have default values of ‘classpath:/templates/’ and ‘.tpl’, respectively. + You can override `GroovyMarkupViewResolver` by providing a bean of the same name. 
+
+* If you use Mustache, you also have a `MustacheViewResolver` named ‘mustacheViewResolver’.
+  It looks for resources by surrounding the view name with a prefix and suffix.
+  The prefix is `spring.mustache.prefix`, and the suffix is `spring.mustache.suffix`.
+  The values of the prefix and suffix default to ‘classpath:/templates/’ and ‘.mustache’, respectively.
+  You can override `MustacheViewResolver` by providing a bean of the same name.
+
+For more detail, see the following sections:
+
+* [`WebMvcAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/servlet/WebMvcAutoConfiguration.java)
+
+* [`ThymeleafAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/thymeleaf/ThymeleafAutoConfiguration.java)
+
+* [`FreeMarkerAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/freemarker/FreeMarkerAutoConfiguration.java)
+
+* [`GroovyTemplateAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/groovy/template/GroovyTemplateAutoConfiguration.java)
+
+## 5. Jersey
+
+### 5.1. Secure Jersey endpoints with Spring Security
+
+Spring Security can be used to secure a Jersey-based web application in much the same way as it can be used to secure a Spring MVC-based web application.
+However, if you want to use Spring Security’s method-level security with Jersey, you must configure Jersey to use `setStatus(int)` rather than `sendError(int)`.
+This prevents Jersey from committing the response before Spring Security has had an opportunity to report an authentication or authorization failure to the client. + +The `jersey.config.server.response.setStatusOverSendError` property must be set to `true` on the application’s `ResourceConfig` bean, as shown in the following example: + +``` +import java.util.Collections; + +import org.glassfish.jersey.server.ResourceConfig; + +import org.springframework.stereotype.Component; + +@Component +public class JerseySetStatusOverSendErrorConfig extends ResourceConfig { + + public JerseySetStatusOverSendErrorConfig() { + register(Endpoint.class); + setProperties(Collections.singletonMap("jersey.config.server.response.setStatusOverSendError", true)); + } + +} + +``` + +### 5.2. Use Jersey Alongside Another Web Framework + +To use Jersey alongside another web framework, such as Spring MVC, it should be configured so that it will allow the other framework to handle requests that it cannot handle. +First, configure Jersey to use a filter rather than a servlet by configuring the `spring.jersey.type` application property with a value of `filter`. +Second, configure your `ResourceConfig` to forward requests that would have resulted in a 404, as shown in the following example. + +``` +import org.glassfish.jersey.server.ResourceConfig; +import org.glassfish.jersey.servlet.ServletProperties; + +import org.springframework.stereotype.Component; + +@Component +public class JerseyConfig extends ResourceConfig { + + public JerseyConfig() { + register(Endpoint.class); + property(ServletProperties.FILTER_FORWARD_ON_404, true); + } + +} + +``` + +## 6. HTTP Clients + +Spring Boot offers a number of starters that work with HTTP clients. +This section answers questions related to using them. + +### 6.1. 
Configure RestTemplate to Use a Proxy + +As described in [io.html](io.html#io.rest-client.resttemplate.customization), you can use a `RestTemplateCustomizer` with `RestTemplateBuilder` to build a customized `RestTemplate`. +This is the recommended approach for creating a `RestTemplate` configured to use a proxy. + +The exact details of the proxy configuration depend on the underlying client request factory that is being used. + +### 6.2. Configure the TcpClient used by a Reactor Netty-based WebClient ### + +When Reactor Netty is on the classpath a Reactor Netty-based `WebClient` is auto-configured. +To customize the client’s handling of network connections, provide a `ClientHttpConnector` bean. +The following example configures a 60 second connect timeout and adds a `ReadTimeoutHandler`: + +``` +import io.netty.channel.ChannelOption; +import io.netty.handler.timeout.ReadTimeoutHandler; +import reactor.netty.http.client.HttpClient; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.http.client.reactive.ClientHttpConnector; +import org.springframework.http.client.reactive.ReactorClientHttpConnector; +import org.springframework.http.client.reactive.ReactorResourceFactory; + +@Configuration(proxyBeanMethods = false) +public class MyReactorNettyClientConfiguration { + + @Bean + ClientHttpConnector clientHttpConnector(ReactorResourceFactory resourceFactory) { + HttpClient httpClient = HttpClient.create(resourceFactory.getConnectionProvider()) + .runOn(resourceFactory.getLoopResources()) + .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 60000) + .doOnConnected((connection) -> connection.addHandlerLast(new ReadTimeoutHandler(60))); + return new ReactorClientHttpConnector(httpClient); + } + +} + +``` + +| |Note the use of `ReactorResourceFactory` for the connection provider and event loop resources.
This ensures efficient sharing of resources for the server receiving requests and the client making requests.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 7. Logging + +Spring Boot has no mandatory logging dependency, except for the Commons Logging API, which is typically provided by Spring Framework’s `spring-jcl` module. +To use [Logback](https://logback.qos.ch), you need to include it and `spring-jcl` on the classpath. +The recommended way to do that is through the starters, which all depend on `spring-boot-starter-logging`. +For a web application, you need only `spring-boot-starter-web`, since it depends transitively on the logging starter. +If you use Maven, the following dependency adds logging for you: + +``` + + org.springframework.boot + spring-boot-starter-web + +``` + +Spring Boot has a `LoggingSystem` abstraction that attempts to configure logging based on the content of the classpath. +If Logback is available, it is the first choice. + +If the only change you need to make to logging is to set the levels of various loggers, you can do so in `application.properties` by using the "logging.level" prefix, as shown in the following example: + +Properties + +``` +logging.level.org.springframework.web=debug +logging.level.org.hibernate=error +``` + +Yaml + +``` +logging: + level: + org.springframework.web: "debug" + org.hibernate: "error" +``` + +You can also set the location of a file to which to write the log (in addition to the console) by using `logging.file.name`. + +To configure the more fine-grained settings of a logging system, you need to use the native configuration format supported by the `LoggingSystem` in question. 
+By default, Spring Boot picks up the native configuration from its default location for the system (such as `classpath:logback.xml` for Logback), but you can set the location of the config file by using the `logging.config` property. + +### 7.1. Configure Logback for Logging + +If you need to apply customizations to logback beyond those that can be achieved with `application.properties`, you will need to add a standard logback configuration file. +You can add a `logback.xml` file to the root of your classpath for logback to find. +You can also use `logback-spring.xml` if you want to use the [Spring Boot Logback extensions](features.html#features.logging.logback-extensions). + +| |The Logback documentation has a [dedicated section that covers configuration](https://logback.qos.ch/manual/configuration.html) in some detail.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Boot provides a number of logback configurations that be `included` from your own configuration. +These includes are designed to allow certain common Spring Boot conventions to be re-applied. + +The following files are provided under `org/springframework/boot/logging/logback/`: + +* `defaults.xml` - Provides conversion rules, pattern properties and common logger configurations. + +* `console-appender.xml` - Adds a `ConsoleAppender` using the `CONSOLE_LOG_PATTERN`. + +* `file-appender.xml` - Adds a `RollingFileAppender` using the `FILE_LOG_PATTERN` and `ROLLING_FILE_NAME_PATTERN` with appropriate settings. + +In addition, a legacy `base.xml` file is provided for compatibility with earlier versions of Spring Boot. + +A typical custom `logback.xml` file would look something like this: + +``` + + + + + + + + + +``` + +Your logback configuration file can also make use of System properties that the `LoggingSystem` takes care of creating for you: + +* `${PID}`: The current process ID. 
+ +* `${LOG_FILE}`: Whether `logging.file.name` was set in Boot’s external configuration. + +* `${LOG_PATH}`: Whether `logging.file.path` (representing a directory for log files to live in) was set in Boot’s external configuration. + +* `${LOG_EXCEPTION_CONVERSION_WORD}`: Whether `logging.exception-conversion-word` was set in Boot’s external configuration. + +* `${ROLLING_FILE_NAME_PATTERN}`: Whether `logging.pattern.rolling-file-name` was set in Boot’s external configuration. + +Spring Boot also provides some nice ANSI color terminal output on a console (but not in a log file) by using a custom Logback converter. +See the `CONSOLE_LOG_PATTERN` in the `defaults.xml` configuration for an example. + +If Groovy is on the classpath, you should be able to configure Logback with `logback.groovy` as well. +If present, this setting is given preference. + +| |Spring extensions are not supported with Groovy configuration.
Any `logback-spring.groovy` files will not be detected.| +|---|--------------------------------------------------------------------------------------------------------------------------| + +#### 7.1.1. Configure Logback for File-only Output + +If you want to disable console logging and write output only to a file, you need a custom `logback-spring.xml` that imports `file-appender.xml` but not `console-appender.xml`, as shown in the following example: + +``` + + + + + + + + + +``` + +You also need to add `logging.file.name` to your `application.properties` or `application.yaml`, as shown in the following example: + +Properties + +``` +logging.file.name=myapplication.log +``` + +Yaml + +``` +logging: + file: + name: "myapplication.log" +``` + +### 7.2. Configure Log4j for Logging + +Spring Boot supports [Log4j 2](https://logging.apache.org/log4j/2.x/) for logging configuration if it is on the classpath. +If you use the starters for assembling dependencies, you have to exclude Logback and then include log4j 2 instead. +If you do not use the starters, you need to provide (at least) `spring-jcl` in addition to Log4j 2. + +The recommended path is through the starters, even though it requires some jiggling. +The following example shows how to set up the starters in Maven: + +``` + + org.springframework.boot + spring-boot-starter-web + + + org.springframework.boot + spring-boot-starter + + + org.springframework.boot + spring-boot-starter-logging + + + + + org.springframework.boot + spring-boot-starter-log4j2 + +``` + +Gradle provides a few different ways to set up the starters. +One way is to use a [module replacement](https://docs.gradle.org/current/userguide/resolution_rules.html#sec:module_replacement). 
+To do so, declare a dependency on the Log4j 2 starter and tell Gradle that any occurrences of the default logging starter should be replaced by the Log4j 2 starter, as shown in the following example: + +``` +dependencies { + implementation "org.springframework.boot:spring-boot-starter-log4j2" + modules { + module("org.springframework.boot:spring-boot-starter-logging") { + replacedBy("org.springframework.boot:spring-boot-starter-log4j2", "Use Log4j2 instead of Logback") + } + } +} +``` + +| |The Log4j starters gather together the dependencies for common logging requirements (such as having Tomcat use `java.util.logging` but configuring the output using Log4j 2).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |To ensure that debug logging performed using `java.util.logging` is routed into Log4j 2, configure its [JDK logging adapter](https://logging.apache.org/log4j/2.x/log4j-jul/index.html) by setting the `java.util.logging.manager` system property to `org.apache.logging.log4j.jul.LogManager`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 7.2.1. Use YAML or JSON to Configure Log4j 2 + +In addition to its default XML configuration format, Log4j 2 also supports YAML and JSON configuration files. 
+To configure Log4j 2 to use an alternative configuration file format, add the appropriate dependencies to the classpath and name your configuration files to match your chosen file format, as shown in the following example: + +|Format| Dependencies | File names | +|------|----------------------------------------------------------------------------------------------------------|----------------------------| +| YAML |`com.fasterxml.jackson.core:jackson-databind` + `com.fasterxml.jackson.dataformat:jackson-dataformat-yaml`|`log4j2.yaml` + `log4j2.yml`| +| JSON | `com.fasterxml.jackson.core:jackson-databind` |`log4j2.json` + `log4j2.jsn`| + +#### 7.2.2. Use Composite Configuration to Configure Log4j 2 + +Log4j 2 has support for combining multiple configuration files into a single composite configuration. +To use this support in Spring Boot, configure `logging.log4j2.config.override` with the locations of one or more secondary configuration files. +The secondary configuration files will be merged with the primary configuration, whether the primary’s source is Spring Boot’s defaults, a standard location such as `log4j.xml`, or the location configured by the `logging.config` property. + +## 8. Data Access + +Spring Boot includes a number of starters for working with data sources. +This section answers questions related to doing so. + +### 8.1. Configure a Custom DataSource + +To configure your own `DataSource`, define a `@Bean` of that type in your configuration. +Spring Boot reuses your `DataSource` anywhere one is required, including database initialization. +If you need to externalize some settings, you can bind your `DataSource` to the environment (see “[features.html](features.html#features.external-config.typesafe-configuration-properties.third-party-configuration)”). 
+ +The following example shows how to define a data source in a bean: + +``` +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyDataSourceConfiguration { + + @Bean + @ConfigurationProperties(prefix = "app.datasource") + public SomeDataSource dataSource() { + return new SomeDataSource(); + } + +} + +``` + +The following example shows how to define a data source by setting properties: + +Properties + +``` +app.datasource.url=jdbc:h2:mem:mydb +app.datasource.username=sa +app.datasource.pool-size=30 +``` + +Yaml + +``` +app: + datasource: + url: "jdbc:h2:mem:mydb" + username: "sa" + pool-size: 30 +``` + +Assuming that `SomeDataSource` has regular JavaBean properties for the URL, the username, and the pool size, these settings are bound automatically before the `DataSource` is made available to other components. + +Spring Boot also provides a utility builder class, called `DataSourceBuilder`, that can be used to create one of the standard data sources (if it is on the classpath). +The builder can detect the one to use based on what is available on the classpath. +It also auto-detects the driver based on the JDBC URL. 
+ +The following example shows how to create a data source by using a `DataSourceBuilder`: + +``` +import javax.sql.DataSource; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.jdbc.DataSourceBuilder; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyDataSourceConfiguration { + + @Bean + @ConfigurationProperties("app.datasource") + public DataSource dataSource() { + return DataSourceBuilder.create().build(); + } + +} + +``` + +To run an app with that `DataSource`, all you need is the connection information. +Pool-specific settings can also be provided. +Check the implementation that is going to be used at runtime for more details. + +The following example shows how to define a JDBC data source by setting properties: + +Properties + +``` +app.datasource.url=jdbc:mysql://localhost/test +app.datasource.username=dbuser +app.datasource.password=dbpass +app.datasource.pool-size=30 +``` + +Yaml + +``` +app: + datasource: + url: "jdbc:mysql://localhost/test" + username: "dbuser" + password: "dbpass" + pool-size: 30 +``` + +However, there is a catch. +Because the actual type of the connection pool is not exposed, no keys are generated in the metadata for your custom `DataSource` and no completion is available in your IDE (because the `DataSource` interface exposes no properties). +Also, if you happen to have Hikari on the classpath, this basic setup does not work, because Hikari has no `url` property (but does have a `jdbcUrl` property). 
+In that case, you must rewrite your configuration as follows:
+
+Properties
+
+```
+app.datasource.jdbc-url=jdbc:mysql://localhost/test
+app.datasource.username=dbuser
+app.datasource.password=dbpass
+app.datasource.pool-size=30
+```
+
+Yaml
+
+```
+app:
+ datasource:
+ jdbc-url: "jdbc:mysql://localhost/test"
+ username: "dbuser"
+ password: "dbpass"
+ pool-size: 30
+```
+
+You can fix that by forcing the connection pool to use and return a dedicated implementation rather than `DataSource`.
+You cannot change the implementation at runtime, but the list of options will be explicit.
+
+The following example shows how to create a `HikariDataSource` with `DataSourceBuilder`:
+
+```
+import com.zaxxer.hikari.HikariDataSource;
+
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.boot.jdbc.DataSourceBuilder;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration(proxyBeanMethods = false)
+public class MyDataSourceConfiguration {
+
+ @Bean
+ @ConfigurationProperties("app.datasource")
+ public HikariDataSource dataSource() {
+ return DataSourceBuilder.create().type(HikariDataSource.class).build();
+ }
+
+}
+
+```
+
+You can even go further by leveraging what `DataSourceProperties` does for you — that is, by providing a default embedded database with a sensible username and password if no URL is provided.
+You can easily initialize a `DataSourceBuilder` from the state of any `DataSourceProperties` object, so you could also inject the DataSource that Spring Boot creates automatically.
+However, that would split your configuration into two namespaces: `url`, `username`, `password`, `type`, and `driver` on `spring.datasource` and the rest on your custom namespace (`app.datasource`). 
+To avoid that, you can redefine a custom `DataSourceProperties` on your custom namespace, as shown in the following example: + +``` +import com.zaxxer.hikari.HikariDataSource; + +import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Primary; + +@Configuration(proxyBeanMethods = false) +public class MyDataSourceConfiguration { + + @Bean + @Primary + @ConfigurationProperties("app.datasource") + public DataSourceProperties dataSourceProperties() { + return new DataSourceProperties(); + } + + @Bean + @ConfigurationProperties("app.datasource.configuration") + public HikariDataSource dataSource(DataSourceProperties properties) { + return properties.initializeDataSourceBuilder().type(HikariDataSource.class).build(); + } + +} + +``` + +This setup puts you *in sync* with what Spring Boot does for you by default, except that a dedicated connection pool is chosen (in code) and its settings are exposed in the `app.datasource.configuration` sub namespace. +Because `DataSourceProperties` is taking care of the `url`/`jdbcUrl` translation for you, you can configure it as follows: + +Properties + +``` +app.datasource.url=jdbc:mysql://localhost/test +app.datasource.username=dbuser +app.datasource.password=dbpass +app.datasource.configuration.maximum-pool-size=30 +``` + +Yaml + +``` +app: + datasource: + url: "jdbc:mysql://localhost/test" + username: "dbuser" + password: "dbpass" + configuration: + maximum-pool-size: 30 +``` + +| |Spring Boot will expose Hikari-specific settings to `spring.datasource.hikari`.
This example uses a more generic `configuration` sub namespace as the example does not support multiple datasource implementations.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Because your custom configuration chooses to go with Hikari, `app.datasource.type` has no effect.
In practice, the builder is initialized with whatever value you might set there and then overridden by the call to `.type()`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See “[data.html](data.html#data.sql.datasource)” in the “Spring Boot features” section and the [`DataSourceAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/jdbc/DataSourceAutoConfiguration.java) class for more details. + +### 8.2. Configure Two DataSources + +If you need to configure multiple data sources, you can apply the same tricks that are described in the previous section. +You must, however, mark one of the `DataSource` instances as `@Primary`, because various auto-configurations down the road expect to be able to get one by type. + +If you create your own `DataSource`, the auto-configuration backs off. 
+In the following example, we provide the *exact* same feature set as the auto-configuration provides on the primary data source: + +``` +import com.zaxxer.hikari.HikariDataSource; +import org.apache.commons.dbcp2.BasicDataSource; + +import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.jdbc.DataSourceBuilder; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Primary; + +@Configuration(proxyBeanMethods = false) +public class MyDataSourcesConfiguration { + + @Bean + @Primary + @ConfigurationProperties("app.datasource.first") + public DataSourceProperties firstDataSourceProperties() { + return new DataSourceProperties(); + } + + @Bean + @Primary + @ConfigurationProperties("app.datasource.first.configuration") + public HikariDataSource firstDataSource(DataSourceProperties firstDataSourceProperties) { + return firstDataSourceProperties.initializeDataSourceBuilder().type(HikariDataSource.class).build(); + } + + @Bean + @ConfigurationProperties("app.datasource.second") + public BasicDataSource secondDataSource() { + return DataSourceBuilder.create().type(BasicDataSource.class).build(); + } + +} + +``` + +| |`firstDataSourceProperties` has to be flagged as `@Primary` so that the database initializer feature uses your copy (if you use the initializer).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------| + +Both data sources are also bound for advanced customizations. 
+For instance, you could configure them as follows: + +Properties + +``` +app.datasource.first.url=jdbc:mysql://localhost/first +app.datasource.first.username=dbuser +app.datasource.first.password=dbpass +app.datasource.first.configuration.maximum-pool-size=30 + +app.datasource.second.url=jdbc:mysql://localhost/second +app.datasource.second.username=dbuser +app.datasource.second.password=dbpass +app.datasource.second.max-total=30 +``` + +Yaml + +``` +app: + datasource: + first: + url: "jdbc:mysql://localhost/first" + username: "dbuser" + password: "dbpass" + configuration: + maximum-pool-size: 30 + + second: + url: "jdbc:mysql://localhost/second" + username: "dbuser" + password: "dbpass" + max-total: 30 +``` + +You can apply the same concept to the secondary `DataSource` as well, as shown in the following example: + +``` +import com.zaxxer.hikari.HikariDataSource; +import org.apache.commons.dbcp2.BasicDataSource; + +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Primary; + +@Configuration(proxyBeanMethods = false) +public class MyCompleteDataSourcesConfiguration { + + @Bean + @Primary + @ConfigurationProperties("app.datasource.first") + public DataSourceProperties firstDataSourceProperties() { + return new DataSourceProperties(); + } + + @Bean + @Primary + @ConfigurationProperties("app.datasource.first.configuration") + public HikariDataSource firstDataSource(DataSourceProperties firstDataSourceProperties) { + return firstDataSourceProperties.initializeDataSourceBuilder().type(HikariDataSource.class).build(); + } + + @Bean + @ConfigurationProperties("app.datasource.second") + public DataSourceProperties secondDataSourceProperties() { + return 
new DataSourceProperties(); + } + + @Bean + @ConfigurationProperties("app.datasource.second.configuration") + public BasicDataSource secondDataSource( + @Qualifier("secondDataSourceProperties") DataSourceProperties secondDataSourceProperties) { + return secondDataSourceProperties.initializeDataSourceBuilder().type(BasicDataSource.class).build(); + } + +} + +``` + +The preceding example configures two data sources on custom namespaces with the same logic as Spring Boot would use in auto-configuration. +Note that each `configuration` sub namespace provides advanced settings based on the chosen implementation. + +### 8.3. Use Spring Data Repositories + +Spring Data can create implementations of `@Repository` interfaces of various flavors. +Spring Boot handles all of that for you, as long as those `@Repositories` are included in the same package (or a sub-package) of your `@EnableAutoConfiguration` class. + +For many applications, all you need is to put the right Spring Data dependencies on your classpath. +There is a `spring-boot-starter-data-jpa` for JPA, `spring-boot-starter-data-mongodb` for Mongodb, and various other starters for supported technologies. +To get started, create some repository interfaces to handle your `@Entity` objects. + +Spring Boot tries to guess the location of your `@Repository` definitions, based on the `@EnableAutoConfiguration` it finds. +To get more control, use the `@EnableJpaRepositories` annotation (from Spring Data JPA). + +For more about Spring Data, see the [Spring Data project page](https://spring.io/projects/spring-data). + +### 8.4. Separate @Entity Definitions from Spring Configuration ### + +Spring Boot tries to guess the location of your `@Entity` definitions, based on the `@EnableAutoConfiguration` it finds. 
+To get more control, you can use the `@EntityScan` annotation, as shown in the following example: + +``` +import org.springframework.boot.autoconfigure.EnableAutoConfiguration; +import org.springframework.boot.autoconfigure.domain.EntityScan; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +@EnableAutoConfiguration +@EntityScan(basePackageClasses = City.class) +public class MyApplication { + + // ... + +} + +``` + +### 8.5. Configure JPA Properties + +Spring Data JPA already provides some vendor-independent configuration options (such as those for SQL logging), and Spring Boot exposes those options and a few more for Hibernate as external configuration properties. +Some of them are automatically detected according to the context so you should not have to set them. + +The `spring.jpa.hibernate.ddl-auto` is a special case, because, depending on runtime conditions, it has different defaults. +If an embedded database is used and no schema manager (such as Liquibase or Flyway) is handling the `DataSource`, it defaults to `create-drop`. +In all other cases, it defaults to `none`. + +The dialect to use is detected by the JPA provider. +If you prefer to set the dialect yourself, set the `spring.jpa.database-platform` property. + +The most common options to set are shown in the following example: + +Properties + +``` +spring.jpa.hibernate.naming.physical-strategy=com.example.MyPhysicalNamingStrategy +spring.jpa.show-sql=true +``` + +Yaml + +``` +spring: + jpa: + hibernate: + naming: + physical-strategy: "com.example.MyPhysicalNamingStrategy" + show-sql: true +``` + +In addition, all properties in `spring.jpa.properties.*` are passed through as normal JPA properties (with the prefix stripped) when the local `EntityManagerFactory` is created. + +| |You need to ensure that names defined under `spring.jpa.properties.*` exactly match those expected by your JPA provider.
Spring Boot will not attempt any kind of relaxed binding for these entries.

For example, if you want to configure Hibernate’s batch size, you must use `spring.jpa.properties.hibernate.jdbc.batch_size`.
If you use other forms, such as `batchSize` or `batch-size`, Hibernate will not apply the setting.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you need to apply advanced customization to Hibernate properties, consider registering a `HibernatePropertiesCustomizer` bean that will be invoked prior to creating the `EntityManagerFactory`.
This takes precedence over anything that is applied by the auto-configuration.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 8.6. Configure Hibernate Naming Strategy
+
+Hibernate uses [two different naming strategies](https://docs.jboss.org/hibernate/orm/5.4/userguide/html_single/Hibernate_User_Guide.html#naming) to map names from the object model to the corresponding database names.
+The fully qualified class name of the physical and the implicit strategy implementations can be configured by setting the `spring.jpa.hibernate.naming.physical-strategy` and `spring.jpa.hibernate.naming.implicit-strategy` properties, respectively.
+Alternatively, if `ImplicitNamingStrategy` or `PhysicalNamingStrategy` beans are available in the application context, Hibernate will be automatically configured to use them.
+
+By default, Spring Boot configures the physical naming strategy with `CamelCaseToUnderscoresNamingStrategy`.
+Using this strategy, all dots are replaced by underscores and camel casing is replaced by underscores as well.
+Additionally, by default, all table names are generated in lower case.
+For example, a `TelephoneNumber` entity is mapped to the `telephone_number` table. 
+If your schema requires mixed-case identifiers, define a custom `CamelCaseToUnderscoresNamingStrategy` bean, as shown in the following example: + +``` +import org.hibernate.boot.model.naming.CamelCaseToUnderscoresNamingStrategy; +import org.hibernate.engine.jdbc.env.spi.JdbcEnvironment; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyHibernateConfiguration { + + @Bean + public CamelCaseToUnderscoresNamingStrategy caseSensitivePhysicalNamingStrategy() { + return new CamelCaseToUnderscoresNamingStrategy() { + + @Override + protected boolean isCaseInsensitive(JdbcEnvironment jdbcEnvironment) { + return false; + } + + }; + } + +} + +``` + +If you prefer to use Hibernate 5’s default instead, set the following property: + +``` +spring.jpa.hibernate.naming.physical-strategy=org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl +``` + +Alternatively, you can configure the following bean: + +``` +import org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +class MyHibernateConfiguration { + + @Bean + PhysicalNamingStrategyStandardImpl caseSensitivePhysicalNamingStrategy() { + return new PhysicalNamingStrategyStandardImpl(); + } + +} + +``` + +See [`HibernateJpaAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/orm/jpa/HibernateJpaAutoConfiguration.java) and [`JpaBaseConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/orm/jpa/JpaBaseConfiguration.java) for more details. + +### 8.7. 
Configure Hibernate Second-Level Caching + +Hibernate [second-level cache](https://docs.jboss.org/hibernate/orm/5.4/userguide/html_single/Hibernate_User_Guide.html#caching) can be configured for a range of cache providers. +Rather than configuring Hibernate to lookup the cache provider again, it is better to provide the one that is available in the context whenever possible. + +To do this with JCache, first make sure that `org.hibernate:hibernate-jcache` is available on the classpath. +Then, add a `HibernatePropertiesCustomizer` bean as shown in the following example: + +``` +import org.hibernate.cache.jcache.ConfigSettings; + +import org.springframework.boot.autoconfigure.orm.jpa.HibernatePropertiesCustomizer; +import org.springframework.cache.jcache.JCacheCacheManager; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyHibernateSecondLevelCacheConfiguration { + + @Bean + public HibernatePropertiesCustomizer hibernateSecondLevelCacheCustomizer(JCacheCacheManager cacheManager) { + return (properties) -> properties.put(ConfigSettings.CACHE_MANAGER, cacheManager.getCacheManager()); + } + +} + +``` + +This customizer will configure Hibernate to use the same `CacheManager` as the one that the application uses. +It is also possible to use separate `CacheManager` instances. +For details, see [the Hibernate user guide](https://docs.jboss.org/hibernate/orm/5.4/userguide/html_single/Hibernate_User_Guide.html#caching-provider-jcache). + +### 8.8. Use Dependency Injection in Hibernate Components ### + +By default, Spring Boot registers a `BeanContainer` implementation that uses the `BeanFactory` so that converters and entity listeners can use regular dependency injection. + +You can disable or tune this behavior by registering a `HibernatePropertiesCustomizer` that removes or changes the `hibernate.resource.beans.container` property. + +### 8.9. 
Use a Custom EntityManagerFactory + +To take full control of the configuration of the `EntityManagerFactory`, you need to add a `@Bean` named ‘entityManagerFactory’. +Spring Boot auto-configuration switches off its entity manager in the presence of a bean of that type. + +### 8.10. Using Multiple EntityManagerFactories + +If you need to use JPA against multiple data sources, you likely need one `EntityManagerFactory` per data source. +The `LocalContainerEntityManagerFactoryBean` from Spring ORM allows you to configure an `EntityManagerFactory` for your needs. +You can also reuse `JpaProperties` to bind settings for each `EntityManagerFactory`, as shown in the following example: + +``` +import javax.sql.DataSource; + +import org.springframework.boot.autoconfigure.orm.jpa.JpaProperties; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.orm.jpa.EntityManagerFactoryBuilder; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.orm.jpa.JpaVendorAdapter; +import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean; +import org.springframework.orm.jpa.vendor.HibernateJpaVendorAdapter; + +@Configuration(proxyBeanMethods = false) +public class MyEntityManagerFactoryConfiguration { + + @Bean + @ConfigurationProperties("app.jpa.first") + public JpaProperties firstJpaProperties() { + return new JpaProperties(); + } + + @Bean + public LocalContainerEntityManagerFactoryBean firstEntityManagerFactory(DataSource firstDataSource, + JpaProperties firstJpaProperties) { + EntityManagerFactoryBuilder builder = createEntityManagerFactoryBuilder(firstJpaProperties); + return builder.dataSource(firstDataSource).packages(Order.class).persistenceUnit("firstDs").build(); + } + + private EntityManagerFactoryBuilder createEntityManagerFactoryBuilder(JpaProperties jpaProperties) { + JpaVendorAdapter jpaVendorAdapter = 
createJpaVendorAdapter(jpaProperties); + return new EntityManagerFactoryBuilder(jpaVendorAdapter, jpaProperties.getProperties(), null); + } + + private JpaVendorAdapter createJpaVendorAdapter(JpaProperties jpaProperties) { + // ... map JPA properties as needed + return new HibernateJpaVendorAdapter(); + } + +} + +``` + +The example above creates an `EntityManagerFactory` using a `DataSource` bean named `firstDataSource`. +It scans entities located in the same package as `Order`. +It is possible to map additional JPA properties using the `app.jpa.first` namespace. + +| |When you create a bean for `LocalContainerEntityManagerFactoryBean` yourself, any customization that was applied during the creation of the auto-configured `LocalContainerEntityManagerFactoryBean` is lost.
For example, in case of Hibernate, any properties under the `spring.jpa.hibernate` prefix will not be automatically applied to your `LocalContainerEntityManagerFactoryBean`.
If you were relying on these properties for configuring things like the naming strategy or the DDL mode, you will need to explicitly configure that when creating the `LocalContainerEntityManagerFactoryBean` bean.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You should provide a similar configuration for any additional data sources for which you need JPA access. +To complete the picture, you need to configure a `JpaTransactionManager` for each `EntityManagerFactory` as well. +Alternatively, you might be able to use a JTA transaction manager that spans both. + +If you use Spring Data, you need to configure `@EnableJpaRepositories` accordingly, as shown in the following examples: + +``` +import org.springframework.context.annotation.Configuration; +import org.springframework.data.jpa.repository.config.EnableJpaRepositories; + +@Configuration(proxyBeanMethods = false) +@EnableJpaRepositories(basePackageClasses = Order.class, entityManagerFactoryRef = "firstEntityManagerFactory") +public class OrderConfiguration { + +} + +``` + +``` +import org.springframework.context.annotation.Configuration; +import org.springframework.data.jpa.repository.config.EnableJpaRepositories; + +@Configuration(proxyBeanMethods = false) +@EnableJpaRepositories(basePackageClasses = Customer.class, entityManagerFactoryRef = "secondEntityManagerFactory") +public class CustomerConfiguration { + +} + +``` + +### 8.11. 
Use a Traditional persistence.xml File + +Spring Boot will not search for or use a `META-INF/persistence.xml` by default. +If you prefer to use a traditional `persistence.xml`, you need to define your own `@Bean` of type `LocalEntityManagerFactoryBean` (with an ID of ‘entityManagerFactory’) and set the persistence unit name there. + +See [`JpaBaseConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/orm/jpa/JpaBaseConfiguration.java) for the default settings. + +### 8.12. Use Spring Data JPA and Mongo Repositories ### + +Spring Data JPA and Spring Data Mongo can both automatically create `Repository` implementations for you. +If they are both present on the classpath, you might have to do some extra configuration to tell Spring Boot which repositories to create. +The most explicit way to do that is to use the standard Spring Data `@EnableJpaRepositories` and `@EnableMongoRepositories` annotations and provide the location of your `Repository` interfaces. + +There are also flags (`spring.data.*.repositories.enabled` and `spring.data.*.repositories.type`) that you can use to switch the auto-configured repositories on and off in external configuration. +Doing so is useful, for instance, in case you want to switch off the Mongo repositories and still use the auto-configured `MongoTemplate`. + +The same obstacle and the same features exist for other auto-configured Spring Data repository types (Elasticsearch, Solr, and others). +To work with them, change the names of the annotations and flags accordingly. + +### 8.13. Customize Spring Data’s Web Support + +Spring Data provides web support that simplifies the use of Spring Data repositories in a web application. +Spring Boot provides properties in the `spring.data.web` namespace for customizing its configuration. 
+Note that if you are using Spring Data REST, you must use the properties in the `spring.data.rest` namespace instead. + +### 8.14. Expose Spring Data Repositories as REST Endpoint ### + +Spring Data REST can expose the `Repository` implementations as REST endpoints for you, +provided Spring MVC has been enabled for the application. + +Spring Boot exposes a set of useful properties (from the `spring.data.rest` namespace) that customize the [`RepositoryRestConfiguration`](https://docs.spring.io/spring-data/rest/docs/3.6.2/api/org/springframework/data/rest/core/config/RepositoryRestConfiguration.html). +If you need to provide additional customization, you should use a [`RepositoryRestConfigurer`](https://docs.spring.io/spring-data/rest/docs/3.6.2/api/org/springframework/data/rest/webmvc/config/RepositoryRestConfigurer.html) bean. + +| |If you do not specify any order on your custom `RepositoryRestConfigurer`, it runs after the one Spring Boot uses internally.
If you need to specify an order, make sure it is higher than 0.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.15. Configure a Component that is Used by JPA ### + +If you want to configure a component that JPA uses, then you need to ensure that the component is initialized before JPA. +When the component is auto-configured, Spring Boot takes care of this for you. +For example, when Flyway is auto-configured, Hibernate is configured to depend upon Flyway so that Flyway has a chance to initialize the database before Hibernate tries to use it. + +If you are configuring a component yourself, you can use an `EntityManagerFactoryDependsOnPostProcessor` subclass as a convenient way of setting up the necessary dependencies. +For example, if you use Hibernate Search with Elasticsearch as its index manager, any `EntityManagerFactory` beans must be configured to depend on the `elasticsearchClient` bean, as shown in the following example: + +``` +import javax.persistence.EntityManagerFactory; + +import org.springframework.boot.autoconfigure.orm.jpa.EntityManagerFactoryDependsOnPostProcessor; +import org.springframework.stereotype.Component; + +/** + * {@link EntityManagerFactoryDependsOnPostProcessor} that ensures that + * {@link EntityManagerFactory} beans depend on the {@code elasticsearchClient} bean. + */ +@Component +public class ElasticsearchEntityManagerFactoryDependsOnPostProcessor + extends EntityManagerFactoryDependsOnPostProcessor { + + public ElasticsearchEntityManagerFactoryDependsOnPostProcessor() { + super("elasticsearchClient"); + } + +} + +``` + +### 8.16. Configure jOOQ with Two DataSources + +If you need to use jOOQ with multiple data sources, you should create your own `DSLContext` for each one. 
+See [JooqAutoConfiguration](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/jooq/JooqAutoConfiguration.java) for more details. + +| |In particular, `JooqExceptionTranslator` and `SpringTransactionProvider` can be reused to provide similar features to what the auto-configuration does with a single `DataSource`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 9. Database Initialization + +An SQL database can be initialized in different ways depending on what your stack is. +Of course, you can also do it manually, provided the database is a separate process. +It is recommended to use a single mechanism for schema generation. + +### 9.1. Initialize a Database Using JPA + +JPA has features for DDL generation, and these can be set up to run on startup against the database. +This is controlled through two external properties: + +* `spring.jpa.generate-ddl` (boolean) switches the feature on and off and is vendor independent. + +* `spring.jpa.hibernate.ddl-auto` (enum) is a Hibernate feature that controls the behavior in a more fine-grained way. + This feature is described in more detail later in this guide. + +### 9.2. Initialize a Database Using Hibernate + +You can set `spring.jpa.hibernate.ddl-auto` explicitly and the standard Hibernate property values are `none`, `validate`, `update`, `create`, and `create-drop`. +Spring Boot chooses a default value for you based on whether it thinks your database is embedded. +It defaults to `create-drop` if no schema manager has been detected or `none` in all other cases. +An embedded database is detected by looking at the `Connection` type and JDBC url.`hsqldb`, `h2`, and `derby` are candidates, and others are not. 
+Be careful when switching from in-memory to a ‘real’ database that you do not make assumptions about the existence of the tables and data in the new platform. +You either have to set `ddl-auto` explicitly or use one of the other mechanisms to initialize the database. + +| |You can output the schema creation by enabling the `org.hibernate.SQL` logger.
This is done for you automatically if you enable the [debug mode](features.html#features.logging.console-output).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In addition, a file named `import.sql` in the root of the classpath is executed on startup if Hibernate creates the schema from scratch (that is, if the `ddl-auto` property is set to `create` or `create-drop`). +This can be useful for demos and for testing if you are careful but is probably not something you want to be on the classpath in production. +It is a Hibernate feature (and has nothing to do with Spring). + +### 9.3. Initialize a Database Using Basic SQL Scripts + +Spring Boot can automatically create the schema (DDL scripts) of your JDBC `DataSource` or R2DBC `ConnectionFactory` and initialize it (DML scripts). +It loads SQL from the standard root classpath locations: `schema.sql` and `data.sql`, respectively. +In addition, Spring Boot processes the `schema-${platform}.sql` and `data-${platform}.sql` files (if present), where `platform` is the value of `spring.sql.init.platform`. +This allows you to switch to database-specific scripts if necessary. +For example, you might choose to set it to the vendor name of the database (`hsqldb`, `h2`, `oracle`, `mysql`, `postgresql`, and so on). +By default, SQL database initialization is only performed when using an embedded in-memory database. +To always initialize an SQL database, irrespective of its type, set `spring.sql.init.mode` to `always`. +Similarly, to disable initialization, set `spring.sql.init.mode` to `never`. +By default, Spring Boot enables the fail-fast feature of its script-based database initializer. +This means that, if the scripts cause exceptions, the application fails to start. +You can tune that behavior by setting `spring.sql.init.continue-on-error`. 
+ +Script-based `DataSource` initialization is performed, by default, before any JPA `EntityManagerFactory` beans are created.`schema.sql` can be used to create the schema for JPA-managed entities and `data.sql` can be used to populate it. +While we do not recommend using multiple data source initialization technologies, if you want script-based `DataSource` initialization to be able to build upon the schema creation performed by Hibernate, set `spring.jpa.defer-datasource-initialization` to `true`. +This will defer data source initialization until after any `EntityManagerFactory` beans have been created and initialized.`schema.sql` can then be used to make additions to any schema creation performed by Hibernate and `data.sql` can be used to populate it. + +If you are using a [Higher-level Database Migration Tool](#howto.data-initialization.migration-tool), like Flyway or Liquibase, you should use them alone to create and initialize the schema. +Using the basic `schema.sql` and `data.sql` scripts alongside Flyway or Liquibase is not recommended and support will be removed in a future release. + +### 9.4. Initialize a Spring Batch Database + +If you use Spring Batch, it comes pre-packaged with SQL initialization scripts for most popular database platforms. +Spring Boot can detect your database type and execute those scripts on startup. +If you use an embedded database, this happens by default. +You can also enable it for any database type, as shown in the following example: + +Properties + +``` +spring.batch.jdbc.initialize-schema=always +``` + +Yaml + +``` +spring: + batch: + jdbc: + initialize-schema: "always" +``` + +You can also switch off the initialization explicitly by setting `spring.batch.jdbc.initialize-schema` to `never`. + +### 9.5. Use a Higher-level Database Migration Tool + +Spring Boot supports two higher-level migration tools: [Flyway](https://flywaydb.org/) and [Liquibase](https://www.liquibase.org/). + +#### 9.5.1. 
Execute Flyway Database Migrations on Startup + +To automatically run Flyway database migrations on startup, add the `org.flywaydb:flyway-core` to your classpath. + +Typically, migrations are scripts in the form `V<VERSION>__<NAME>.sql` (with `<VERSION>` an underscore-separated version, such as ‘1’ or ‘2\_1’). +By default, they are in a directory called `classpath:db/migration`, but you can modify that location by setting `spring.flyway.locations`. +This is a comma-separated list of one or more `classpath:` or `filesystem:` locations. +For example, the following configuration would search for scripts in both the default classpath location and the `/opt/migration` directory: + +Properties + +``` +spring.flyway.locations=classpath:db/migration,filesystem:/opt/migration +``` + +Yaml + +``` +spring: + flyway: + locations: "classpath:db/migration,filesystem:/opt/migration" +``` + +You can also add a special `{vendor}` placeholder to use vendor-specific scripts. +Assume the following: + +Properties + +``` +spring.flyway.locations=classpath:db/migration/{vendor} +``` + +Yaml + +``` +spring: + flyway: + locations: "classpath:db/migration/{vendor}" +``` + +Rather than using `db/migration`, the preceding configuration sets the directory to use according to the type of the database (such as `db/migration/mysql` for MySQL). +The list of supported databases is available in [`DatabaseDriver`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/jdbc/DatabaseDriver.java). + +Migrations can also be written in Java. +Flyway will be auto-configured with any beans that implement `JavaMigration`. 
+ +[`FlywayProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/flyway/FlywayProperties.java) provides most of Flyway’s settings and a small set of additional properties that can be used to disable the migrations or switch off the location checking. +If you need more control over the configuration, consider registering a `FlywayConfigurationCustomizer` bean. + +Spring Boot calls `Flyway.migrate()` to perform the database migration. +If you would like more control, provide a `@Bean` that implements [`FlywayMigrationStrategy`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/flyway/FlywayMigrationStrategy.java). + +Flyway supports SQL and Java [callbacks](https://flywaydb.org/documentation/concepts/callbacks). +To use SQL-based callbacks, place the callback scripts in the `classpath:db/migration` directory. +To use Java-based callbacks, create one or more beans that implement `Callback`. +Any such beans are automatically registered with `Flyway`. +They can be ordered by using `@Order` or by implementing `Ordered`. +Beans that implement the deprecated `FlywayCallback` interface can also be detected, however they cannot be used alongside `Callback` beans. + +By default, Flyway autowires the (`@Primary`) `DataSource` in your context and uses that for migrations. +If you like to use a different `DataSource`, you can create one and mark its `@Bean` as `@FlywayDataSource`. +If you do so and want two data sources, remember to create another one and mark it as `@Primary`. +Alternatively, you can use Flyway’s native `DataSource` by setting `spring.flyway.[url,user,password]` in external properties. +Setting either `spring.flyway.url` or `spring.flyway.user` is sufficient to cause Flyway to use its own `DataSource`. 
+If any of the three properties has not been set, the value of its equivalent `spring.datasource` property will be used. + +You can also use Flyway to provide data for specific scenarios. +For example, you can place test-specific migrations in `src/test/resources` and they are run only when your application starts for testing. +Also, you can use profile-specific configuration to customize `spring.flyway.locations` so that certain migrations run only when a particular profile is active. +For example, in `application-dev.properties`, you might specify the following setting: + +Properties + +``` +spring.flyway.locations=classpath:/db/migration,classpath:/dev/db/migration +``` + +Yaml + +``` +spring: + flyway: + locations: "classpath:/db/migration,classpath:/dev/db/migration" +``` + +With that setup, migrations in `dev/db/migration` run only when the `dev` profile is active. + +#### 9.5.2. Execute Liquibase Database Migrations on Startup #### + +To automatically run Liquibase database migrations on startup, add the `org.liquibase:liquibase-core` to your classpath. + +| |When you add the `org.liquibase:liquibase-core` to your classpath, database migrations run by default for both during application startup and before your tests run.
This behavior can be customized by using the `spring.liquibase.enabled` property, setting different values in the `main` and `test` configurations.
It is not possible to use two different ways to initialize the database (for example Liquibase for application startup, JPA for test runs).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, the master change log is read from `db/changelog/db.changelog-master.yaml`, but you can change the location by setting `spring.liquibase.change-log`. +In addition to YAML, Liquibase also supports JSON, XML, and SQL change log formats. + +By default, Liquibase autowires the (`@Primary`) `DataSource` in your context and uses that for migrations. +If you need to use a different `DataSource`, you can create one and mark its `@Bean` as `@LiquibaseDataSource`. +If you do so and you want two data sources, remember to create another one and mark it as `@Primary`. +Alternatively, you can use Liquibase’s native `DataSource` by setting `spring.liquibase.[driver-class-name,url,user,password]` in external properties. +Setting either `spring.liquibase.url` or `spring.liquibase.user` is sufficient to cause Liquibase to use its own `DataSource`. +If any of the three properties has not been set, the value of its equivalent `spring.datasource` property will be used. + +See [`LiquibaseProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/liquibase/LiquibaseProperties.java) for details about available settings such as contexts, the default schema, and others. + +### 9.6. 
Depend Upon an Initialized Database + +Database initialization is performed while the application is starting up as part of application context refresh. +To allow an initialized database to be accessed during startup, beans that act as database initializers and beans that require that database to have been initialized are detected automatically. +Beans whose initialization depends upon the database having been initialized are configured to depend upon those that initialize it. +If, during startup, your application tries to access the database and it has not been initialized, you can configure additional detection of beans that initialize the database and require the database to have been initialized. + +#### 9.6.1. Detect a Database Initializer + +Spring Boot will automatically detect beans of the following types that initialize an SQL database: + +* `DataSourceScriptDatabaseInitializer` + +* `EntityManagerFactory` + +* `Flyway` + +* `FlywayMigrationInitializer` + +* `R2dbcScriptDatabaseInitializer` + +* `SpringLiquibase` + +If you are using a third-party starter for a database initialization library, it may provide a detector such that beans of other types are also detected automatically. +To have other beans be detected, register an implementation of `DatabaseInitializerDetector` in `META-INF/spring-factories`. + +#### 9.6.2. Detect a Bean That Depends On Database Initialization #### + +Spring Boot will automatically detect beans of the following types that depends upon database initialization: + +* `AbstractEntityManagerFactoryBean` (unless `spring.jpa.defer-datasource-initialization` is set to `true`) + +* `DSLContext` (jOOQ) + +* `EntityManagerFactory` (unless `spring.jpa.defer-datasource-initialization` is set to `true`) + +* `JdbcOperations` + +* `NamedParameterJdbcOperations` + +If you are using a third-party starter data access library, it may provide a detector such that beans of other types are also detected automatically. 
+To have other beans be detected, register an implementation of `DependsOnDatabaseInitializationDetector` in `META-INF/spring-factories`. +Alternatively, annotate the bean’s class or its `@Bean` method with `@DependsOnDatabaseInitialization`. + +## 10. Messaging + +Spring Boot offers a number of starters to support messaging. +This section answers questions that arise from using messaging with Spring Boot. + +### 10.1. Disable Transacted JMS Session + +If your JMS broker does not support transacted sessions, you have to disable the support of transactions altogether. +If you create your own `JmsListenerContainerFactory`, there is nothing to do, since, by default it cannot be transacted. +If you want to use the `DefaultJmsListenerContainerFactoryConfigurer` to reuse Spring Boot’s default, you can disable transacted sessions, as follows: + +``` +import javax.jms.ConnectionFactory; + +import org.springframework.boot.autoconfigure.jms.DefaultJmsListenerContainerFactoryConfigurer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.jms.config.DefaultJmsListenerContainerFactory; + +@Configuration(proxyBeanMethods = false) +public class MyJmsConfiguration { + + @Bean + public DefaultJmsListenerContainerFactory jmsListenerContainerFactory(ConnectionFactory connectionFactory, + DefaultJmsListenerContainerFactoryConfigurer configurer) { + DefaultJmsListenerContainerFactory listenerFactory = new DefaultJmsListenerContainerFactory(); + configurer.configure(listenerFactory, connectionFactory); + listenerFactory.setTransactionManager(null); + listenerFactory.setSessionTransacted(false); + return listenerFactory; + } + +} + +``` + +The preceding example overrides the default factory, and it should be applied to any other factory that your application defines, if any. + +## 11. 
Batch Applications + +A number of questions often arise when people use Spring Batch from within a Spring Boot application. +This section addresses those questions. + +### 11.1. Specifying a Batch Data Source + +By default, batch applications require a `DataSource` to store job details. +Spring Batch expects a single `DataSource` by default. +To have it use a `DataSource` other than the application’s main `DataSource`, declare a `DataSource` bean, annotating its `@Bean` method with `@BatchDataSource`. +If you do so and want two data sources, remember to mark the other one `@Primary`. +To take greater control, implement `BatchConfigurer`. +See [The Javadoc of `@EnableBatchProcessing`](https://docs.spring.io/spring-batch/docs/4.3.5/api/org/springframework/batch/core/configuration/annotation/EnableBatchProcessing.html) for more details. + +For more info about Spring Batch, see the [Spring Batch project page](https://spring.io/projects/spring-batch). + +### 11.2. Running Spring Batch Jobs on Startup + +Spring Batch auto-configuration is enabled by adding `@EnableBatchProcessing` to one of your `@Configuration` classes. + +By default, it executes **all** `Jobs` in the application context on startup (see [`JobLauncherApplicationRunner`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/batch/JobLauncherApplicationRunner.java) for details). +You can narrow down to a specific job or jobs by specifying `spring.batch.job.names` (which takes a comma-separated list of job name patterns). 
+ +See [BatchAutoConfiguration](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/batch/BatchAutoConfiguration.java) and [@EnableBatchProcessing](https://docs.spring.io/spring-batch/docs/4.3.5/api/org/springframework/batch/core/configuration/annotation/EnableBatchProcessing.html) for more details. + +### 11.3. Running from the Command Line + +Spring Boot converts any command line argument starting with `--` to a property to add to the `Environment`, see [accessing command line properties](features.html#features.external-config.command-line-args). +This should not be used to pass arguments to batch jobs. +To specify batch arguments on the command line, use the regular format (that is without `--`), as shown in the following example: + +``` +$ java -jar myapp.jar someParameter=someValue anotherParameter=anotherValue +``` + +If you specify a property of the `Environment` on the command line, it is ignored by the job. +Consider the following command: + +``` +$ java -jar myapp.jar --server.port=7070 someParameter=someValue +``` + +This provides only one argument to the batch job: `someParameter=someValue`. + +### 11.4. Storing the Job Repository + +Spring Batch requires a data store for the `Job` repository. +If you use Spring Boot, you must use an actual database. +Note that it can be an in-memory database, see [Configuring a Job Repository](https://docs.spring.io/spring-batch/docs/4.3.5/reference/html/job.html#configuringJobRepository). + +## 12. Actuator + +Spring Boot includes the Spring Boot Actuator. +This section answers questions that often arise from its use. + +### 12.1. Change the HTTP Port or Address of the Actuator Endpoints ### + +In a standalone application, the Actuator HTTP port defaults to the same as the main HTTP port. +To make the application listen on a different port, set the external property: `management.server.port`. 
+To listen on a completely different network address (such as when you have an internal network for management and an external one for user applications), you can also set `management.server.address` to a valid IP address to which the server is able to bind. + +For more detail, see the [`ManagementServerProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/web/server/ManagementServerProperties.java) source code and “[actuator.html](actuator.html#actuator.monitoring.customizing-management-server-port)” in the “Production-ready features” section. + +### 12.2. Customize the ‘whitelabel’ Error Page + +Spring Boot installs a ‘whitelabel’ error page that you see in a browser client if you encounter a server error (machine clients consuming JSON and other media types should see a sensible response with the right error code). + +| |Set `server.error.whitelabel.enabled=false` to switch the default error page off.
Doing so restores the default of the servlet container that you are using.
Note that Spring Boot still tries to resolve the error view, so you should probably add your own error page rather than disabling it completely.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Overriding the error page with your own depends on the templating technology that you use. +For example, if you use Thymeleaf, you can add an `error.html` template. +If you use FreeMarker, you can add an `error.ftlh` template. +In general, you need a `View` that resolves with a name of `error` or a `@Controller` that handles the `/error` path. +Unless you replaced some of the default configuration, you should find a `BeanNameViewResolver` in your `ApplicationContext`, so a `@Bean` named `error` would be one way of doing that. +See [`ErrorMvcAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/servlet/error/ErrorMvcAutoConfiguration.java) for more options. + +See also the section on “[Error Handling](web.html#web.servlet.spring-mvc.error-handling)” for details of how to register handlers in the servlet container. + +### 12.3. Sanitize Sensitive Values + +Information returned by the `env` and `configprops` endpoints can be somewhat sensitive so keys matching certain patterns are sanitized by default (that is their values are replaced by `******`). +Spring Boot uses sensible defaults for such keys: any key ending with the word "password", "secret", "key", "token", "vcap\_services", "sun.java.command" is entirely sanitized. 
+Additionally, any key that holds the word `credentials` (configured as a regular expression, that is `.*credentials.*`) as part of the key is also entirely sanitized. + +Furthermore, Spring Boot sanitizes the sensitive portion of URI-like values for keys with one of the following endings: + +* `address` + +* `addresses` + +* `uri` + +* `uris` + +* `url` + +* `urls` + +The sensitive portion of the URI is identified using the format `<scheme>://<username>:<password>@<host>:<port>/`. +For example, for the property `myclient.uri=http://user1:password1@localhost:8081`, the resulting sanitized value is `http://user1:******@localhost:8081`. + +The default patterns used by the `env` and `configprops` endpoints can be replaced using `management.endpoint.env.keys-to-sanitize` and `management.endpoint.configprops.keys-to-sanitize` respectively. +Alternatively, additional patterns can be configured using `management.endpoint.env.additional-keys-to-sanitize` and `management.endpoint.configprops.additional-keys-to-sanitize`. + +### 12.4. Map Health Indicators to Micrometer Metrics + +Spring Boot health indicators return a `Status` type to indicate the overall system health. +If you want to monitor or alert on levels of health for a particular application, you can export these statuses as metrics with Micrometer. +By default, the status codes “UP”, “DOWN”, “OUT\_OF\_SERVICE” and “UNKNOWN” are used by Spring Boot. +To export these, you will need to convert these states to some set of numbers so that they can be used with a Micrometer `Gauge`. 
+ +The following example shows one way to write such an exporter: + +``` +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.MeterRegistry; + +import org.springframework.boot.actuate.health.HealthEndpoint; +import org.springframework.boot.actuate.health.Status; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyHealthMetricsExportConfiguration { + + public MyHealthMetricsExportConfiguration(MeterRegistry registry, HealthEndpoint healthEndpoint) { + // This example presumes common tags (such as the app) are applied elsewhere + Gauge.builder("health", healthEndpoint, this::getStatusCode).strongReference(true).register(registry); + } + + private int getStatusCode(HealthEndpoint health) { + Status status = health.health().getStatus(); + if (Status.UP.equals(status)) { + return 3; + } + if (Status.OUT_OF_SERVICE.equals(status)) { + return 2; + } + if (Status.DOWN.equals(status)) { + return 1; + } + return 0; + } + +} + +``` + +## 13. Security + +This section addresses questions about security when working with Spring Boot, including questions that arise from using Spring Security with Spring Boot. + +For more about Spring Security, see the [Spring Security project page](https://spring.io/projects/spring-security). + +### 13.1. Switch off the Spring Boot Security Configuration ### + +If you define a `@Configuration` with a `WebSecurityConfigurerAdapter` or a `SecurityFilterChain` bean in your application, it switches off the default webapp security settings in Spring Boot. + +### 13.2. Change the UserDetailsService and Add User Accounts ### + +If you provide a `@Bean` of type `AuthenticationManager`, `AuthenticationProvider`, or `UserDetailsService`, the default `@Bean` for `InMemoryUserDetailsManager` is not created. 
+This means you have the full feature set of Spring Security available (such as [various authentication options](https://docs.spring.io/spring-security/reference/5.6.2/servlet/authentication/index.html)). + +The easiest way to add user accounts is to provide your own `UserDetailsService` bean. + +### 13.3. Enable HTTPS When Running behind a Proxy Server + +Ensuring that all your main endpoints are only available over HTTPS is an important chore for any application. +If you use Tomcat as a servlet container, then Spring Boot adds Tomcat’s own `RemoteIpValve` automatically if it detects some environment settings, and you should be able to rely on the `HttpServletRequest` to report whether it is secure or not (even downstream of a proxy server that handles the real SSL termination). +The standard behavior is determined by the presence or absence of certain request headers (`x-forwarded-for` and `x-forwarded-proto`), whose names are conventional, so it should work with most front-end proxies. +You can switch on the valve by adding some entries to `application.properties`, as shown in the following example: + +Properties + +``` +server.tomcat.remoteip.remote-ip-header=x-forwarded-for +server.tomcat.remoteip.protocol-header=x-forwarded-proto +``` + +Yaml + +``` +server: + tomcat: + remoteip: + remote-ip-header: "x-forwarded-for" + protocol-header: "x-forwarded-proto" +``` + +(The presence of either of those properties switches on the valve. +Alternatively, you can add the `RemoteIpValve` by customizing the `TomcatServletWebServerFactory` using a `WebServerFactoryCustomizer` bean.) 
+ +To configure Spring Security to require a secure channel for all (or some) requests, consider adding your own `SecurityFilterChain` bean that adds the following `HttpSecurity` configuration: + +``` +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.security.config.annotation.web.builders.HttpSecurity; +import org.springframework.security.web.SecurityFilterChain; + +@Configuration +public class MySecurityConfig { + + @Bean + public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception { + // Customize the application security ... + http.requiresChannel().anyRequest().requiresSecure(); + return http.build(); + } + +} + +``` + +## 14. Hot Swapping + +Spring Boot supports hot swapping. +This section answers questions about how it works. + +### 14.1. Reload Static Content + +There are several options for hot reloading. +The recommended approach is to use [`spring-boot-devtools`](using.html#using.devtools), as it provides additional development-time features, such as support for fast application restarts and LiveReload as well as sensible development-time configuration (such as template caching). +Devtools works by monitoring the classpath for changes. +This means that static resource changes must be "built" for the change to take effect. +By default, this happens automatically in Eclipse when you save your changes. +In IntelliJ IDEA, the Make Project command triggers the necessary build. +Due to the [default restart exclusions](using.html#using.devtools.restart.excluding-resources), changes to static resources do not trigger a restart of your application. +They do, however, trigger a live reload. + +Alternatively, running in an IDE (especially with debugging on) is a good way to do development (all modern IDEs allow reloading of static resources and usually also allow hot-swapping of Java class changes). 
+ +Finally, the [Maven and Gradle plugins](build-tool-plugins.html#build-tool-plugins) can be configured (see the `addResources` property) to support running from the command line with reloading of static files directly from source. +You can use that with an external css/js compiler process if you are writing that code with higher-level tools. + +### 14.2. Reload Templates without Restarting the Container + +Most of the templating technologies supported by Spring Boot include a configuration option to disable caching (described later in this document). +If you use the `spring-boot-devtools` module, these properties are [automatically configured](using.html#using.devtools.property-defaults) for you at development time. + +#### 14.2.1. Thymeleaf Templates + +If you use Thymeleaf, set `spring.thymeleaf.cache` to `false`. +See [`ThymeleafAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/thymeleaf/ThymeleafAutoConfiguration.java) for other Thymeleaf customization options. + +#### 14.2.2. FreeMarker Templates + +If you use FreeMarker, set `spring.freemarker.cache` to `false`. +See [`FreeMarkerAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/freemarker/FreeMarkerAutoConfiguration.java) for other FreeMarker customization options. + +#### 14.2.3. Groovy Templates + +If you use Groovy templates, set `spring.groovy.template.cache` to `false`. +See [`GroovyTemplateAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/groovy/template/GroovyTemplateAutoConfiguration.java) for other Groovy customization options. + +### 14.3. 
Fast Application Restarts + +The `spring-boot-devtools` module includes support for automatic application restarts. +While not as fast as technologies such as [JRebel](https://www.jrebel.com/products/jrebel) it is usually significantly faster than a “cold start”. +You should probably give it a try before investigating some of the more complex reload options discussed later in this document. + +For more details, see the [using.html](using.html#using.devtools) section. + +### 14.4. Reload Java Classes without Restarting the Container ### + +Many modern IDEs (Eclipse, IDEA, and others) support hot swapping of bytecode. +Consequently, if you make a change that does not affect class or method signatures, it should reload cleanly with no side effects. + +## 15. Testing + +Spring Boot includes a number of testing utilities and support classes as well as a dedicated starter that provides common test dependencies. +This section answers common questions about testing. + +### 15.1. Testing With Spring Security + +Spring Security provides support for running tests as a specific user. +For example, the test in the snippet below will run with an authenticated user that has the `ADMIN` role. 
+ +``` +import org.junit.jupiter.api.Test; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest; +import org.springframework.security.test.context.support.WithMockUser; +import org.springframework.test.web.servlet.MockMvc; + +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; + +@WebMvcTest(UserController.class) +class MySecurityTests { + + @Autowired + private MockMvc mvc; + + @Test + @WithMockUser(roles = "ADMIN") + void requestProtectedUrlWithUser() throws Exception { + this.mvc.perform(get("/")); + } + +} + +``` + +Spring Security provides comprehensive integration with Spring MVC Test and this can also be used when testing controllers using the `@WebMvcTest` slice and `MockMvc`. + +For additional details on Spring Security’s testing support, see Spring Security’s [reference documentation](https://docs.spring.io/spring-security/reference/5.6.2/servlet/test/index.html). + +### 15.2. Use Testcontainers for Integration Testing + +The [Testcontainers](https://www.testcontainers.org/) library provides a way to manage services running inside Docker containers. +It integrates with JUnit, allowing you to write a test class that can start up a container before any of the tests run. +Testcontainers is especially useful for writing integration tests that talk to a real backend service such as MySQL, MongoDB, Cassandra and others. +Testcontainers can be used in a Spring Boot test as follows: + +``` +import org.junit.jupiter.api.Test; +import org.testcontainers.containers.Neo4jContainer; +import org.testcontainers.junit.jupiter.Container; +import org.testcontainers.junit.jupiter.Testcontainers; + +import org.springframework.boot.test.context.SpringBootTest; + +@SpringBootTest +@Testcontainers +class MyIntegrationTests { + + @Container + static Neo4jContainer neo4j = new Neo4jContainer<>("neo4j:4.2"); + + @Test + void myTest() { + // ... 
+ } + +} + +``` + +This will start up a docker container running Neo4j (if Docker is running locally) before any of the tests are run. +In most cases, you will need to configure the application using details from the running container, such as container IP or port. + +This can be done with a static `@DynamicPropertySource` method that allows adding dynamic property values to the Spring Environment. + +``` +import org.junit.jupiter.api.Test; +import org.testcontainers.containers.Neo4jContainer; +import org.testcontainers.junit.jupiter.Container; +import org.testcontainers.junit.jupiter.Testcontainers; + +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.DynamicPropertyRegistry; +import org.springframework.test.context.DynamicPropertySource; + +@SpringBootTest +@Testcontainers +class MyIntegrationTests { + + @Container + static Neo4jContainer neo4j = new Neo4jContainer<>("neo4j:4.2"); + + @Test + void myTest() { + // ... + } + + @DynamicPropertySource + static void neo4jProperties(DynamicPropertyRegistry registry) { + registry.add("spring.neo4j.uri", neo4j::getBoltUrl); + } + +} + +``` + +The above configuration allows Neo4j-related beans in the application to communicate with Neo4j running inside the Testcontainers-managed Docker container. + +## 16. Build + +Spring Boot includes build plugins for Maven and Gradle. +This section answers common questions about these plugins. + +### 16.1. Generate Build Information + +Both the Maven plugin and the Gradle plugin allow generating build information containing the coordinates, name, and version of the project. +The plugins can also be configured to add additional properties through configuration. +When such a file is present, Spring Boot auto-configures a `BuildProperties` bean. 
+ +To generate build information with Maven, add an execution for the `build-info` goal, as shown in the following example: + +``` + + + + org.springframework.boot + spring-boot-maven-plugin + 2.6.4 + + + + build-info + + + + + + +``` + +| |See the [Spring Boot Maven Plugin documentation](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#goals-build-info) for more details.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example does the same with Gradle: + +``` +springBoot { + buildInfo() +} +``` + +| |See the [Spring Boot Gradle Plugin documentation](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/#integrating-with-actuator-build-info) for more details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 16.2. Generate Git Information + +Both Maven and Gradle allow generating a `git.properties` file containing information about the state of your `git` source code repository when the project was built. + +For Maven users, the `spring-boot-starter-parent` POM includes a pre-configured plugin to generate a `git.properties` file. 
+To use it, add the following declaration for the [`Git Commit Id Plugin`](https://github.com/git-commit-id/git-commit-id-maven-plugin) to your POM: + +``` +<build> + <plugins> + <plugin> + <groupId>pl.project13.maven</groupId> + <artifactId>git-commit-id-plugin</artifactId> + </plugin> + </plugins> +</build> +``` + +Gradle users can achieve the same result by using the [`gradle-git-properties`](https://plugins.gradle.org/plugin/com.gorylenko.gradle-git-properties) plugin, as shown in the following example: + +``` +plugins { + id "com.gorylenko.gradle-git-properties" version "2.3.2" +} +``` + +Both the Maven and Gradle plugins allow the properties that are included in `git.properties` to be configured. + +| |The commit time in `git.properties` is expected to match the following format: `yyyy-MM-dd'T'HH:mm:ssZ`.
This is the default format for both plugins listed above.
Using this format lets the time be parsed into a `Date` and its format, when serialized to JSON, to be controlled by Jackson’s date serialization configuration settings.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 16.3. Customize Dependency Versions + +The `spring-boot-dependencies` POM manages the versions of common dependencies. +The Spring Boot plugins for Maven and Gradle allow these managed dependency versions to be customized using build properties. + +| |Each Spring Boot release is designed and tested against this specific set of third-party dependencies.
Overriding versions may cause compatibility issues.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To override dependency versions with Maven, see [this section](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#using) of the Maven plugin’s documentation. + +To override dependency versions in Gradle, see [this section](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/#managing-dependencies-dependency-management-plugin-customizing) of the Gradle plugin’s documentation. + +### 16.4. Create an Executable JAR with Maven + +The `spring-boot-maven-plugin` can be used to create an executable “fat” JAR. +If you use the `spring-boot-starter-parent` POM, you can declare the plugin and your jars are repackaged as follows: + +``` +<build> + <plugins> + <plugin> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-maven-plugin</artifactId> + </plugin> + </plugins> +</build> +``` + +If you do not use the parent POM, you can still use the plugin. +However, you must additionally add an `<executions>` section, as follows: + +``` +<build> + <plugins> + <plugin> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-maven-plugin</artifactId> + <version>{spring-boot-version}</version> + <executions> + <execution> + <goals> + <goal>repackage</goal> + </goals> + </execution> + </executions> + </plugin> + </plugins> +</build> +``` + +See the [plugin documentation](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#repackage) for full usage details. + +### 16.5. Use a Spring Boot Application as a Dependency ### + +Like a war file, a Spring Boot application is not intended to be used as a dependency. +If your application contains classes that you want to share with other projects, the recommended approach is to move that code into a separate module. +The separate module can then be depended upon by your application and other projects. + +If you cannot rearrange your code as recommended above, Spring Boot’s Maven and Gradle plugins must be configured to produce a separate artifact that is suitable for use as a dependency. 
+The executable archive cannot be used as a dependency as the [executable jar format](executable-jar.html#appendix.executable-jar.nested-jars.jar-structure) packages application classes in `BOOT-INF/classes`. +This means that they cannot be found when the executable jar is used as a dependency. + +To produce the two artifacts, one that can be used as a dependency and one that is executable, a classifier must be specified. +This classifier is applied to the name of the executable archive, leaving the default archive for use as a dependency. + +To configure a classifier of `exec` in Maven, you can use the following configuration: + +``` + + + + org.springframework.boot + spring-boot-maven-plugin + + exec + + + + +``` + +### 16.6. Extract Specific Libraries When an Executable Jar Runs ### + +Most nested libraries in an executable jar do not need to be unpacked in order to run. +However, certain libraries can have problems. +For example, JRuby includes its own nested jar support, which assumes that the `jruby-complete.jar` is always directly available as a file in its own right. + +To deal with any problematic libraries, you can flag that specific nested jars should be automatically unpacked when the executable jar first runs. +Such nested jars are written beneath the temporary directory identified by the `java.io.tmpdir` system property. 
+ +| |Care should be taken to ensure that your operating system is configured so that it will not delete the jars that have been unpacked to the temporary directory while the application is still running.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For example, to indicate that JRuby should be flagged for unpacking by using the Maven Plugin, you would add the following configuration: + +``` + + + + org.springframework.boot + spring-boot-maven-plugin + + + + org.jruby + jruby-complete + + + + + + +``` + +### 16.7. Create a Non-executable JAR with Exclusions + +Often, if you have an executable and a non-executable jar as two separate build products, the executable version has additional configuration files that are not needed in a library jar. +For example, the `application.yml` configuration file might be excluded from the non-executable JAR. + +In Maven, the executable jar must be the main artifact and you can add a classified jar for the library, as follows: + +``` + + + + org.springframework.boot + spring-boot-maven-plugin + + + maven-jar-plugin + + + lib + package + + jar + + + lib + + application.yml + + + + + + + +``` + +### 16.8. Remote Debug a Spring Boot Application Started with Maven + +To attach a remote debugger to a Spring Boot application that was started with Maven, you can use the `jvmArguments` property of the [maven plugin](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/). + +See [this example](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#run-example-debug) for more details. + +### 16.9. Build an Executable Archive from Ant without Using spring-boot-antlib ### + +To build with Ant, you need to grab dependencies, compile, and then create a jar or war archive. 
+To make it executable, you can either use the `spring-boot-antlib` module or you can follow these instructions: + +1. If you are building a jar, package the application’s classes and resources in a nested `BOOT-INF/classes` directory. + If you are building a war, package the application’s classes in a nested `WEB-INF/classes` directory as usual. + +2. Add the runtime dependencies in a nested `BOOT-INF/lib` directory for a jar or `WEB-INF/lib` for a war. + Remember **not** to compress the entries in the archive. + +3. Add the `provided` (embedded container) dependencies in a nested `BOOT-INF/lib` directory for a jar or `WEB-INF/lib-provided` for a war. + Remember **not** to compress the entries in the archive. + +4. Add the `spring-boot-loader` classes at the root of the archive (so that the `Main-Class` is available). + +5. Use the appropriate launcher (such as `JarLauncher` for a jar file) as a `Main-Class` attribute in the manifest and specify the other properties it needs as manifest entries — principally, by setting a `Start-Class` property. + +The following example shows how to build an executable archive with Ant: + +``` + + + + + + + + + + + + + + + + + + + + + +``` + +## 17. Traditional Deployment + +Spring Boot supports traditional deployment as well as more modern forms of deployment. +This section answers common questions about traditional deployment. + +### 17.1. Create a Deployable War File + +| |Because Spring WebFlux does not strictly depend on the servlet API and applications are deployed by default on an embedded Reactor Netty server, War deployment is not supported for WebFlux applications.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The first step in producing a deployable war file is to provide a `SpringBootServletInitializer` subclass and override its `configure` method. 
+Doing so makes use of Spring Framework’s servlet 3.0 support and lets you configure your application when it is launched by the servlet container. +Typically, you should update your application’s main class to extend `SpringBootServletInitializer`, as shown in the following example: + +``` +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.builder.SpringApplicationBuilder; +import org.springframework.boot.web.servlet.support.SpringBootServletInitializer; + +@SpringBootApplication +public class MyApplication extends SpringBootServletInitializer { + + @Override + protected SpringApplicationBuilder configure(SpringApplicationBuilder application) { + return application.sources(MyApplication.class); + } + + public static void main(String[] args) { + SpringApplication.run(MyApplication.class, args); + } + +} + +``` + +The next step is to update your build configuration such that your project produces a war file rather than a jar file. +If you use Maven and `spring-boot-starter-parent` (which configures Maven’s war plugin for you), all you need to do is to modify `pom.xml` to change the packaging to war, as follows: + +``` +war +``` + +If you use Gradle, you need to modify `build.gradle` to apply the war plugin to the project, as follows: + +``` +apply plugin: 'war' +``` + +The final step in the process is to ensure that the embedded servlet container does not interfere with the servlet container to which the war file is deployed. +To do so, you need to mark the embedded servlet container dependency as being provided. + +If you use Maven, the following example marks the servlet container (Tomcat, in this case) as being provided: + +``` + + + + org.springframework.boot + spring-boot-starter-tomcat + provided + + + +``` + +If you use Gradle, the following example marks the servlet container (Tomcat, in this case) as being provided: + +``` +dependencies { + // ... 
+ providedRuntime 'org.springframework.boot:spring-boot-starter-tomcat' + // ... +} +``` + +| |`providedRuntime` is preferred to Gradle’s `compileOnly` configuration.
Among other limitations, `compileOnly` dependencies are not on the test classpath, so any web-based integration tests fail.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you use the [Spring Boot build tools](build-tool-plugins.html#build-tool-plugins), marking the embedded servlet container dependency as provided produces an executable war file with the provided dependencies packaged in a `lib-provided` directory. +This means that, in addition to being deployable to a servlet container, you can also run your application by using `java -jar` on the command line. + +### 17.2. Convert an Existing Application to Spring Boot ### + +To convert an existing non-web Spring application to a Spring Boot application, replace the code that creates your `ApplicationContext` and replace it with calls to `SpringApplication` or `SpringApplicationBuilder`. +Spring MVC web applications are generally amenable to first creating a deployable war application and then migrating it later to an executable war or jar. +See the [Getting Started Guide on Converting a jar to a war](https://spring.io/guides/gs/convert-jar-to-war/). 
+ +To create a deployable war by extending `SpringBootServletInitializer` (for example, in a class called `Application`) and adding the Spring Boot `@SpringBootApplication` annotation, use code similar to that shown in the following example: + +``` +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.builder.SpringApplicationBuilder; +import org.springframework.boot.web.servlet.support.SpringBootServletInitializer; + +@SpringBootApplication +public class MyApplication extends SpringBootServletInitializer { + + @Override + protected SpringApplicationBuilder configure(SpringApplicationBuilder application) { + // Customize the application or call application.sources(...) to add sources + // Since our example is itself a @Configuration class (via @SpringBootApplication) + // we actually do not need to override this method. + return application; + } + +} + +``` + +Remember that, whatever you put in the `sources` is merely a Spring `ApplicationContext`. +Normally, anything that already works should work here. +There might be some beans you can remove later and let Spring Boot provide its own defaults for them, but it should be possible to get something working before you need to do that. + +Static resources can be moved to `/public` (or `/static` or `/resources` or `/META-INF/resources`) in the classpath root. +The same applies to `messages.properties` (which Spring Boot automatically detects in the root of the classpath). + +Vanilla usage of Spring `DispatcherServlet` and Spring Security should require no further changes. 
+If you have other features in your application (for instance, using other servlets or filters), you may need to add some configuration to your `Application` context, by replacing those elements from the `web.xml`, as follows: + +* A `@Bean` of type `Servlet` or `ServletRegistrationBean` installs that bean in the container as if it were a `` and `` in `web.xml`. + +* A `@Bean` of type `Filter` or `FilterRegistrationBean` behaves similarly (as a `` and ``). + +* An `ApplicationContext` in an XML file can be added through an `@ImportResource` in your `Application`. + Alternatively, cases where annotation configuration is heavily used already can be recreated in a few lines as `@Bean` definitions. + +Once the war file is working, you can make it executable by adding a `main` method to your `Application`, as shown in the following example: + +``` +public static void main(String[] args) { + SpringApplication.run(MyApplication.class, args); +} + +``` + +| |If you intend to start your application as a war or as an executable application, you need to share the customizations of the builder in a method that is both available to the `SpringBootServletInitializer` callback and in the `main` method in a class similar to the following:

```
import org.springframework.boot.Banner;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;

@SpringBootApplication
public class MyApplication extends SpringBootServletInitializer {

@Override
protected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {
return customizerBuilder(builder);
}

public static void main(String[] args) {
customizerBuilder(new SpringApplicationBuilder()).run(args);
}

private static SpringApplicationBuilder customizerBuilder(SpringApplicationBuilder builder) {
return builder.sources(MyApplication.class).bannerMode(Banner.Mode.OFF);
}

}

```| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Applications can fall into more than one category: + +* Servlet 3.0+ applications with no `web.xml`. + +* Applications with a `web.xml`. + +* Applications with a context hierarchy. + +* Applications without a context hierarchy. + +All of these should be amenable to translation, but each might require slightly different techniques. + +Servlet 3.0+ applications might translate pretty easily if they already use the Spring Servlet 3.0+ initializer support classes. +Normally, all the code from an existing `WebApplicationInitializer` can be moved into a `SpringBootServletInitializer`. 
+If your existing application has more than one `ApplicationContext` (for example, if it uses `AbstractDispatcherServletInitializer`) then you might be able to combine all your context sources into a single `SpringApplication`. +The main complication you might encounter is if combining does not work and you need to maintain the context hierarchy. +See the [entry on building a hierarchy](#howto.application.context-hierarchy) for examples. +An existing parent context that contains web-specific features usually needs to be broken up so that all the `ServletContextAware` components are in the child context. + +Applications that are not already Spring applications might be convertible to Spring Boot applications, and the previously mentioned guidance may help. +However, you may yet encounter problems. +In that case, we suggest [asking questions on Stack Overflow with a tag of `spring-boot`](https://stackoverflow.com/questions/tagged/spring-boot). + +### 17.3. Deploying a WAR to WebLogic + +To deploy a Spring Boot application to WebLogic, you must ensure that your servlet initializer **directly** implements `WebApplicationInitializer` (even if you extend from a base class that already implements it). + +A typical initializer for WebLogic should resemble the following example: + +``` +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.web.servlet.support.SpringBootServletInitializer; +import org.springframework.web.WebApplicationInitializer; + +@SpringBootApplication +public class MyApplication extends SpringBootServletInitializer implements WebApplicationInitializer { + +} + +``` + +If you use Logback, you also need to tell WebLogic to prefer the packaged version rather than the version that was pre-installed with the server. 
+You can do so by adding a `WEB-INF/weblogic.xml` file with the following contents: + +``` + + + + + org.slf4j + + + +``` diff --git a/docs/en/spring-boot/io.md b/docs/en/spring-boot/io.md new file mode 100644 index 0000000000000000000000000000000000000000..9904e03b6d3cc94c9452a65d091ad8ee9e032804 --- /dev/null +++ b/docs/en/spring-boot/io.md @@ -0,0 +1,966 @@ +# IO + +Most applications will need to deal with input and output concerns at some point. +Spring Boot provides utilities and integrations with a range of technologies to help when you need IO capabilities. +This section covers standard IO features such as caching and validation as well as more advanced topics such as scheduling and distributed transactions. +We will also cover calling remote REST or SOAP services and sending email. + +## 1. Caching + +The Spring Framework provides support for transparently adding caching to an application. +At its core, the abstraction applies caching to methods, thus reducing the number of executions based on the information available in the cache. +The caching logic is applied transparently, without any interference to the invoker. +Spring Boot auto-configures the cache infrastructure as long as caching support is enabled by using the `@EnableCaching` annotation. 
+ +| |Check the [relevant section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/integration.html#cache) of the Spring Framework reference for more details.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In a nutshell, to add caching to an operation of your service add the relevant annotation to its method, as shown in the following example: + +``` +import org.springframework.cache.annotation.Cacheable; +import org.springframework.stereotype.Component; + +@Component +public class MyMathService { + + @Cacheable("piDecimals") + public int computePiDecimal(int precision) { + ... + } + +} + +``` + +This example demonstrates the use of caching on a potentially costly operation. +Before invoking `computePiDecimal`, the abstraction looks for an entry in the `piDecimals` cache that matches the `precision` argument. +If an entry is found, the content in the cache is immediately returned to the caller, and the method is not invoked. +Otherwise, the method is invoked, and the cache is updated before returning the value. + +| |You can also use the standard JSR-107 (JCache) annotations (such as `@CacheResult`) transparently.
However, we strongly advise you to not mix and match the Spring Cache and JCache annotations.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you do not add any specific cache library, Spring Boot auto-configures a [simple provider](#io.caching.provider.simple) that uses concurrent maps in memory. +When a cache is required (such as `piDecimals` in the preceding example), this provider creates it for you. +The simple provider is not really recommended for production usage, but it is great for getting started and making sure that you understand the features. +When you have made up your mind about the cache provider to use, please make sure to read its documentation to figure out how to configure the caches that your application uses. +Nearly all providers require you to explicitly configure every cache that you use in the application. +Some offer a way to customize the default caches defined by the `spring.cache.cache-names` property. + +| |It is also possible to transparently [update](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/integration.html#cache-annotations-put) or [evict](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/integration.html#cache-annotations-evict) data from the cache.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.1. Supported Cache Providers + +The cache abstraction does not provide an actual store and relies on abstraction materialized by the `org.springframework.cache.Cache` and `org.springframework.cache.CacheManager` interfaces. 
+ +If you have not defined a bean of type `CacheManager` or a `CacheResolver` named `cacheResolver` (see [`CachingConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/cache/annotation/CachingConfigurer.html)), Spring Boot tries to detect the following providers (in the indicated order): + +1. [Generic](#io.caching.provider.generic) + +2. [JCache (JSR-107)](#io.caching.provider.jcache) (EhCache 3, Hazelcast, Infinispan, and others) + +3. [EhCache 2.x](#io.caching.provider.ehcache2) + +4. [Hazelcast](#io.caching.provider.hazelcast) + +5. [Infinispan](#io.caching.provider.infinispan) + +6. [Couchbase](#io.caching.provider.couchbase) + +7. [Redis](#io.caching.provider.redis) + +8. [Caffeine](#io.caching.provider.caffeine) + +9. [Simple](#io.caching.provider.simple) + +| |It is also possible to *force* a particular cache provider by setting the `spring.cache.type` property.
Use this property if you need to [disable caching altogether](#io.caching.provider.none) in certain environments (such as tests).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Use the `spring-boot-starter-cache` “Starter” to quickly add basic caching dependencies.
The starter brings in `spring-context-support`.
If you add dependencies manually, you must include `spring-context-support` in order to use the JCache, EhCache 2.x, or Caffeine support.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If the `CacheManager` is auto-configured by Spring Boot, you can further tune its configuration before it is fully initialized by exposing a bean that implements the `CacheManagerCustomizer` interface. +The following example sets a flag to say that `null` values should not be passed down to the underlying map: + +``` +import org.springframework.boot.autoconfigure.cache.CacheManagerCustomizer; +import org.springframework.cache.concurrent.ConcurrentMapCacheManager; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyCacheManagerConfiguration { + + @Bean + public CacheManagerCustomizer cacheManagerCustomizer() { + return (cacheManager) -> cacheManager.setAllowNullValues(false); + } + +} + +``` + +| |In the preceding example, an auto-configured `ConcurrentMapCacheManager` is expected.
If that is not the case (either you provided your own config or a different cache provider was auto-configured), the customizer is not invoked at all.
You can have as many customizers as you want, and you can also order them by using `@Order` or `Ordered`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.1. Generic + +Generic caching is used if the context defines *at least* one `org.springframework.cache.Cache` bean. +A `CacheManager` wrapping all beans of that type is created. + +#### 1.1.2. JCache (JSR-107) + +[JCache](https://jcp.org/en/jsr/detail?id=107) is bootstrapped through the presence of a `javax.cache.spi.CachingProvider` on the classpath (that is, a JSR-107 compliant caching library exists on the classpath), and the `JCacheCacheManager` is provided by the `spring-boot-starter-cache` “Starter”. +Various compliant libraries are available, and Spring Boot provides dependency management for Ehcache 3, Hazelcast, and Infinispan. +Any other compliant library can be added as well. + +It might happen that more than one provider is present, in which case the provider must be explicitly specified. 
+Even if the JSR-107 standard does not enforce a standardized way to define the location of the configuration file, Spring Boot does its best to accommodate setting a cache with implementation details, as shown in the following example: + +Properties + +``` +# Only necessary if more than one provider is present +spring.cache.jcache.provider=com.example.MyCachingProvider +spring.cache.jcache.config=classpath:example.xml +``` + +Yaml + +``` +# Only necessary if more than one provider is present +spring: + cache: + jcache: + provider: "com.example.MyCachingProvider" + config: "classpath:example.xml" +``` + +| |When a cache library offers both a native implementation and JSR-107 support, Spring Boot prefers the JSR-107 support, so that the same features are available if you switch to a different JSR-107 implementation.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Spring Boot has [general support for Hazelcast](#io.hazelcast).
If a single `HazelcastInstance` is available, it is automatically reused for the `CacheManager` as well, unless the `spring.cache.jcache.config` property is specified.| +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +There are two ways to customize the underlying `javax.cache.CacheManager`: + +* Caches can be created on startup by setting the `spring.cache.cache-names` property. + If a custom `javax.cache.configuration.Configuration` bean is defined, it is used to customize them. + +* `org.springframework.boot.autoconfigure.cache.JCacheManagerCustomizer` beans are invoked with the reference of the `CacheManager` for full customization. + +| |If a standard `javax.cache.CacheManager` bean is defined, it is wrapped automatically in an `org.springframework.cache.CacheManager` implementation that the abstraction expects.
No further customization is applied to it.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.3. EhCache 2.x + +[EhCache](https://www.ehcache.org/) 2.x is used if a file named `ehcache.xml` can be found at the root of the classpath. +If EhCache 2.x is found, the `EhCacheCacheManager` provided by the `spring-boot-starter-cache` “Starter” is used to bootstrap the cache manager. +An alternate configuration file can be provided as well, as shown in the following example: + +Properties + +``` +spring.cache.ehcache.config=classpath:config/another-config.xml +``` + +Yaml + +``` +spring: + cache: + ehcache: + config: "classpath:config/another-config.xml" +``` + +#### 1.1.4. Hazelcast + +Spring Boot has [general support for Hazelcast](#io.hazelcast). +If a `HazelcastInstance` has been auto-configured, it is automatically wrapped in a `CacheManager`. + +#### 1.1.5. Infinispan + +[Infinispan](https://infinispan.org/) has no default configuration file location, so it must be specified explicitly. +Otherwise, the default bootstrap is used. + +Properties + +``` +spring.cache.infinispan.config=infinispan.xml +``` + +Yaml + +``` +spring: + cache: + infinispan: + config: "infinispan.xml" +``` + +Caches can be created on startup by setting the `spring.cache.cache-names` property. +If a custom `ConfigurationBuilder` bean is defined, it is used to customize the caches. + +| |The support of Infinispan in Spring Boot is restricted to the embedded mode and is quite basic.
If you want more options, you should use the official Infinispan Spring Boot starter instead.
See [Infinispan’s documentation](https://github.com/infinispan/infinispan-spring-boot) for more details.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.6. Couchbase + +If Spring Data Couchbase is available and Couchbase is [configured](data.html#data.nosql.couchbase), a `CouchbaseCacheManager` is auto-configured. +It is possible to create additional caches on startup by setting the `spring.cache.cache-names` property and cache defaults can be configured by using `spring.cache.couchbase.*` properties. +For instance, the following configuration creates `cache1` and `cache2` caches with an entry *expiration* of 10 minutes: + +Properties + +``` +spring.cache.cache-names=cache1,cache2 +spring.cache.couchbase.expiration=10m +``` + +Yaml + +``` +spring: + cache: + cache-names: "cache1,cache2" + couchbase: + expiration: "10m" +``` + +If you need more control over the configuration, consider registering a `CouchbaseCacheManagerBuilderCustomizer` bean. 
+The following example shows a customizer that configures a specific entry expiration for `cache1` and `cache2`: + +``` +import java.time.Duration; + +import org.springframework.boot.autoconfigure.cache.CouchbaseCacheManagerBuilderCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.data.couchbase.cache.CouchbaseCacheConfiguration; + +@Configuration(proxyBeanMethods = false) +public class MyCouchbaseCacheManagerConfiguration { + + @Bean + public CouchbaseCacheManagerBuilderCustomizer myCouchbaseCacheManagerBuilderCustomizer() { + return (builder) -> builder + .withCacheConfiguration("cache1", CouchbaseCacheConfiguration + .defaultCacheConfig().entryExpiry(Duration.ofSeconds(10))) + .withCacheConfiguration("cache2", CouchbaseCacheConfiguration + .defaultCacheConfig().entryExpiry(Duration.ofMinutes(1))); + + } + +} + +``` + +#### 1.1.7. Redis + +If [Redis](https://redis.io/) is available and configured, a `RedisCacheManager` is auto-configured. +It is possible to create additional caches on startup by setting the `spring.cache.cache-names` property and cache defaults can be configured by using `spring.cache.redis.*` properties. +For instance, the following configuration creates `cache1` and `cache2` caches with a *time to live* of 10 minutes: + +Properties + +``` +spring.cache.cache-names=cache1,cache2 +spring.cache.redis.time-to-live=10m +``` + +Yaml + +``` +spring: + cache: + cache-names: "cache1,cache2" + redis: + time-to-live: "10m" +``` + +| |By default, a key prefix is added so that, if two separate caches use the same key, Redis does not have overlapping keys and cannot return invalid values.
We strongly recommend keeping this setting enabled if you create your own `RedisCacheManager`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |You can take full control of the default configuration by adding a `RedisCacheConfiguration` `@Bean` of your own.
This can be useful if you need to customize the default serialization strategy.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you need more control over the configuration, consider registering a `RedisCacheManagerBuilderCustomizer` bean. +The following example shows a customizer that configures a specific time to live for `cache1` and `cache2`: + +``` +import java.time.Duration; + +import org.springframework.boot.autoconfigure.cache.RedisCacheManagerBuilderCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.data.redis.cache.RedisCacheConfiguration; + +@Configuration(proxyBeanMethods = false) +public class MyRedisCacheManagerConfiguration { + + @Bean + public RedisCacheManagerBuilderCustomizer myRedisCacheManagerBuilderCustomizer() { + return (builder) -> builder + .withCacheConfiguration("cache1", RedisCacheConfiguration + .defaultCacheConfig().entryTtl(Duration.ofSeconds(10))) + .withCacheConfiguration("cache2", RedisCacheConfiguration + .defaultCacheConfig().entryTtl(Duration.ofMinutes(1))); + + } + +} + +``` + +#### 1.1.8. Caffeine + +[Caffeine](https://github.com/ben-manes/caffeine) is a Java 8 rewrite of Guava’s cache that supersedes support for Guava. +If Caffeine is present, a `CaffeineCacheManager` (provided by the `spring-boot-starter-cache` “Starter”) is auto-configured. +Caches can be created on startup by setting the `spring.cache.cache-names` property and can be customized by one of the following (in the indicated order): + +1. A cache spec defined by `spring.cache.caffeine.spec` + +2. A `com.github.benmanes.caffeine.cache.CaffeineSpec` bean is defined + +3. 
A `com.github.benmanes.caffeine.cache.Caffeine` bean is defined + +For instance, the following configuration creates `cache1` and `cache2` caches with a maximum size of 500 and a *time to live* of 10 minutes + +Properties + +``` +spring.cache.cache-names=cache1,cache2 +spring.cache.caffeine.spec=maximumSize=500,expireAfterAccess=600s +``` + +Yaml + +``` +spring: + cache: + cache-names: "cache1,cache2" + caffeine: + spec: "maximumSize=500,expireAfterAccess=600s" +``` + +If a `com.github.benmanes.caffeine.cache.CacheLoader` bean is defined, it is automatically associated to the `CaffeineCacheManager`. +Since the `CacheLoader` is going to be associated with *all* caches managed by the cache manager, it must be defined as `CacheLoader`. +The auto-configuration ignores any other generic type. + +#### 1.1.9. Simple + +If none of the other providers can be found, a simple implementation using a `ConcurrentHashMap` as the cache store is configured. +This is the default if no caching library is present in your application. +By default, caches are created as needed, but you can restrict the list of available caches by setting the `cache-names` property. +For instance, if you want only `cache1` and `cache2` caches, set the `cache-names` property as follows: + +Properties + +``` +spring.cache.cache-names=cache1,cache2 +``` + +Yaml + +``` +spring: + cache: + cache-names: "cache1,cache2" +``` + +If you do so and your application uses a cache not listed, then it fails at runtime when the cache is needed, but not on startup. +This is similar to the way the "real" cache providers behave if you use an undeclared cache. + +#### 1.1.10. None + +When `@EnableCaching` is present in your configuration, a suitable cache configuration is expected as well. 
+If you need to disable caching altogether in certain environments, force the cache type to `none` to use a no-op implementation, as shown in the following example: + +Properties + +``` +spring.cache.type=none +``` + +Yaml + +``` +spring: + cache: + type: "none" +``` + +## 2. Hazelcast + +If [Hazelcast](https://hazelcast.com/) is on the classpath and a suitable configuration is found, Spring Boot auto-configures a `HazelcastInstance` that you can inject in your application. + +Spring Boot first attempts to create a client by checking the following configuration options: + +* The presence of a `com.hazelcast.client.config.ClientConfig` bean. + +* A configuration file defined by the `spring.hazelcast.config` property. + +* The presence of the `hazelcast.client.config` system property. + +* A `hazelcast-client.xml` in the working directory or at the root of the classpath. + +* A `hazelcast-client.yaml` in the working directory or at the root of the classpath. + +| |Spring Boot supports both Hazelcast 4 and Hazelcast 3.
If you downgrade to Hazelcast 3, `hazelcast-client` should be added to the classpath to configure a client.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If a client can not be created, Spring Boot attempts to configure an embedded server. +If you define a `com.hazelcast.config.Config` bean, Spring Boot uses that. +If your configuration defines an instance name, Spring Boot tries to locate an existing instance rather than creating a new one. + +You could also specify the Hazelcast configuration file to use through configuration, as shown in the following example: + +Properties + +``` +spring.hazelcast.config=classpath:config/my-hazelcast.xml +``` + +Yaml + +``` +spring: + hazelcast: + config: "classpath:config/my-hazelcast.xml" +``` + +Otherwise, Spring Boot tries to find the Hazelcast configuration from the default locations: `hazelcast.xml` in the working directory or at the root of the classpath, or a `.yaml` counterpart in the same locations. +We also check if the `hazelcast.config` system property is set. +See the [Hazelcast documentation](https://docs.hazelcast.org/docs/latest/manual/html-single/) for more details. + +| |Spring Boot also has [explicit caching support for Hazelcast](#io.caching.provider.hazelcast).
If caching is enabled, the `HazelcastInstance` is automatically wrapped in a `CacheManager` implementation.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 3. Quartz Scheduler + +Spring Boot offers several conveniences for working with the [Quartz scheduler](https://www.quartz-scheduler.org/), including the `spring-boot-starter-quartz` “Starter”. +If Quartz is available, a `Scheduler` is auto-configured (through the `SchedulerFactoryBean` abstraction). + +Beans of the following types are automatically picked up and associated with the `Scheduler`: + +* `JobDetail`: defines a particular Job.`JobDetail` instances can be built with the `JobBuilder` API. + +* `Calendar`. + +* `Trigger`: defines when a particular job is triggered. + +By default, an in-memory `JobStore` is used. +However, it is possible to configure a JDBC-based store if a `DataSource` bean is available in your application and if the `spring.quartz.job-store-type` property is configured accordingly, as shown in the following example: + +Properties + +``` +spring.quartz.job-store-type=jdbc +``` + +Yaml + +``` +spring: + quartz: + job-store-type: "jdbc" +``` + +When the JDBC store is used, the schema can be initialized on startup, as shown in the following example: + +Properties + +``` +spring.quartz.jdbc.initialize-schema=always +``` + +Yaml + +``` +spring: + quartz: + jdbc: + initialize-schema: "always" +``` + +| |By default, the database is detected and initialized by using the standard scripts provided with the Quartz library.
These scripts drop existing tables, deleting all triggers on every restart.
It is also possible to provide a custom script by setting the `spring.quartz.jdbc.schema` property.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To have Quartz use a `DataSource` other than the application’s main `DataSource`, declare a `DataSource` bean, annotating its `@Bean` method with `@QuartzDataSource`. +Doing so ensures that the Quartz-specific `DataSource` is used by both the `SchedulerFactoryBean` and for schema initialization. +Similarly, to have Quartz use a `TransactionManager` other than the application’s main `TransactionManager` declare a `TransactionManager` bean, annotating its `@Bean` method with `@QuartzTransactionManager`. + +By default, jobs created by configuration will not overwrite already registered jobs that have been read from a persistent job store. +To enable overwriting existing job definitions set the `spring.quartz.overwrite-existing-jobs` property. + +Quartz Scheduler configuration can be customized using `spring.quartz` properties and `SchedulerFactoryBeanCustomizer` beans, which allow programmatic `SchedulerFactoryBean` customization. +Advanced Quartz configuration properties can be customized using `spring.quartz.properties.*`. + +| |In particular, an `Executor` bean is not associated with the scheduler as Quartz offers a way to configure the scheduler through `spring.quartz.properties`.
If you need to customize the task executor, consider implementing `SchedulerFactoryBeanCustomizer`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Jobs can define setters to inject data map properties. +Regular beans can also be injected in a similar manner, as shown in the following example: + +``` +import org.quartz.JobExecutionContext; +import org.quartz.JobExecutionException; + +import org.springframework.scheduling.quartz.QuartzJobBean; + +public class MySampleJob extends QuartzJobBean { + + // fields ... + + private MyService myService; + + private String name; + + // Inject "MyService" bean + public void setMyService(MyService myService) { + this.myService = myService; + } + + // Inject the "name" job data property + public void setName(String name) { + this.name = name; + } + + @Override + protected void executeInternal(JobExecutionContext context) throws JobExecutionException { + this.myService.someMethod(context.getFireTime(), this.name); + } + +} + +``` + +## 4. Sending Email + +The Spring Framework provides an abstraction for sending email by using the `JavaMailSender` interface, and Spring Boot provides auto-configuration for it as well as a starter module. + +| |See the [reference documentation](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/integration.html#mail) for a detailed explanation of how you can use `JavaMailSender`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If `spring.mail.host` and the relevant libraries (as defined by `spring-boot-starter-mail`) are available, a default `JavaMailSender` is created if none exists. 
+The sender can be further customized by configuration items from the `spring.mail` namespace. +See [`MailProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/mail/MailProperties.java) for more details. + +In particular, certain default timeout values are infinite, and you may want to change that to avoid having a thread blocked by an unresponsive mail server, as shown in the following example: + +Properties + +``` +spring.mail.properties[mail.smtp.connectiontimeout]=5000 +spring.mail.properties[mail.smtp.timeout]=3000 +spring.mail.properties[mail.smtp.writetimeout]=5000 +``` + +Yaml + +``` +spring: + mail: + properties: + "[mail.smtp.connectiontimeout]": 5000 + "[mail.smtp.timeout]": 3000 + "[mail.smtp.writetimeout]": 5000 +``` + +It is also possible to configure a `JavaMailSender` with an existing `Session` from JNDI: + +Properties + +``` +spring.mail.jndi-name=mail/Session +``` + +Yaml + +``` +spring: + mail: + jndi-name: "mail/Session" +``` + +When a `jndi-name` is set, it takes precedence over all other Session-related settings. + +## 5. Validation + +The method validation feature supported by Bean Validation 1.1 is automatically enabled as long as a JSR-303 implementation (such as Hibernate validator) is on the classpath. +This lets bean methods be annotated with `javax.validation` constraints on their parameters and/or on their return value. +Target classes with such annotated methods need to be annotated with the `@Validated` annotation at the type level for their methods to be searched for inline constraint annotations. 
+ +For instance, the following service triggers the validation of the first argument, making sure its size is between 8 and 10: + +``` +import javax.validation.constraints.Size; + +import org.springframework.stereotype.Service; +import org.springframework.validation.annotation.Validated; + +@Service +@Validated +public class MyBean { + + public Archive findByCodeAndAuthor(@Size(min = 8, max = 10) String code, Author author) { + return ... + } + +} + +``` + +The application’s `MessageSource` is used when resolving `{parameters}` in constraint messages. +This allows you to use [your application’s `messages.properties` files](features.html#features.internationalization) for Bean Validation messages. +Once the parameters have been resolved, message interpolation is completed using Bean Validation’s default interpolator. + +## 6. Calling REST Services + +If your application calls remote REST services, Spring Boot makes that very convenient using a `RestTemplate` or a `WebClient`. + +### 6.1. RestTemplate + +If you need to call remote REST services from your application, you can use the Spring Framework’s [`RestTemplate`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/client/RestTemplate.html) class. +Since `RestTemplate` instances often need to be customized before being used, Spring Boot does not provide any single auto-configured `RestTemplate` bean. +It does, however, auto-configure a `RestTemplateBuilder`, which can be used to create `RestTemplate` instances when needed. +The auto-configured `RestTemplateBuilder` ensures that sensible `HttpMessageConverters` are applied to `RestTemplate` instances. 
+ +The following code shows a typical example: + +``` +import org.springframework.boot.web.client.RestTemplateBuilder; +import org.springframework.stereotype.Service; +import org.springframework.web.client.RestTemplate; + +@Service +public class MyService { + + private final RestTemplate restTemplate; + + public MyService(RestTemplateBuilder restTemplateBuilder) { + this.restTemplate = restTemplateBuilder.build(); + } + + public Details someRestCall(String name) { + return this.restTemplate.getForObject("/{name}/details", Details.class, name); + } + +} + +``` + +| |`RestTemplateBuilder` includes a number of useful methods that can be used to quickly configure a `RestTemplate`.
For example, to add BASIC auth support, you can use `builder.basicAuthentication("user", "password").build()`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.1.1. RestTemplate Customization + +There are three main approaches to `RestTemplate` customization, depending on how broadly you want the customizations to apply. + +To make the scope of any customizations as narrow as possible, inject the auto-configured `RestTemplateBuilder` and then call its methods as required. +Each method call returns a new `RestTemplateBuilder` instance, so the customizations only affect this use of the builder. + +To make an application-wide, additive customization, use a `RestTemplateCustomizer` bean. +All such beans are automatically registered with the auto-configured `RestTemplateBuilder` and are applied to any templates that are built with it. 
+ +The following example shows a customizer that configures the use of a proxy for all hosts except `192.168.0.5`: + +``` +import org.apache.http.HttpException; +import org.apache.http.HttpHost; +import org.apache.http.HttpRequest; +import org.apache.http.client.HttpClient; +import org.apache.http.conn.routing.HttpRoutePlanner; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.conn.DefaultProxyRoutePlanner; +import org.apache.http.protocol.HttpContext; + +import org.springframework.boot.web.client.RestTemplateCustomizer; +import org.springframework.http.client.HttpComponentsClientHttpRequestFactory; +import org.springframework.web.client.RestTemplate; + +public class MyRestTemplateCustomizer implements RestTemplateCustomizer { + + @Override + public void customize(RestTemplate restTemplate) { + HttpRoutePlanner routePlanner = new CustomRoutePlanner(new HttpHost("proxy.example.com")); + HttpClient httpClient = HttpClientBuilder.create().setRoutePlanner(routePlanner).build(); + restTemplate.setRequestFactory(new HttpComponentsClientHttpRequestFactory(httpClient)); + } + + static class CustomRoutePlanner extends DefaultProxyRoutePlanner { + + CustomRoutePlanner(HttpHost proxy) { + super(proxy); + } + + @Override + public HttpHost determineProxy(HttpHost target, HttpRequest request, HttpContext context) throws HttpException { + if (target.getHostName().equals("192.168.0.5")) { + return null; + } + return super.determineProxy(target, request, context); + } + + } + +} + +``` + +Finally, you can define your own `RestTemplateBuilder` bean. +Doing so will replace the auto-configured builder. +If you want any `RestTemplateCustomizer` beans to be applied to your custom builder, as the auto-configuration would have done, configure it using a `RestTemplateBuilderConfigurer`. 
+The following example exposes a `RestTemplateBuilder` that matches what Spring Boot’s auto-configuration would have done, except that custom connect and read timeouts are also specified: + +``` +import java.time.Duration; + +import org.springframework.boot.autoconfigure.web.client.RestTemplateBuilderConfigurer; +import org.springframework.boot.web.client.RestTemplateBuilder; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyRestTemplateBuilderConfiguration { + + @Bean + public RestTemplateBuilder restTemplateBuilder(RestTemplateBuilderConfigurer configurer) { + return configurer.configure(new RestTemplateBuilder()).setConnectTimeout(Duration.ofSeconds(5)) + .setReadTimeout(Duration.ofSeconds(2)); + } + +} + +``` + +The most extreme (and rarely used) option is to create your own `RestTemplateBuilder` bean without using a configurer. +In addition to replacing the auto-configured builder, this also prevents any `RestTemplateCustomizer` beans from being used. + +### 6.2. WebClient + +If you have Spring WebFlux on your classpath, you can also choose to use `WebClient` to call remote REST services. +Compared to `RestTemplate`, this client has a more functional feel and is fully reactive. +You can learn more about the `WebClient` in the dedicated [section in the Spring Framework docs](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web-reactive.html#webflux-client). + +Spring Boot creates and pre-configures a `WebClient.Builder` for you. +It is strongly advised to inject it in your components and use it to create `WebClient` instances. +Spring Boot is configuring that builder to share HTTP resources, reflect codecs setup in the same fashion as the server ones (see [WebFlux HTTP codecs auto-configuration](web.html#web.reactive.webflux.httpcodecs)), and more. 
+
+The following code shows a typical example:
+
+```
+import org.neo4j.cypherdsl.core.Relationship.Details;
+import reactor.core.publisher.Mono;
+
+import org.springframework.stereotype.Service;
+import org.springframework.web.reactive.function.client.WebClient;
+
+@Service
+public class MyService {
+
+    private final WebClient webClient;
+
+    public MyService(WebClient.Builder webClientBuilder) {
+        this.webClient = webClientBuilder.baseUrl("https://example.org").build();
+    }
+
+    public Mono<Details>
someRestCall(String name) {
+        return this.webClient.get().uri("/{name}/details", name).retrieve().bodyToMono(Details.class);
+    }
+
+}
+
+```
+
+#### 6.2.1. WebClient Runtime
+
+Spring Boot will auto-detect which `ClientHttpConnector` to use to drive `WebClient`, depending on the libraries available on the application classpath.
+For now, Reactor Netty and Jetty RS client are supported.
+
+The `spring-boot-starter-webflux` starter depends on `io.projectreactor.netty:reactor-netty` by default, which brings both server and client implementations.
+If you choose to use Jetty as a reactive server instead, you should add a dependency on the Jetty Reactive HTTP client library, `org.eclipse.jetty:jetty-reactive-httpclient`.
+Using the same technology for server and client has its advantages, as it will automatically share HTTP resources between client and server.
+
+Developers can override the resource configuration for Jetty and Reactor Netty by providing a custom `ReactorResourceFactory` or `JettyResourceFactory` bean - this will be applied to both clients and servers.
+
+If you wish to override that choice for the client, you can define your own `ClientHttpConnector` bean and have full control over the client configuration.
+
+You can learn more about the [`WebClient` configuration options in the Spring Framework reference documentation](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web-reactive.html#webflux-client-builder).
+
+#### 6.2.2. WebClient Customization
+
+There are three main approaches to `WebClient` customization, depending on how broadly you want the customizations to apply.
+
+To make the scope of any customizations as narrow as possible, inject the auto-configured `WebClient.Builder` and then call its methods as required. `WebClient.Builder` instances are stateful: Any change on the builder is reflected in all clients subsequently created with it. 
+If you want to create several clients with the same builder, you can also consider cloning the builder with `WebClient.Builder other = builder.clone();`. + +To make an application-wide, additive customization to all `WebClient.Builder` instances, you can declare `WebClientCustomizer` beans and change the `WebClient.Builder` locally at the point of injection. + +Finally, you can fall back to the original API and use `WebClient.create()`. +In that case, no auto-configuration or `WebClientCustomizer` is applied. + +## 7. Web Services + +Spring Boot provides Web Services auto-configuration so that all you must do is define your `Endpoints`. + +The [Spring Web Services features](https://docs.spring.io/spring-ws/docs/3.1.2/reference/html/) can be easily accessed with the `spring-boot-starter-webservices` module. + +`SimpleWsdl11Definition` and `SimpleXsdSchema` beans can be automatically created for your WSDLs and XSDs respectively. +To do so, configure their location, as shown in the following example: + +Properties + +``` +spring.webservices.wsdl-locations=classpath:/wsdl +``` + +Yaml + +``` +spring: + webservices: + wsdl-locations: "classpath:/wsdl" +``` + +### 7.1. Calling Web Services with WebServiceTemplate + +If you need to call remote Web services from your application, you can use the [`WebServiceTemplate`](https://docs.spring.io/spring-ws/docs/3.1.2/reference/html/#client-web-service-template) class. +Since `WebServiceTemplate` instances often need to be customized before being used, Spring Boot does not provide any single auto-configured `WebServiceTemplate` bean. +It does, however, auto-configure a `WebServiceTemplateBuilder`, which can be used to create `WebServiceTemplate` instances when needed. 
+ +The following code shows a typical example: + +``` +import org.springframework.boot.webservices.client.WebServiceTemplateBuilder; +import org.springframework.stereotype.Service; +import org.springframework.ws.client.core.WebServiceTemplate; +import org.springframework.ws.soap.client.core.SoapActionCallback; + +@Service +public class MyService { + + private final WebServiceTemplate webServiceTemplate; + + public MyService(WebServiceTemplateBuilder webServiceTemplateBuilder) { + this.webServiceTemplate = webServiceTemplateBuilder.build(); + } + + public SomeResponse someWsCall(SomeRequest detailsReq) { + return (SomeResponse) this.webServiceTemplate.marshalSendAndReceive(detailsReq, + new SoapActionCallback("https://ws.example.com/action")); + } + +} + +``` + +By default, `WebServiceTemplateBuilder` detects a suitable HTTP-based `WebServiceMessageSender` using the available HTTP client libraries on the classpath. +You can also customize read and connection timeouts as follows: + +``` +import java.time.Duration; + +import org.springframework.boot.webservices.client.HttpWebServiceMessageSenderBuilder; +import org.springframework.boot.webservices.client.WebServiceTemplateBuilder; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.ws.client.core.WebServiceTemplate; +import org.springframework.ws.transport.WebServiceMessageSender; + +@Configuration(proxyBeanMethods = false) +public class MyWebServiceTemplateConfiguration { + + @Bean + public WebServiceTemplate webServiceTemplate(WebServiceTemplateBuilder builder) { + WebServiceMessageSender sender = new HttpWebServiceMessageSenderBuilder() + .setConnectTimeout(Duration.ofSeconds(5)) + .setReadTimeout(Duration.ofSeconds(2)) + .build(); + return builder.messageSenders(sender).build(); + } + +} + +``` + +## 8. 
Distributed Transactions with JTA + +Spring Boot supports distributed JTA transactions across multiple XA resources by using an [Atomikos](https://www.atomikos.com/) embedded transaction manager. +JTA transactions are also supported when deploying to a suitable Java EE Application Server. + +When a JTA environment is detected, Spring’s `JtaTransactionManager` is used to manage transactions. +Auto-configured JMS, DataSource, and JPA beans are upgraded to support XA transactions. +You can use standard Spring idioms, such as `@Transactional`, to participate in a distributed transaction. +If you are within a JTA environment and still want to use local transactions, you can set the `spring.jta.enabled` property to `false` to disable the JTA auto-configuration. + +### 8.1. Using an Atomikos Transaction Manager + +[Atomikos](https://www.atomikos.com/) is a popular open source transaction manager which can be embedded into your Spring Boot application. +You can use the `spring-boot-starter-jta-atomikos` starter to pull in the appropriate Atomikos libraries. +Spring Boot auto-configures Atomikos and ensures that appropriate `depends-on` settings are applied to your Spring beans for correct startup and shutdown ordering. + +By default, Atomikos transaction logs are written to a `transaction-logs` directory in your application’s home directory (the directory in which your application jar file resides). +You can customize the location of this directory by setting a `spring.jta.log-dir` property in your `application.properties` file. +Properties starting with `spring.jta.atomikos.properties` can also be used to customize the Atomikos `UserTransactionServiceImp`. +See the [`AtomikosProperties` Javadoc](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/jta/atomikos/AtomikosProperties.html) for complete details. 
+ +| |To ensure that multiple transaction managers can safely coordinate the same resource managers, each Atomikos instance must be configured with a unique ID.
By default, this ID is the IP address of the machine on which Atomikos is running.
To ensure uniqueness in production, you should configure the `spring.jta.transaction-manager-id` property with a different value for each instance of your application.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.2. Using a Java EE Managed Transaction Manager + +If you package your Spring Boot application as a `war` or `ear` file and deploy it to a Java EE application server, you can use your application server’s built-in transaction manager. +Spring Boot tries to auto-configure a transaction manager by looking at common JNDI locations (`java:comp/UserTransaction`, `java:comp/TransactionManager`, and so on). +If you use a transaction service provided by your application server, you generally also want to ensure that all resources are managed by the server and exposed over JNDI. +Spring Boot tries to auto-configure JMS by looking for a `ConnectionFactory` at the JNDI path (`java:/JmsXA` or `java:/XAConnectionFactory`), and you can use the [`spring.datasource.jndi-name` property](data.html#data.sql.datasource.jndi) to configure your `DataSource`. + +### 8.3. Mixing XA and Non-XA JMS Connections + +When using JTA, the primary JMS `ConnectionFactory` bean is XA-aware and participates in distributed transactions. +You can inject into your bean without needing to use any `@Qualifier`: + +``` +public MyBean(ConnectionFactory connectionFactory) { + // ... +} + +``` + +In some situations, you might want to process certain JMS messages by using a non-XA `ConnectionFactory`. +For example, your JMS processing logic might take longer than the XA timeout. 
+
+If you want to use a non-XA `ConnectionFactory`, you can use the `nonXaJmsConnectionFactory` bean:
+
+```
+public MyBean(@Qualifier("nonXaJmsConnectionFactory") ConnectionFactory connectionFactory) {
+    // ...
+}
+
+```
+
+For consistency, the `jmsConnectionFactory` bean is also provided by using the bean alias `xaJmsConnectionFactory`:
+
+```
+public MyBean(@Qualifier("xaJmsConnectionFactory") ConnectionFactory connectionFactory) {
+    // ...
+}
+
+```
+
+### 8.4. Supporting an Alternative Embedded Transaction Manager ###
+
+The [`XAConnectionFactoryWrapper`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/jms/XAConnectionFactoryWrapper.java) and [`XADataSourceWrapper`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/jdbc/XADataSourceWrapper.java) interfaces can be used to support alternative embedded transaction managers.
+The interfaces are responsible for wrapping `XAConnectionFactory` and `XADataSource` beans and exposing them as regular `ConnectionFactory` and `DataSource` beans, which transparently enroll in the distributed transaction.
+DataSource and JMS auto-configuration use JTA variants, provided you have a `JtaTransactionManager` bean and appropriate XA wrapper beans registered within your `ApplicationContext`.
+
+The [AtomikosXAConnectionFactoryWrapper](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/jta/atomikos/AtomikosXAConnectionFactoryWrapper.java) and [AtomikosXADataSourceWrapper](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/jta/atomikos/AtomikosXADataSourceWrapper.java) provide good examples of how to write XA wrappers.
+
+## 9. 
What to Read Next + +You should now have a good understanding of Spring Boot’s [core features](features.html#features) and the various technologies that Spring Boot provides support for via auto-configuration. + +The next few sections go into detail about deploying applications to cloud platforms. +You can read about [building container images](container-images.html#container-images) in the next section or skip to the [production-ready features](actuator.html#actuator) section. diff --git a/docs/en/spring-boot/legal.md b/docs/en/spring-boot/legal.md new file mode 100644 index 0000000000000000000000000000000000000000..b458b7bafa9557184b42aeb2ab3b13e9e8487fbe --- /dev/null +++ b/docs/en/spring-boot/legal.md @@ -0,0 +1,8 @@ +# Legal + +Copyright © 2012-2022 + +Copies of this document may be made for your own use and for distribution to +others, provided that you do not charge any fee for such copies and further +provided that each copy contains this Copyright Notice, whether distributed in +print or electronically. diff --git a/docs/en/spring-boot/messaging.md b/docs/en/spring-boot/messaging.md new file mode 100644 index 0000000000000000000000000000000000000000..91cce40be4533f6185ef0dbd263d0d463910ea75 --- /dev/null +++ b/docs/en/spring-boot/messaging.md @@ -0,0 +1,1000 @@ +# Messaging + +The Spring Framework provides extensive support for integrating with messaging systems, from simplified use of the JMS API using `JmsTemplate` to a complete infrastructure to receive messages asynchronously. +Spring AMQP provides a similar feature set for the Advanced Message Queuing Protocol. +Spring Boot also provides auto-configuration options for `RabbitTemplate` and RabbitMQ. +Spring WebSocket natively includes support for STOMP messaging, and Spring Boot has support for that through starters and a small amount of auto-configuration. +Spring Boot also has support for Apache Kafka. + +## 1. 
JMS + +The `javax.jms.ConnectionFactory` interface provides a standard method of creating a `javax.jms.Connection` for interacting with a JMS broker. +Although Spring needs a `ConnectionFactory` to work with JMS, you generally need not use it directly yourself and can instead rely on higher level messaging abstractions. +(See the [relevant section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/integration.html#jms) of the Spring Framework reference documentation for details.) +Spring Boot also auto-configures the necessary infrastructure to send and receive messages. + +### 1.1. ActiveMQ Support + +When [ActiveMQ](https://activemq.apache.org/) is available on the classpath, Spring Boot can also configure a `ConnectionFactory`. +If the broker is present, an embedded broker is automatically started and configured (provided no broker URL is specified through configuration and the embedded broker is not disabled in the configuration). + +| |If you use `spring-boot-starter-activemq`, the necessary dependencies to connect or embed an ActiveMQ instance are provided, as is the Spring infrastructure to integrate with JMS.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +ActiveMQ configuration is controlled by external configuration properties in `spring.activemq.*`. + +By default, ActiveMQ is auto-configured to use the [VM transport](https://activemq.apache.org/vm-transport-reference.html), which starts a broker embedded in the same JVM instance. 
+ +You can disable the embedded broker by configuring the `spring.activemq.in-memory` property, as shown in the following example: + +Properties + +``` +spring.activemq.in-memory=false +``` + +Yaml + +``` +spring: + activemq: + in-memory: false +``` + +The embedded broker will also be disabled if you configure the broker URL, as shown in the following example: + +Properties + +``` +spring.activemq.broker-url=tcp://192.168.1.210:9876 +spring.activemq.user=admin +spring.activemq.password=secret +``` + +Yaml + +``` +spring: + activemq: + broker-url: "tcp://192.168.1.210:9876" + user: "admin" + password: "secret" +``` + +If you want to take full control over the embedded broker, see [the ActiveMQ documentation](https://activemq.apache.org/how-do-i-embed-a-broker-inside-a-connection.html) for further information. + +By default, a `CachingConnectionFactory` wraps the native `ConnectionFactory` with sensible settings that you can control by external configuration properties in `spring.jms.*`: + +Properties + +``` +spring.jms.cache.session-cache-size=5 +``` + +Yaml + +``` +spring: + jms: + cache: + session-cache-size: 5 +``` + +If you’d rather use native pooling, you can do so by adding a dependency to `org.messaginghub:pooled-jms` and configuring the `JmsPoolConnectionFactory` accordingly, as shown in the following example: + +Properties + +``` +spring.activemq.pool.enabled=true +spring.activemq.pool.max-connections=50 +``` + +Yaml + +``` +spring: + activemq: + pool: + enabled: true + max-connections: 50 +``` + +| |See [`ActiveMQProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/jms/activemq/ActiveMQProperties.java) for more of the supported options.
You can also register an arbitrary number of beans that implement `ActiveMQConnectionFactoryCustomizer` for more advanced customizations.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, ActiveMQ creates a destination if it does not yet exist so that destinations are resolved against their provided names. + +### 1.2. ActiveMQ Artemis Support + +Spring Boot can auto-configure a `ConnectionFactory` when it detects that [ActiveMQ Artemis](https://activemq.apache.org/components/artemis/) is available on the classpath. +If the broker is present, an embedded broker is automatically started and configured (unless the mode property has been explicitly set). +The supported modes are `embedded` (to make explicit that an embedded broker is required and that an error should occur if the broker is not available on the classpath) and `native` (to connect to a broker using the `netty` transport protocol). +When the latter is configured, Spring Boot configures a `ConnectionFactory` that connects to a broker running on the local machine with the default settings. + +| |If you use `spring-boot-starter-artemis`, the necessary dependencies to connect to an existing ActiveMQ Artemis instance are provided, as well as the Spring infrastructure to integrate with JMS.
Adding `org.apache.activemq:artemis-jms-server` to your application lets you use embedded mode.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +ActiveMQ Artemis configuration is controlled by external configuration properties in `spring.artemis.*`. +For example, you might declare the following section in `application.properties`: + +Properties + +``` +spring.artemis.mode=native +spring.artemis.broker-url=tcp://192.168.1.210:9876 +spring.artemis.user=admin +spring.artemis.password=secret +``` + +Yaml + +``` +spring: + artemis: + mode: native + broker-url: "tcp://192.168.1.210:9876" + user: "admin" + password: "secret" +``` + +When embedding the broker, you can choose if you want to enable persistence and list the destinations that should be made available. +These can be specified as a comma-separated list to create them with the default options, or you can define bean(s) of type `org.apache.activemq.artemis.jms.server.config.JMSQueueConfiguration` or `org.apache.activemq.artemis.jms.server.config.TopicConfiguration`, for advanced queue and topic configurations, respectively. 
+ +By default, a `CachingConnectionFactory` wraps the native `ConnectionFactory` with sensible settings that you can control by external configuration properties in `spring.jms.*`: + +Properties + +``` +spring.jms.cache.session-cache-size=5 +``` + +Yaml + +``` +spring: + jms: + cache: + session-cache-size: 5 +``` + +If you’d rather use native pooling, you can do so by adding a dependency to `org.messaginghub:pooled-jms` and configuring the `JmsPoolConnectionFactory` accordingly, as shown in the following example: + +Properties + +``` +spring.artemis.pool.enabled=true +spring.artemis.pool.max-connections=50 +``` + +Yaml + +``` +spring: + artemis: + pool: + enabled: true + max-connections: 50 +``` + +See [`ArtemisProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/jms/artemis/ArtemisProperties.java) for more supported options. + +No JNDI lookup is involved, and destinations are resolved against their names, using either the `name` attribute in the Artemis configuration or the names provided through configuration. + +### 1.3. Using a JNDI ConnectionFactory + +If you are running your application in an application server, Spring Boot tries to locate a JMS `ConnectionFactory` by using JNDI. +By default, the `java:/JmsXA` and `java:/XAConnectionFactory` location are checked. +You can use the `spring.jms.jndi-name` property if you need to specify an alternative location, as shown in the following example: + +Properties + +``` +spring.jms.jndi-name=java:/MyConnectionFactory +``` + +Yaml + +``` +spring: + jms: + jndi-name: "java:/MyConnectionFactory" +``` + +### 1.4. 
Sending a Message + +Spring’s `JmsTemplate` is auto-configured, and you can autowire it directly into your own beans, as shown in the following example: + +``` +import org.springframework.jms.core.JmsTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final JmsTemplate jmsTemplate; + + public MyBean(JmsTemplate jmsTemplate) { + this.jmsTemplate = jmsTemplate; + } + + // ... + + public void someMethod() { + this.jmsTemplate.convertAndSend("hello"); + } + +} + +``` + +| |[`JmsMessagingTemplate`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jms/core/JmsMessagingTemplate.html) can be injected in a similar manner.
If a `DestinationResolver` or a `MessageConverter` bean is defined, it is associated automatically to the auto-configured `JmsTemplate`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.5. Receiving a Message + +When the JMS infrastructure is present, any bean can be annotated with `@JmsListener` to create a listener endpoint. +If no `JmsListenerContainerFactory` has been defined, a default one is configured automatically. +If a `DestinationResolver`, a `MessageConverter`, or a `javax.jms.ExceptionListener` beans are defined, they are associated automatically with the default factory. + +By default, the default factory is transactional. +If you run in an infrastructure where a `JtaTransactionManager` is present, it is associated to the listener container by default. +If not, the `sessionTransacted` flag is enabled. +In that latter scenario, you can associate your local data store transaction to the processing of an incoming message by adding `@Transactional` on your listener method (or a delegate thereof). +This ensures that the incoming message is acknowledged, once the local transaction has completed. +This also includes sending response messages that have been performed on the same JMS session. + +The following component creates a listener endpoint on the `someQueue` destination: + +``` +import org.springframework.jms.annotation.JmsListener; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + @JmsListener(destination = "someQueue") + public void processMessage(String content) { + // ... 
+ } + +} + +``` + +| |See [the Javadoc of `@EnableJms`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jms/annotation/EnableJms.html) for more details.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you need to create more `JmsListenerContainerFactory` instances or if you want to override the default, Spring Boot provides a `DefaultJmsListenerContainerFactoryConfigurer` that you can use to initialize a `DefaultJmsListenerContainerFactory` with the same settings as the one that is auto-configured. + +For instance, the following example exposes another factory that uses a specific `MessageConverter`: + +``` +import javax.jms.ConnectionFactory; + +import org.springframework.boot.autoconfigure.jms.DefaultJmsListenerContainerFactoryConfigurer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.jms.config.DefaultJmsListenerContainerFactory; + +@Configuration(proxyBeanMethods = false) +public class MyJmsConfiguration { + + @Bean + public DefaultJmsListenerContainerFactory myFactory(DefaultJmsListenerContainerFactoryConfigurer configurer) { + DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory(); + ConnectionFactory connectionFactory = getCustomConnectionFactory(); + configurer.configure(factory, connectionFactory); + factory.setMessageConverter(new MyMessageConverter()); + return factory; + } + + private ConnectionFactory getCustomConnectionFactory() { + return ... 
+ } + +} + +``` + +Then you can use the factory in any `@JmsListener`-annotated method as follows: + +``` +import org.springframework.jms.annotation.JmsListener; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + @JmsListener(destination = "someQueue", containerFactory = "myFactory") + public void processMessage(String content) { + // ... + } + +} + +``` + +## 2. AMQP + +The Advanced Message Queuing Protocol (AMQP) is a platform-neutral, wire-level protocol for message-oriented middleware. +The Spring AMQP project applies core Spring concepts to the development of AMQP-based messaging solutions. +Spring Boot offers several conveniences for working with AMQP through RabbitMQ, including the `spring-boot-starter-amqp` “Starter”. + +### 2.1. RabbitMQ support + +[RabbitMQ](https://www.rabbitmq.com/) is a lightweight, reliable, scalable, and portable message broker based on the AMQP protocol. +Spring uses `RabbitMQ` to communicate through the AMQP protocol. + +RabbitMQ configuration is controlled by external configuration properties in `spring.rabbitmq.*`. +For example, you might declare the following section in `application.properties`: + +Properties + +``` +spring.rabbitmq.host=localhost +spring.rabbitmq.port=5672 +spring.rabbitmq.username=admin +spring.rabbitmq.password=secret +``` + +Yaml + +``` +spring: + rabbitmq: + host: "localhost" + port: 5672 + username: "admin" + password: "secret" +``` + +Alternatively, you could configure the same connection using the `addresses` attribute: + +Properties + +``` +spring.rabbitmq.addresses=amqp://admin:[email protected] +``` + +Yaml + +``` +spring: + rabbitmq: + addresses: "amqp://admin:[email protected]" +``` + +| |When specifying addresses that way, the `host` and `port` properties are ignored.
If the address uses the `amqps` protocol, SSL support is enabled automatically.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [`RabbitProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/amqp/RabbitProperties.java) for more of the supported property-based configuration options. +To configure lower-level details of the RabbitMQ `ConnectionFactory` that is used by Spring AMQP, define a `ConnectionFactoryCustomizer` bean. + +If a `ConnectionNameStrategy` bean exists in the context, it will be automatically used to name connections created by the auto-configured `CachingConnectionFactory`. + +| |See [Understanding AMQP, the protocol used by RabbitMQ](https://spring.io/blog/2010/06/14/understanding-amqp-the-protocol-used-by-rabbitmq/) for more details.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.2. Sending a Message + +Spring’s `AmqpTemplate` and `AmqpAdmin` are auto-configured, and you can autowire them directly into your own beans, as shown in the following example: + +``` +import org.springframework.amqp.core.AmqpAdmin; +import org.springframework.amqp.core.AmqpTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final AmqpAdmin amqpAdmin; + + private final AmqpTemplate amqpTemplate; + + public MyBean(AmqpAdmin amqpAdmin, AmqpTemplate amqpTemplate) { + this.amqpAdmin = amqpAdmin; + this.amqpTemplate = amqpTemplate; + } + + // ... 
+ + public void someMethod() { + this.amqpAdmin.getQueueInfo("someQueue"); + } + + public void someOtherMethod() { + this.amqpTemplate.convertAndSend("hello"); + } + +} + +``` + +| |[`RabbitMessagingTemplate`](https://docs.spring.io/spring-amqp/docs/2.4.2/api/org/springframework/amqp/rabbit/core/RabbitMessagingTemplate.html) can be injected in a similar manner.
If a `MessageConverter` bean is defined, it is associated automatically to the auto-configured `AmqpTemplate`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If necessary, any `org.springframework.amqp.core.Queue` that is defined as a bean is automatically used to declare a corresponding queue on the RabbitMQ instance. + +To retry operations, you can enable retries on the `AmqpTemplate` (for example, in the event that the broker connection is lost): + +Properties + +``` +spring.rabbitmq.template.retry.enabled=true +spring.rabbitmq.template.retry.initial-interval=2s +``` + +Yaml + +``` +spring: + rabbitmq: + template: + retry: + enabled: true + initial-interval: "2s" +``` + +Retries are disabled by default. +You can also customize the `RetryTemplate` programmatically by declaring a `RabbitRetryTemplateCustomizer` bean. + +If you need to create more `RabbitTemplate` instances or if you want to override the default, Spring Boot provides a `RabbitTemplateConfigurer` bean that you can use to initialize a `RabbitTemplate` with the same settings as the factories used by the auto-configuration. + +### 2.3. Receiving a Message + +When the Rabbit infrastructure is present, any bean can be annotated with `@RabbitListener` to create a listener endpoint. +If no `RabbitListenerContainerFactory` has been defined, a default `SimpleRabbitListenerContainerFactory` is automatically configured and you can switch to a direct container using the `spring.rabbitmq.listener.type` property. +If a `MessageConverter` or a `MessageRecoverer` bean is defined, it is automatically associated with the default factory. 
+ +The following sample component creates a listener endpoint on the `someQueue` queue: + +``` +import org.springframework.amqp.rabbit.annotation.RabbitListener; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + @RabbitListener(queues = "someQueue") + public void processMessage(String content) { + // ... + } + +} + +``` + +| |See [the Javadoc of `@EnableRabbit`](https://docs.spring.io/spring-amqp/docs/2.4.2/api/org/springframework/amqp/rabbit/annotation/EnableRabbit.html) for more details.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you need to create more `RabbitListenerContainerFactory` instances or if you want to override the default, Spring Boot provides a `SimpleRabbitListenerContainerFactoryConfigurer` and a `DirectRabbitListenerContainerFactoryConfigurer` that you can use to initialize a `SimpleRabbitListenerContainerFactory` and a `DirectRabbitListenerContainerFactory` with the same settings as the factories used by the auto-configuration. + +| |It does not matter which container type you chose.
Those two beans are exposed by the auto-configuration.| +|---|-------------------------------------------------------------------------------------------------------------| + +For instance, the following configuration class exposes another factory that uses a specific `MessageConverter`: + +``` +import org.springframework.amqp.rabbit.config.SimpleRabbitListenerContainerFactory; +import org.springframework.amqp.rabbit.connection.ConnectionFactory; +import org.springframework.boot.autoconfigure.amqp.SimpleRabbitListenerContainerFactoryConfigurer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyRabbitConfiguration { + + @Bean + public SimpleRabbitListenerContainerFactory myFactory(SimpleRabbitListenerContainerFactoryConfigurer configurer) { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + ConnectionFactory connectionFactory = getCustomConnectionFactory(); + configurer.configure(factory, connectionFactory); + factory.setMessageConverter(new MyMessageConverter()); + return factory; + } + + private ConnectionFactory getCustomConnectionFactory() { + return ... + } + +} + +``` + +Then you can use the factory in any `@RabbitListener`-annotated method, as follows: + +``` +import org.springframework.amqp.rabbit.annotation.RabbitListener; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + @RabbitListener(queues = "someQueue", containerFactory = "myFactory") + public void processMessage(String content) { + // ... + } + +} + +``` + +You can enable retries to handle situations where your listener throws an exception. +By default, `RejectAndDontRequeueRecoverer` is used, but you can define a `MessageRecoverer` of your own. +When retries are exhausted, the message is rejected and either dropped or routed to a dead-letter exchange if the broker is configured to do so. 
+By default, retries are disabled. +You can also customize the `RetryTemplate` programmatically by declaring a `RabbitRetryTemplateCustomizer` bean. + +| |By default, if retries are disabled and the listener throws an exception, the delivery is retried indefinitely.
You can modify this behavior in two ways: Set the `defaultRequeueRejected` property to `false` so that zero re-deliveries are attempted, or throw an `AmqpRejectAndDontRequeueException` to signal the message should be rejected.
The latter is the mechanism used when retries are enabled and the maximum number of delivery attempts is reached.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 3. Apache Kafka Support + +[Apache Kafka](https://kafka.apache.org/) is supported by providing auto-configuration of the `spring-kafka` project. + +Kafka configuration is controlled by external configuration properties in `spring.kafka.*`. +For example, you might declare the following section in `application.properties`: + +Properties + +``` +spring.kafka.bootstrap-servers=localhost:9092 +spring.kafka.consumer.group-id=myGroup +``` + +Yaml + +``` +spring: + kafka: + bootstrap-servers: "localhost:9092" + consumer: + group-id: "myGroup" +``` + +| |To create a topic on startup, add a bean of type `NewTopic`.
If the topic already exists, the bean is ignored.| +|---|------------------------------------------------------------------------------------------------------------------| + +See [`KafkaProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/kafka/KafkaProperties.java) for more supported options. + +### 3.1. Sending a Message + +Spring’s `KafkaTemplate` is auto-configured, and you can autowire it directly in your own beans, as shown in the following example: + +``` +import org.springframework.kafka.core.KafkaTemplate; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + private final KafkaTemplate kafkaTemplate; + + public MyBean(KafkaTemplate kafkaTemplate) { + this.kafkaTemplate = kafkaTemplate; + } + + // ... + + public void someMethod() { + this.kafkaTemplate.send("someTopic", "Hello"); + } + +} + +``` + +| |If the property `spring.kafka.producer.transaction-id-prefix` is defined, a `KafkaTransactionManager` is automatically configured.
Also, if a `RecordMessageConverter` bean is defined, it is automatically associated to the auto-configured `KafkaTemplate`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.2. Receiving a Message + +When the Apache Kafka infrastructure is present, any bean can be annotated with `@KafkaListener` to create a listener endpoint. +If no `KafkaListenerContainerFactory` has been defined, a default one is automatically configured with keys defined in `spring.kafka.listener.*`. + +The following component creates a listener endpoint on the `someTopic` topic: + +``` +import org.springframework.kafka.annotation.KafkaListener; +import org.springframework.stereotype.Component; + +@Component +public class MyBean { + + @KafkaListener(topics = "someTopic") + public void processMessage(String content) { + // ... + } + +} + +``` + +If a `KafkaTransactionManager` bean is defined, it is automatically associated to the container factory. +Similarly, if a `RecordFilterStrategy`, `CommonErrorHandler`, `AfterRollbackProcessor` or `ConsumerAwareRebalanceListener` bean is defined, it is automatically associated to the default factory. + +Depending on the listener type, a `RecordMessageConverter` or `BatchMessageConverter` bean is associated to the default factory. +If only a `RecordMessageConverter` bean is present for a batch listener, it is wrapped in a `BatchMessageConverter`. + +| |A custom `ChainedKafkaTransactionManager` must be marked `@Primary` as it usually references the auto-configured `KafkaTransactionManager` bean.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.3. 
Kafka Streams + +Spring for Apache Kafka provides a factory bean to create a `StreamsBuilder` object and manage the lifecycle of its streams. +Spring Boot auto-configures the required `KafkaStreamsConfiguration` bean as long as `kafka-streams` is on the classpath and Kafka Streams is enabled by the `@EnableKafkaStreams` annotation. + +Enabling Kafka Streams means that the application id and bootstrap servers must be set. +The former can be configured using `spring.kafka.streams.application-id`, defaulting to `spring.application.name` if not set. +The latter can be set globally or specifically overridden only for streams. + +Several additional properties are available using dedicated properties; other arbitrary Kafka properties can be set using the `spring.kafka.streams.properties` namespace. +See also [features.html](features.html#messaging.kafka.additional-properties) for more information. + +To use the factory bean, wire `StreamsBuilder` into your `@Bean` as shown in the following example: + +``` +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.Produced; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.kafka.annotation.EnableKafkaStreams; +import org.springframework.kafka.support.serializer.JsonSerde; + +@Configuration(proxyBeanMethods = false) +@EnableKafkaStreams +public class MyKafkaStreamsConfiguration { + + @Bean + public KStream kStream(StreamsBuilder streamsBuilder) { + KStream stream = streamsBuilder.stream("ks1In"); + stream.map(this::uppercaseValue).to("ks1Out", Produced.with(Serdes.Integer(), new JsonSerde<>())); + return stream; + } + + private KeyValue uppercaseValue(Integer key, String value) { + return new KeyValue<>(key, value.toUpperCase()); + } + +} + +``` + +By 
default, the streams managed by the `StreamsBuilder` object it creates are started automatically. +You can customize this behavior using the `spring.kafka.streams.auto-startup` property. + +### 3.4. Additional Kafka Properties + +The properties supported by auto configuration are shown in the [“Integration Properties”](application-properties.html#appendix.application-properties.integration) section of the Appendix. +Note that, for the most part, these properties (hyphenated or camelCase) map directly to the Apache Kafka dotted properties. +See the Apache Kafka documentation for details. + +The first few of these properties apply to all components (producers, consumers, admins, and streams) but can be specified at the component level if you wish to use different values. +Apache Kafka designates properties with an importance of HIGH, MEDIUM, or LOW. +Spring Boot auto-configuration supports all HIGH importance properties, some selected MEDIUM and LOW properties, and any properties that do not have a default value. + +Only a subset of the properties supported by Kafka are available directly through the `KafkaProperties` class. 
+If you wish to configure the producer or consumer with additional properties that are not directly supported, use the following properties: + +Properties + +``` +spring.kafka.properties[prop.one]=first +spring.kafka.admin.properties[prop.two]=second +spring.kafka.consumer.properties[prop.three]=third +spring.kafka.producer.properties[prop.four]=fourth +spring.kafka.streams.properties[prop.five]=fifth +``` + +Yaml + +``` +spring: + kafka: + properties: + "[prop.one]": "first" + admin: + properties: + "[prop.two]": "second" + consumer: + properties: + "[prop.three]": "third" + producer: + properties: + "[prop.four]": "fourth" + streams: + properties: + "[prop.five]": "fifth" +``` + +This sets the common `prop.one` Kafka property to `first` (applies to producers, consumers and admins), the `prop.two` admin property to `second`, the `prop.three` consumer property to `third`, the `prop.four` producer property to `fourth` and the `prop.five` streams property to `fifth`. + +You can also configure the Spring Kafka `JsonDeserializer` as follows: + +Properties + +``` +spring.kafka.consumer.value-deserializer=org.springframework.kafka.support.serializer.JsonDeserializer +spring.kafka.consumer.properties[spring.json.value.default.type]=com.example.Invoice +spring.kafka.consumer.properties[spring.json.trusted.packages]=com.example.main,com.example.another +``` + +Yaml + +``` +spring: + kafka: + consumer: + value-deserializer: "org.springframework.kafka.support.serializer.JsonDeserializer" + properties: + "[spring.json.value.default.type]": "com.example.Invoice" + "[spring.json.trusted.packages]": "com.example.main,com.example.another" +``` + +Similarly, you can disable the `JsonSerializer` default behavior of sending type information in headers: + +Properties + +``` +spring.kafka.producer.value-serializer=org.springframework.kafka.support.serializer.JsonSerializer +spring.kafka.producer.properties[spring.json.add.type.headers]=false +``` + +Yaml + +``` +spring: + kafka: + 
producer: + value-serializer: "org.springframework.kafka.support.serializer.JsonSerializer" + properties: + "[spring.json.add.type.headers]": false +``` + +| |Properties set in this way override any configuration item that Spring Boot explicitly supports.| +|---|------------------------------------------------------------------------------------------------| + +### 3.5. Testing with Embedded Kafka + +Spring for Apache Kafka provides a convenient way to test projects with an embedded Apache Kafka broker. +To use this feature, annotate a test class with `@EmbeddedKafka` from the `spring-kafka-test` module. +For more information, please see the Spring for Apache Kafka [reference manual](https://docs.spring.io/spring-kafka/docs/2.8.3/reference/html/#embedded-kafka-annotation). + +To make Spring Boot auto-configuration work with the aforementioned embedded Apache Kafka broker, you need to remap a system property for embedded broker addresses (populated by the `EmbeddedKafkaBroker`) into the Spring Boot configuration property for Apache Kafka. +There are several ways to do that: + +* Provide a system property to map embedded broker addresses into `spring.kafka.bootstrap-servers` in the test class: + +``` +static { + System.setProperty(EmbeddedKafkaBroker.BROKER_LIST_PROPERTY, "spring.kafka.bootstrap-servers"); +} + +``` + +* Configure a property name on the `@EmbeddedKafka` annotation: + +``` +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.kafka.test.context.EmbeddedKafka; + +@SpringBootTest +@EmbeddedKafka(topics = "someTopic", bootstrapServersProperty = "spring.kafka.bootstrap-servers") +class MyTest { + + // ... + +} + +``` + +* Use a placeholder in configuration properties: + +Properties + +``` +spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers} +``` + +Yaml + +``` +spring: + kafka: + bootstrap-servers: "${spring.embedded.kafka.brokers}" +``` + +## 4. 
RSocket + +[RSocket](https://rsocket.io) is a binary protocol for use on byte stream transports. +It enables symmetric interaction models through async message passing over a single connection. + +The `spring-messaging` module of the Spring Framework provides support for RSocket requesters and responders, both on the client and on the server side. +See the [RSocket section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web-reactive.html#rsocket-spring) of the Spring Framework reference for more details, including an overview of the RSocket protocol. + +### 4.1. RSocket Strategies Auto-configuration + +Spring Boot auto-configures an `RSocketStrategies` bean that provides all the required infrastructure for encoding and decoding RSocket payloads. +By default, the auto-configuration will try to configure the following (in order): + +1. [CBOR](https://cbor.io/) codecs with Jackson + +2. JSON codecs with Jackson + +The `spring-boot-starter-rsocket` starter provides both dependencies. +See the [Jackson support section](features.html#features.json.jackson) to know more about customization possibilities. + +Developers can customize the `RSocketStrategies` component by creating beans that implement the `RSocketStrategiesCustomizer` interface. +Note that their `@Order` is important, as it determines the order of codecs. + +### 4.2. RSocket server Auto-configuration + +Spring Boot provides RSocket server auto-configuration. +The required dependencies are provided by the `spring-boot-starter-rsocket`. + +Spring Boot allows exposing RSocket over WebSocket from a WebFlux server, or standing up an independent RSocket server. +This depends on the type of application and its configuration. 
+ +For WebFlux application (that is of type `WebApplicationType.REACTIVE`), the RSocket server will be plugged into the Web Server only if the following properties match: + +Properties + +``` +spring.rsocket.server.mapping-path=/rsocket +spring.rsocket.server.transport=websocket +``` + +Yaml + +``` +spring: + rsocket: + server: + mapping-path: "/rsocket" + transport: "websocket" +``` + +| |Plugging RSocket into a web server is only supported with Reactor Netty, as RSocket itself is built with that library.| +|---|----------------------------------------------------------------------------------------------------------------------| + +Alternatively, an RSocket TCP or websocket server is started as an independent, embedded server. +Besides the dependency requirements, the only required configuration is to define a port for that server: + +Properties + +``` +spring.rsocket.server.port=9898 +``` + +Yaml + +``` +spring: + rsocket: + server: + port: 9898 +``` + +### 4.3. Spring Messaging RSocket support + +Spring Boot will auto-configure the Spring Messaging infrastructure for RSocket. + +This means that Spring Boot will create a `RSocketMessageHandler` bean that will handle RSocket requests to your application. + +### 4.4. Calling RSocket Services with RSocketRequester + +Once the `RSocket` channel is established between server and client, any party can send or receive requests to the other. + +As a server, you can get injected with an `RSocketRequester` instance on any handler method of an RSocket `@Controller`. +As a client, you need to configure and establish an RSocket connection first. +Spring Boot auto-configures an `RSocketRequester.Builder` for such cases with the expected codecs and applies any `RSocketConnectorConfigurer` bean. + +The `RSocketRequester.Builder` instance is a prototype bean, meaning each injection point will provide you with a new instance . 
+This is done on purpose since this builder is stateful and you should not create requesters with different setups using the same instance. + +The following code shows a typical example: + +``` +import reactor.core.publisher.Mono; + +import org.springframework.messaging.rsocket.RSocketRequester; +import org.springframework.stereotype.Service; + +@Service +public class MyService { + + private final RSocketRequester rsocketRequester; + + public MyService(RSocketRequester.Builder rsocketRequesterBuilder) { + this.rsocketRequester = rsocketRequesterBuilder.tcp("example.org", 9898); + } + + public Mono someRSocketCall(String name) { + return this.rsocketRequester.route("user").data(name).retrieveMono(User.class); + } + +} + +``` + +## 5. Spring Integration + +Spring Boot offers several conveniences for working with [Spring Integration](https://spring.io/projects/spring-integration), including the `spring-boot-starter-integration` “Starter”. +Spring Integration provides abstractions over messaging and also other transports such as HTTP, TCP, and others. +If Spring Integration is available on your classpath, it is initialized through the `@EnableIntegration` annotation. + +Spring Integration polling logic relies [on the auto-configured `TaskScheduler`](features.html#features.task-execution-and-scheduling). +The default `PollerMetadata` (poll unbounded number of messages every second) can be customized with `spring.integration.poller.*` configuration properties. + +Spring Boot also configures some features that are triggered by the presence of additional Spring Integration modules. +If `spring-integration-jmx` is also on the classpath, message processing statistics are published over JMX. 
+If `spring-integration-jdbc` is available, the default database schema can be created on startup, as shown in the following line: + +Properties + +``` +spring.integration.jdbc.initialize-schema=always +``` + +Yaml + +``` +spring: + integration: + jdbc: + initialize-schema: "always" +``` + +If `spring-integration-rsocket` is available, developers can configure an RSocket server using `"spring.rsocket.server.*"` properties and let it use `IntegrationRSocketEndpoint` or `RSocketOutboundGateway` components to handle incoming RSocket messages. +This infrastructure can handle Spring Integration RSocket channel adapters and `@MessageMapping` handlers (given `"spring.integration.rsocket.server.message-mapping-enabled"` is configured). + +Spring Boot can also auto-configure an `ClientRSocketConnector` using configuration properties: + +Properties + +``` +# Connecting to a RSocket server over TCP +spring.integration.rsocket.client.host=example.org +spring.integration.rsocket.client.port=9898 +``` + +Yaml + +``` +# Connecting to a RSocket server over TCP +spring: + integration: + rsocket: + client: + host: "example.org" + port: 9898 +``` + +Properties + +``` +# Connecting to a RSocket Server over WebSocket +spring.integration.rsocket.client.uri=ws://example.org +``` + +Yaml + +``` +# Connecting to a RSocket Server over WebSocket +spring: + integration: + rsocket: + client: + uri: "ws://example.org" +``` + +See the [`IntegrationAutoConfiguration`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/integration/IntegrationAutoConfiguration.java) and [`IntegrationProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/integration/IntegrationProperties.java) classes for more details. + +## 6. 
What to Read Next + +The next section describes how to enable [IO capabilities](io.html#io) in your application. +You can read about [caching](io.html#io.caching), [mail](io.html#io.email), [validation](io.html#io.validation), [rest clients](io.html#io.rest-client) and more in this section. diff --git a/docs/en/spring-boot/upgrading.md b/docs/en/spring-boot/upgrading.md new file mode 100644 index 0000000000000000000000000000000000000000..c647ed21342ca184622550219e16d70515786578 --- /dev/null +++ b/docs/en/spring-boot/upgrading.md @@ -0,0 +1,44 @@ +# Upgrading Spring Boot + +Instructions for how to upgrade from earlier versions of Spring Boot are provided on the project [wiki](https://github.com/spring-projects/spring-boot/wiki). +Follow the links in the [release notes](https://github.com/spring-projects/spring-boot/wiki#release-notes) section to find the version that you want to upgrade to. + +Upgrading instructions are always the first item in the release notes. +If you are more than one release behind, please make sure that you also review the release notes of the versions that you jumped. + +## 1. Upgrading from 1.x + +If you are upgrading from the `1.x` release of Spring Boot, check the [“migration guide” on the project wiki](https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-2.0-Migration-Guide) that provides detailed upgrade instructions. +Check also the [“release notes”](https://github.com/spring-projects/spring-boot/wiki) for a list of “new and noteworthy” features for each release. + +## 2. Upgrading to a new feature release + +When upgrading to a new feature release, some properties may have been renamed or removed. +Spring Boot provides a way to analyze your application’s environment and print diagnostics at startup, but also temporarily migrate properties at runtime for you. 
+To enable that feature, add the following dependency to your project: + +``` + + org.springframework.boot + spring-boot-properties-migrator + runtime + +``` + +| |Properties that are added late to the environment, such as when using `@PropertySource`, will not be taken into account.| +|---|------------------------------------------------------------------------------------------------------------------------| + +| |Once you finish the migration, please make sure to remove this module from your project’s dependencies.| +|---|-------------------------------------------------------------------------------------------------------| + +## 3. Upgrading the Spring Boot CLI + +To upgrade an existing CLI installation, use the appropriate package manager command (for example, `brew upgrade`). +If you manually installed the CLI, follow the [standard instructions](getting-started.html#getting-started.installing.cli.manual-installation), remembering to update your `PATH` environment variable to remove any older references. + +## 4. What to Read Next + +Once you’ve decided to upgrade your application, you can find detailed information regarding specific features in the rest of the document. + +Spring Boot’s documentation is specific to that version, so any information that you find in here will contain the most up-to-date changes that are in that version. + diff --git a/docs/en/spring-boot/using.md b/docs/en/spring-boot/using.md new file mode 100644 index 0000000000000000000000000000000000000000..99fcfa58568a6f6af7354982b4e2afe80c44b336 --- /dev/null +++ b/docs/en/spring-boot/using.md @@ -0,0 +1,969 @@ +# Developing with Spring Boot + +This section goes into more detail about how you should use Spring Boot. +It covers topics such as build systems, auto-configuration, and how to run your applications. +We also cover some Spring Boot best practices. 
+Although there is nothing particularly special about Spring Boot (it is just another library that you can consume), there are a few recommendations that, when followed, make your development process a little easier. + +If you are starting out with Spring Boot, you should probably read the *[Getting Started](getting-started.html#getting-started)* guide before diving into this section. + +## 1. Build Systems + +It is strongly recommended that you choose a build system that supports [*dependency management*](#using.build-systems.dependency-management) and that can consume artifacts published to the “Maven Central” repository. +We would recommend that you choose Maven or Gradle. +It is possible to get Spring Boot to work with other build systems (Ant, for example), but they are not particularly well supported. + +### 1.1. Dependency Management + +Each release of Spring Boot provides a curated list of dependencies that it supports. +In practice, you do not need to provide a version for any of these dependencies in your build configuration, as Spring Boot manages that for you. +When you upgrade Spring Boot itself, these dependencies are upgraded as well in a consistent way. + +| |You can still specify a version and override Spring Boot’s recommendations if you need to do so.| +|---|------------------------------------------------------------------------------------------------| + +The curated list contains all the Spring modules that you can use with Spring Boot as well as a refined list of third party libraries. +The list is available as a standard Bills of Materials (`spring-boot-dependencies`) that can be used with both [Maven](#using.build-systems.maven) and [Gradle](#using.build-systems.gradle). + +| |Each release of Spring Boot is associated with a base version of the Spring Framework.
We **highly** recommend that you not specify its version.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.2. Maven + +To learn about using Spring Boot with Maven, see the documentation for Spring Boot’s Maven plugin: + +* Reference ([HTML](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/) and [PDF](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/pdf/spring-boot-maven-plugin-reference.pdf)) + +* [API](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/api/) + +### 1.3. Gradle + +To learn about using Spring Boot with Gradle, see the documentation for Spring Boot’s Gradle plugin: + +* Reference ([HTML](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/) and [PDF](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/pdf/spring-boot-gradle-plugin-reference.pdf)) + +* [API](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/api/) + +### 1.4. Ant + +It is possible to build a Spring Boot project using Apache Ant+Ivy. +The `spring-boot-antlib` “AntLib” module is also available to help Ant create executable jars. + +To declare dependencies, a typical `ivy.xml` file looks something like the following example: + +``` + + + + + + + + + + +``` + +A typical `build.xml` looks like the following example: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +| |If you do not want to use the `spring-boot-antlib` module, see the *[howto.html](howto.html#howto.build.build-an-executable-archive-with-ant-without-using-spring-boot-antlib)* “How-to” .| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.5. 
Starters + +Starters are a set of convenient dependency descriptors that you can include in your application. +You get a one-stop shop for all the Spring and related technologies that you need without having to hunt through sample code and copy-paste loads of dependency descriptors. +For example, if you want to get started using Spring and JPA for database access, include the `spring-boot-starter-data-jpa` dependency in your project. + +The starters contain a lot of the dependencies that you need to get a project up and running quickly and with a consistent, supported set of managed transitive dependencies. + +What is in a name + +All **official** starters follow a similar naming pattern; `spring-boot-starter-*`, where `*` is a particular type of application. +This naming structure is intended to help when you need to find a starter. +The Maven integration in many IDEs lets you search dependencies by name. +For example, with the appropriate Eclipse or Spring Tools plugin installed, you can press `ctrl-space` in the POM editor and type “spring-boot-starter” for a complete list. + +As explained in the “[Creating Your Own Starter](features.html#features.developing-auto-configuration.custom-starter)” section, third party starters should not start with `spring-boot`, as it is reserved for official Spring Boot artifacts. +Rather, a third-party starter typically starts with the name of the project. +For example, a third-party starter project called `thirdpartyproject` would typically be named `thirdpartyproject-spring-boot-starter`. 
+ +The following application starters are provided by Spring Boot under the `org.springframework.boot` group: + +| Name | Description | +|-------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| []()`spring-boot-starter` | Core starter, including auto-configuration support, logging and YAML | +| []()`spring-boot-starter-activemq` | Starter for JMS messaging using Apache ActiveMQ | +| []()`spring-boot-starter-amqp` | Starter for using Spring AMQP and Rabbit MQ | +| []()`spring-boot-starter-aop` | Starter for aspect-oriented programming with Spring AOP and AspectJ | +| []()`spring-boot-starter-artemis` | Starter for JMS messaging using Apache Artemis | +| []()`spring-boot-starter-batch` | Starter for using Spring Batch | +| []()`spring-boot-starter-cache` | Starter for using Spring Framework’s caching support | +| []()`spring-boot-starter-data-cassandra` | Starter for using Cassandra distributed database and Spring Data Cassandra | +|[]()`spring-boot-starter-data-cassandra-reactive`| Starter for using Cassandra distributed database and Spring Data Cassandra Reactive | +| []()`spring-boot-starter-data-couchbase` | Starter for using Couchbase document-oriented database and Spring Data Couchbase | +|[]()`spring-boot-starter-data-couchbase-reactive`| Starter for using Couchbase document-oriented database and Spring Data Couchbase Reactive | +| []()`spring-boot-starter-data-elasticsearch` | Starter for using Elasticsearch search and analytics engine and Spring Data Elasticsearch | +| []()`spring-boot-starter-data-jdbc` | Starter for using Spring Data JDBC | +| []()`spring-boot-starter-data-jpa` | Starter for using Spring Data JPA with Hibernate | +| []()`spring-boot-starter-data-ldap` | Starter for using Spring Data LDAP | +| []()`spring-boot-starter-data-mongodb` | Starter for 
using MongoDB document-oriented database and Spring Data MongoDB | +| []()`spring-boot-starter-data-mongodb-reactive` | Starter for using MongoDB document-oriented database and Spring Data MongoDB Reactive | +| []()`spring-boot-starter-data-neo4j` | Starter for using Neo4j graph database and Spring Data Neo4j | +| []()`spring-boot-starter-data-r2dbc` | Starter for using Spring Data R2DBC | +| []()`spring-boot-starter-data-redis` | Starter for using Redis key-value data store with Spring Data Redis and the Lettuce client | +| []()`spring-boot-starter-data-redis-reactive` | Starter for using Redis key-value data store with Spring Data Redis reactive and the Lettuce client | +| []()`spring-boot-starter-data-rest` | Starter for exposing Spring Data repositories over REST using Spring Data REST | +| []()`spring-boot-starter-freemarker` | Starter for building MVC web applications using FreeMarker views | +| []()`spring-boot-starter-groovy-templates` | Starter for building MVC web applications using Groovy Templates views | +| []()`spring-boot-starter-hateoas` | Starter for building hypermedia-based RESTful web application with Spring MVC and Spring HATEOAS | +| []()`spring-boot-starter-integration` | Starter for using Spring Integration | +| []()`spring-boot-starter-jdbc` | Starter for using JDBC with the HikariCP connection pool | +| []()`spring-boot-starter-jersey` | Starter for building RESTful web applications using JAX-RS and Jersey. An alternative to [`spring-boot-starter-web`](#spring-boot-starter-web) | +| []()`spring-boot-starter-jooq` |Starter for using jOOQ to access SQL databases with JDBC. 
An alternative to [`spring-boot-starter-data-jpa`](#spring-boot-starter-data-jpa) or [`spring-boot-starter-jdbc`](#spring-boot-starter-jdbc)| +| []()`spring-boot-starter-json` | Starter for reading and writing json | +| []()`spring-boot-starter-jta-atomikos` | Starter for JTA transactions using Atomikos | +| []()`spring-boot-starter-mail` | Starter for using Java Mail and Spring Framework’s email sending support | +| []()`spring-boot-starter-mustache` | Starter for building web applications using Mustache views | +| []()`spring-boot-starter-oauth2-client` | Starter for using Spring Security’s OAuth2/OpenID Connect client features | +|[]()`spring-boot-starter-oauth2-resource-server` | Starter for using Spring Security’s OAuth2 resource server features | +| []()`spring-boot-starter-quartz` | Starter for using the Quartz scheduler | +| []()`spring-boot-starter-rsocket` | Starter for building RSocket clients and servers | +| []()`spring-boot-starter-security` | Starter for using Spring Security | +| []()`spring-boot-starter-test` | Starter for testing Spring Boot applications with libraries including JUnit Jupiter, Hamcrest and Mockito | +| []()`spring-boot-starter-thymeleaf` | Starter for building MVC web applications using Thymeleaf views | +| []()`spring-boot-starter-validation` | Starter for using Java Bean Validation with Hibernate Validator | +| []()`spring-boot-starter-web` | Starter for building web, including RESTful, applications using Spring MVC. 
Uses Tomcat as the default embedded container | +| []()`spring-boot-starter-web-services` | Starter for using Spring Web Services | +| []()`spring-boot-starter-webflux` | Starter for building WebFlux applications using Spring Framework’s Reactive Web support | +| []()`spring-boot-starter-websocket` | Starter for building WebSocket applications using Spring Framework’s WebSocket support | + +In addition to the application starters, the following starters can be used to add *[production ready](actuator.html#actuator)* features: + +| Name | Description | +|----------------------------------|---------------------------------------------------------------------------------------------------------------------------------| +|[]()`spring-boot-starter-actuator`|Starter for using Spring Boot’s Actuator which provides production ready features to help you monitor and manage your application| + +Finally, Spring Boot also includes the following starters that can be used if you want to exclude or swap specific technical facets: + +| Name | Description | +|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------| +| []()`spring-boot-starter-jetty` | Starter for using Jetty as the embedded servlet container. An alternative to [`spring-boot-starter-tomcat`](#spring-boot-starter-tomcat) | +| []()`spring-boot-starter-log4j2` | Starter for using Log4j2 for logging. An alternative to [`spring-boot-starter-logging`](#spring-boot-starter-logging) | +| []()`spring-boot-starter-logging` | Starter for logging using Logback. Default logging starter | +|[]()`spring-boot-starter-reactor-netty`| Starter for using Reactor Netty as the embedded reactive HTTP server. | +| []()`spring-boot-starter-tomcat` |Starter for using Tomcat as the embedded servlet container. 
Default servlet container starter used by [`spring-boot-starter-web`](#spring-boot-starter-web)| +| []()`spring-boot-starter-undertow` | Starter for using Undertow as the embedded servlet container. An alternative to [`spring-boot-starter-tomcat`](#spring-boot-starter-tomcat) | + +To learn how to swap technical facets, please see the how-to documentation for [swapping web server](howto.html#howto.webserver.use-another) and [logging system](howto.html#howto.logging.log4j). + +| |For a list of additional community contributed starters, see the [README file](https://github.com/spring-projects/spring-boot/tree/main/spring-boot-project/spring-boot-starters/README.adoc) in the `spring-boot-starters` module on GitHub.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 2. Structuring Your Code + +Spring Boot does not require any specific code layout to work. +However, there are some best practices that help. + +### 2.1. Using the “default” Package + +When a class does not include a `package` declaration, it is considered to be in the “default package”. +The use of the “default package” is generally discouraged and should be avoided. +It can cause particular problems for Spring Boot applications that use the `@ComponentScan`, `@ConfigurationPropertiesScan`, `@EntityScan`, or `@SpringBootApplication` annotations, since every class from every jar is read. + +| |We recommend that you follow Java’s recommended package naming conventions and use a reversed domain name (for example, `com.example.project`).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.2. 
Locating the Main Application Class + +We generally recommend that you locate your main application class in a root package above other classes. +The [`@SpringBootApplication` annotation](#using.using-the-springbootapplication-annotation) is often placed on your main class, and it implicitly defines a base “search package” for certain items. +For example, if you are writing a JPA application, the package of the `@SpringBootApplication` annotated class is used to search for `@Entity` items. +Using a root package also allows component scan to apply only on your project. + +| |If you do not want to use `@SpringBootApplication`, the `@EnableAutoConfiguration` and `@ComponentScan` annotations that it imports defines that behavior so you can also use those instead.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following listing shows a typical layout: + +``` +com + +- example + +- myapplication + +- MyApplication.java + | + +- customer + | +- Customer.java + | +- CustomerController.java + | +- CustomerService.java + | +- CustomerRepository.java + | + +- order + +- Order.java + +- OrderController.java + +- OrderService.java + +- OrderRepository.java +``` + +The `MyApplication.java` file would declare the `main` method, along with the basic `@SpringBootApplication`, as follows: + +``` +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class MyApplication { + + public static void main(String[] args) { + SpringApplication.run(MyApplication.class, args); + } + +} + +``` + +## 3. Configuration Classes + +Spring Boot favors Java-based configuration. +Although it is possible to use `SpringApplication` with XML sources, we generally recommend that your primary source be a single `@Configuration` class. 
+Usually the class that defines the `main` method is a good candidate as the primary `@Configuration`. + +| |Many Spring configuration examples have been published on the Internet that use XML configuration.
If possible, always try to use the equivalent Java-based configuration.
Searching for `Enable*` annotations can be a good starting point.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.1. Importing Additional Configuration Classes ### + +You need not put all your `@Configuration` into a single class. +The `@Import` annotation can be used to import additional configuration classes. +Alternatively, you can use `@ComponentScan` to automatically pick up all Spring components, including `@Configuration` classes. + +### 3.2. Importing XML Configuration + +If you absolutely must use XML based configuration, we recommend that you still start with a `@Configuration` class. +You can then use an `@ImportResource` annotation to load XML configuration files. + +## 4. Auto-configuration + +Spring Boot auto-configuration attempts to automatically configure your Spring application based on the jar dependencies that you have added. +For example, if `HSQLDB` is on your classpath, and you have not manually configured any database connection beans, then Spring Boot auto-configures an in-memory database. + +You need to opt-in to auto-configuration by adding the `@EnableAutoConfiguration` or `@SpringBootApplication` annotations to one of your `@Configuration` classes. + +| |You should only ever add one `@SpringBootApplication` or `@EnableAutoConfiguration` annotation.
We generally recommend that you add one or the other to your primary `@Configuration` class only.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.1. Gradually Replacing Auto-configuration + +Auto-configuration is non-invasive. +At any point, you can start to define your own configuration to replace specific parts of the auto-configuration. +For example, if you add your own `DataSource` bean, the default embedded database support backs away. + +If you need to find out what auto-configuration is currently being applied, and why, start your application with the `--debug` switch. +Doing so enables debug logs for a selection of core loggers and logs a conditions report to the console. + +### 4.2. Disabling Specific Auto-configuration Classes + +If you find that specific auto-configuration classes that you do not want are being applied, you can use the exclude attribute of `@SpringBootApplication` to disable them, as shown in the following example: + +``` +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration; + +@SpringBootApplication(exclude = { DataSourceAutoConfiguration.class }) +public class MyApplication { + +} + +``` + +If the class is not on the classpath, you can use the `excludeName` attribute of the annotation and specify the fully qualified name instead. +If you prefer to use `@EnableAutoConfiguration` rather than `@SpringBootApplication`, `exclude` and `excludeName` are also available. +Finally, you can also control the list of auto-configuration classes to exclude by using the `spring.autoconfigure.exclude` property. 
+ +| |You can define exclusions both at the annotation level and by using the property.| +|---|---------------------------------------------------------------------------------| + +| |Even though auto-configuration classes are `public`, the only aspect of the class that is considered public API is the name of the class which can be used for disabling the auto-configuration.
The actual contents of those classes, such as nested configuration classes or bean methods are for internal use only and we do not recommend using those directly.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 5. Spring Beans and Dependency Injection + +You are free to use any of the standard Spring Framework techniques to define your beans and their injected dependencies. +We generally recommend using constructor injection to wire up dependencies and `@ComponentScan` to find beans. + +If you structure your code as suggested above (locating your application class in a top package), you can add `@ComponentScan` without any arguments or use the `@SpringBootApplication` annotation which implicitly includes it. +All of your application components (`@Component`, `@Service`, `@Repository`, `@Controller`, and others) are automatically registered as Spring Beans. + +The following example shows a `@Service` Bean that uses constructor injection to obtain a required `RiskAssessor` bean: + +``` +import org.springframework.stereotype.Service; + +@Service +public class MyAccountService implements AccountService { + + private final RiskAssessor riskAssessor; + + public MyAccountService(RiskAssessor riskAssessor) { + this.riskAssessor = riskAssessor; + } + + // ... 
+ +} + +``` + +If a bean has more than one constructor, you will need to mark the one you want Spring to use with `@Autowired`: + +``` +import java.io.PrintStream; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +@Service +public class MyAccountService implements AccountService { + + private final RiskAssessor riskAssessor; + + private final PrintStream out; + + @Autowired + public MyAccountService(RiskAssessor riskAssessor) { + this.riskAssessor = riskAssessor; + this.out = System.out; + } + + public MyAccountService(RiskAssessor riskAssessor, PrintStream out) { + this.riskAssessor = riskAssessor; + this.out = out; + } + + // ... + +} + +``` + +| |Notice how using constructor injection lets the `riskAssessor` field be marked as `final`, indicating that it cannot be subsequently changed.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------| + +## 6. Using the @SpringBootApplication Annotation + +Many Spring Boot developers like their apps to use auto-configuration, component scan and be able to define extra configuration on their "application class". +A single `@SpringBootApplication` annotation can be used to enable those three features, that is: + +* `@EnableAutoConfiguration`: enable [Spring Boot’s auto-configuration mechanism](#using.auto-configuration) + +* `@ComponentScan`: enable `@Component` scan on the package where the application is located (see [the best practices](#using.structuring-your-code)) + +* `@SpringBootConfiguration`: enable registration of extra beans in the context or the import of additional configuration classes. + An alternative to Spring’s standard `@Configuration` that aids [configuration detection](features.html#features.testing.spring-boot-applications.detecting-configuration) in your integration tests. 
+ +``` +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +// Same as @SpringBootConfiguration @EnableAutoConfiguration @ComponentScan +@SpringBootApplication +public class MyApplication { + + public static void main(String[] args) { + SpringApplication.run(MyApplication.class, args); + } + +} + +``` + +| |`@SpringBootApplication` also provides aliases to customize the attributes of `@EnableAutoConfiguration` and `@ComponentScan`.| +|---|------------------------------------------------------------------------------------------------------------------------------| + +| |None of these features are mandatory and you may choose to replace this single annotation by any of the features that it enables.
For instance, you may not want to use component scan or configuration properties scan in your application:

```
import org.springframework.boot.SpringApplication;
import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.context.annotation.Import;

@SpringBootConfiguration(proxyBeanMethods = false)
@EnableAutoConfiguration
@Import({ SomeConfiguration.class, AnotherConfiguration.class })
public class MyApplication {

public static void main(String[] args) {
SpringApplication.run(MyApplication.class, args);
}

}

```

In this example, `MyApplication` is just like any other Spring Boot application except that `@Component`-annotated classes and `@ConfigurationProperties`-annotated classes are not detected automatically and the user-defined beans are imported explicitly (see `@Import`).| +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 7. Running Your Application + +One of the biggest advantages of packaging your application as a jar and using an embedded HTTP server is that you can run your application as you would any other. +The same applies to debugging Spring Boot applications. +You do not need any special IDE plugins or extensions. + +| |This section only covers jar-based packaging.
If you choose to package your application as a war file, see your server and IDE documentation.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------| + +### 7.1. Running from an IDE + +You can run a Spring Boot application from your IDE as a Java application. +However, you first need to import your project. +Import steps vary depending on your IDE and build system. +Most IDEs can import Maven projects directly. +For example, Eclipse users can select `Import…​` → `Existing Maven Projects` from the `File` menu. + +If you cannot directly import your project into your IDE, you may be able to generate IDE metadata by using a build plugin. +Maven includes plugins for [Eclipse](https://maven.apache.org/plugins/maven-eclipse-plugin/) and [IDEA](https://maven.apache.org/plugins/maven-idea-plugin/). +Gradle offers plugins for [various IDEs](https://docs.gradle.org/current/userguide/userguide.html). + +| |If you accidentally run a web application twice, you see a “Port already in use” error.
Spring Tools users can use the `Relaunch` button rather than the `Run` button to ensure that any existing instance is closed.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 7.2. Running as a Packaged Application + +If you use the Spring Boot Maven or Gradle plugins to create an executable jar, you can run your application using `java -jar`, as shown in the following example: + +``` +$ java -jar target/myapplication-0.0.1-SNAPSHOT.jar +``` + +It is also possible to run a packaged application with remote debugging support enabled. +Doing so lets you attach a debugger to your packaged application, as shown in the following example: + +``` +$ java -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=8000,suspend=n \ + -jar target/myapplication-0.0.1-SNAPSHOT.jar +``` + +### 7.3. Using the Maven Plugin + +The Spring Boot Maven plugin includes a `run` goal that can be used to quickly compile and run your application. +Applications run in an exploded form, as they do in your IDE. +The following example shows a typical Maven command to run a Spring Boot application: + +``` +$ mvn spring-boot:run +``` + +You might also want to use the `MAVEN_OPTS` operating system environment variable, as shown in the following example: + +``` +$ export MAVEN_OPTS=-Xmx1024m +``` + +### 7.4. Using the Gradle Plugin + +The Spring Boot Gradle plugin also includes a `bootRun` task that can be used to run your application in an exploded form. +The `bootRun` task is added whenever you apply the `org.springframework.boot` and `java` plugins and is shown in the following example: + +``` +$ gradle bootRun +``` + +You might also want to use the `JAVA_OPTS` operating system environment variable, as shown in the following example: + +``` +$ export JAVA_OPTS=-Xmx1024m +``` + +### 7.5. 
Hot Swapping + +Since Spring Boot applications are plain Java applications, JVM hot-swapping should work out of the box. +JVM hot swapping is somewhat limited with the bytecode that it can replace. +For a more complete solution, [JRebel](https://www.jrebel.com/products/jrebel) can be used. + +The `spring-boot-devtools` module also includes support for quick application restarts. +See the [Hot swapping “How-to”](howto.html#howto.hotswapping) for details. + +## 8. Developer Tools + +Spring Boot includes an additional set of tools that can make the application development experience a little more pleasant. +The `spring-boot-devtools` module can be included in any project to provide additional development-time features. +To include devtools support, add the module dependency to your build, as shown in the following listings for Maven and Gradle: + +Maven + +``` + + + org.springframework.boot + spring-boot-devtools + true + + +``` + +Gradle + +``` +dependencies { + developmentOnly("org.springframework.boot:spring-boot-devtools") +} +``` + +| |Devtools might cause classloading issues, in particular in multi-module projects.[Diagnosing Classloading Issues](#using.devtools.diagnosing-classloading-issues) explains how to diagnose and solve them.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Developer tools are automatically disabled when running a fully packaged application.
If your application is launched from `java -jar` or if it is started from a special classloader, then it is considered a “production application”.
You can control this behavior by using the `spring.devtools.restart.enabled` system property.
To enable devtools, irrespective of the classloader used to launch your application, set the `-Dspring.devtools.restart.enabled=true` system property.
This must not be done in a production environment where running devtools is a security risk.
To disable devtools, exclude the dependency or set the `-Dspring.devtools.restart.enabled=false` system property.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Flagging the dependency as optional in Maven or using the `developmentOnly` configuration in Gradle (as shown above) prevents devtools from being transitively applied to other modules that use your project.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Repackaged archives do not contain devtools by default.
If you want to use a [certain remote devtools feature](#using.devtools.remote-applications), you need to include it.
When using the Maven plugin, set the `excludeDevtools` property to `false`.
When using the Gradle plugin, [configure the task’s classpath to include the `developmentOnly` configuration](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/#packaging-executable-configuring-including-development-only-dependencies).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.1. Diagnosing Classloading Issues + +As described in the [Restart vs Reload](#using.devtools.restart.restart-vs-reload) section, restart functionality is implemented by using two classloaders. +For most applications, this approach works well. +However, it can sometimes cause classloading issues, in particular in multi-module projects. + +To diagnose whether the classloading issues are indeed caused by devtools and its two classloaders, [try disabling restart](#using.devtools.restart.disable). +If this solves your problems, [customize the restart classloader](#using.devtools.restart.customizing-the-classload) to include your entire project. + +### 8.2. Property Defaults + +Several of the libraries supported by Spring Boot use caches to improve performance. +For example, [template engines](web.html#web.servlet.spring-mvc.template-engines) cache compiled templates to avoid repeatedly parsing template files. +Also, Spring MVC can add HTTP caching headers to responses when serving static resources. + +While caching is very beneficial in production, it can be counter-productive during development, preventing you from seeing the changes you just made in your application. 
+For this reason, spring-boot-devtools disables the caching options by default. + +Cache options are usually configured by settings in your `application.properties` file. +For example, Thymeleaf offers the `spring.thymeleaf.cache` property. +Rather than needing to set these properties manually, the `spring-boot-devtools` module automatically applies sensible development-time configuration. + +Because you need more information about web requests while developing Spring MVC and Spring WebFlux applications, developer tools suggests that you enable `DEBUG` logging for the `web` logging group. +This will give you information about the incoming request, which handler is processing it, the response outcome, and other details. +If you wish to log all request details (including potentially sensitive information), you can turn on the `spring.mvc.log-request-details` or `spring.codec.log-request-details` configuration properties. + +| |If you do not want property defaults to be applied, you can set `spring.devtools.add-properties` to `false` in your `application.properties`.| +|--------------------------------------------------------------------------------------------------------------------------------------------| + +| |For a complete list of the properties that are applied by the devtools, see [DevToolsPropertyDefaultsPostProcessor](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/env/DevToolsPropertyDefaultsPostProcessor.java).| +|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.3. 
Automatic Restart + +Applications that use `spring-boot-devtools` automatically restart whenever files on the classpath change. +This can be a useful feature when working in an IDE, as it gives a very fast feedback loop for code changes. +By default, any entry on the classpath that points to a directory is monitored for changes. +Note that certain resources, such as static assets and view templates, [do not need to restart the application](#using.devtools.restart.excluding-resources). + +Triggering a restart + +As DevTools monitors classpath resources, the only way to trigger a restart is to update the classpath. +The way in which you cause the classpath to be updated depends on the IDE that you are using: + +* In Eclipse, saving a modified file causes the classpath to be updated and triggers a restart. + +* In IntelliJ IDEA, building the project (`Build +→+ Build Project`) has the same effect. + +* If using a build plugin, running `mvn compile` for Maven or `gradle build` for Gradle will trigger a restart. + +| |If you are restarting with Maven or Gradle using the build plugin you must leave the `forking` set to `enabled`.
If you disable forking, the isolated application classloader used by devtools will not be created and restarts will not operate properly.| +|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Automatic restart works very well when used with LiveReload. [See the LiveReload section](#using.devtools.livereload) for details.
If you use JRebel, automatic restarts are disabled in favor of dynamic class reloading.
Other devtools features (such as LiveReload and property overrides) can still be used.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |DevTools relies on the application context’s shutdown hook to close it during a restart.
It does not work correctly if you have disabled the shutdown hook (`SpringApplication.setRegisterShutdownHook(false)`).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |DevTools needs to customize the `ResourceLoader` used by the `ApplicationContext`.
If your application already provides one, it is wrapped.
Direct override of the `getResource` method on the `ApplicationContext` is not supported.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Automatic restart is not supported when using AspectJ weaving.| +|---|--------------------------------------------------------------| + +Restart vs Reload + +The restart technology provided by Spring Boot works by using two classloaders. +Classes that do not change (for example, those from third-party jars) are loaded into a *base* classloader. +Classes that you are actively developing are loaded into a *restart* classloader. +When the application is restarted, the *restart* classloader is thrown away and a new one is created. +This approach means that application restarts are typically much faster than “cold starts”, since the *base* classloader is already available and populated. + +If you find that restarts are not quick enough for your applications or you encounter classloading issues, you could consider reloading technologies such as [JRebel](https://jrebel.com/software/jrebel/) from ZeroTurnaround. +These work by rewriting classes as they are loaded to make them more amenable to reloading. + +#### 8.3.1. Logging changes in condition evaluation + +By default, each time your application restarts, a report showing the condition evaluation delta is logged. +The report shows the changes to your application’s auto-configuration as you make changes such as adding or removing beans and setting configuration properties. + +To disable the logging of the report, set the following property: + +Properties + +``` +spring.devtools.restart.log-condition-evaluation-delta=false +``` + +Yaml + +``` +spring: + devtools: + restart: + log-condition-evaluation-delta: false +``` + +#### 8.3.2. 
Excluding Resources + +Certain resources do not necessarily need to trigger a restart when they are changed. +For example, Thymeleaf templates can be edited in-place. +By default, changing resources in `/META-INF/maven`, `/META-INF/resources`, `/resources`, `/static`, `/public`, or `/templates` does not trigger a restart but does trigger a [live reload](#using.devtools.livereload). +If you want to customize these exclusions, you can use the `spring.devtools.restart.exclude` property. +For example, to exclude only `/static` and `/public` you would set the following property: + +Properties + +``` +spring.devtools.restart.exclude=static/**,public/** +``` + +Yaml + +``` +spring: + devtools: + restart: + exclude: "static/**,public/**" +``` + +| |If you want to keep those defaults and *add* additional exclusions, use the `spring.devtools.restart.additional-exclude` property instead.| +|---|------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.3. Watching Additional Paths + +You may want your application to be restarted or reloaded when you make changes to files that are not on the classpath. +To do so, use the `spring.devtools.restart.additional-paths` property to configure additional paths to watch for changes. +You can use the `spring.devtools.restart.exclude` property [described earlier](#using.devtools.restart.excluding-resources) to control whether changes beneath the additional paths trigger a full restart or a [live reload](#using.devtools.livereload). + +#### 8.3.4. Disabling Restart + +If you do not want to use the restart feature, you can disable it by using the `spring.devtools.restart.enabled` property. +In most cases, you can set this property in your `application.properties` (doing so still initializes the restart classloader, but it does not watch for file changes). 
+ +If you need to *completely* disable restart support (for example, because it does not work with a specific library), you need to set the `spring.devtools.restart.enabled` `System` property to `false` before calling `SpringApplication.run(…​)`, as shown in the following example: + +``` +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class MyApplication { + + public static void main(String[] args) { + System.setProperty("spring.devtools.restart.enabled", "false"); + SpringApplication.run(MyApplication.class, args); + } + +} + +``` + +#### 8.3.5. Using a Trigger File + +If you work with an IDE that continuously compiles changed files, you might prefer to trigger restarts only at specific times. +To do so, you can use a “trigger file”, which is a special file that must be modified when you want to actually trigger a restart check. + +| |Any update to the file will trigger a check, but restart only actually occurs if Devtools has detected it has something to do.| +|---|------------------------------------------------------------------------------------------------------------------------------| + +To use a trigger file, set the `spring.devtools.restart.trigger-file` property to the name (excluding any path) of your trigger file. +The trigger file must appear somewhere on your classpath. + +For example, if you have a project with the following structure: + +``` +src ++- main + +- resources + +- .reloadtrigger +``` + +Then your `trigger-file` property would be: + +Properties + +``` +spring.devtools.restart.trigger-file=.reloadtrigger +``` + +Yaml + +``` +spring: + devtools: + restart: + trigger-file: ".reloadtrigger" +``` + +Restarts will now only happen when the `src/main/resources/.reloadtrigger` is updated. 
+ +| |You might want to set `spring.devtools.restart.trigger-file` as a [global setting](#using.devtools.globalsettings), so that all your projects behave in the same way.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Some IDEs have features that save you from needing to update your trigger file manually.[Spring Tools for Eclipse](https://spring.io/tools) and [IntelliJ IDEA (Ultimate Edition)](https://www.jetbrains.com/idea/) both have such support. +With Spring Tools, you can use the “reload” button from the console view (as long as your `trigger-file` is named `.reloadtrigger`). +For IntelliJ IDEA, you can follow the [instructions in their documentation](https://www.jetbrains.com/help/idea/spring-boot.html#application-update-policies). + +#### 8.3.6. Customizing the Restart Classloader + +As described earlier in the [Restart vs Reload](#using.devtools.restart.restart-vs-reload) section, restart functionality is implemented by using two classloaders. +If this causes issues, you might need to customize what gets loaded by which classloader. + +By default, any open project in your IDE is loaded with the “restart” classloader, and any regular `.jar` file is loaded with the “base” classloader. +The same is true if you use `mvn spring-boot:run` or `gradle bootRun`: the project containing your `@SpringBootApplication` is loaded with the “restart” classloader, and everything else with the “base” classloader. + +You can instruct Spring Boot to load parts of your project with a different classloader by creating a `META-INF/spring-devtools.properties` file. +The `spring-devtools.properties` file can contain properties prefixed with `restart.exclude` and `restart.include`. 
+The `include` elements are items that should be pulled up into the “restart” classloader, and the `exclude` elements are items that should be pushed down into the “base” classloader. +The value of the property is a regex pattern that is applied to the classpath, as shown in the following example: + +Properties + +``` +restart.exclude.companycommonlibs=/mycorp-common-[\\w\\d-\\.]+\\.jar +restart.include.projectcommon=/mycorp-myproj-[\\w\\d-\\.]+\\.jar +``` + +Yaml + +``` +restart: + exclude: + companycommonlibs: "/mycorp-common-[\\w\\d-\\.]+\\.jar" + include: + projectcommon: "/mycorp-myproj-[\\w\\d-\\.]+\\.jar" +``` + +| |All property keys must be unique.
As long as a property starts with `restart.include.` or `restart.exclude.` it is considered.| +|---|----------------------------------------------------------------------------------------------------------------------------------| + +| |All `META-INF/spring-devtools.properties` from the classpath are loaded.
You can package files inside your project, or in the libraries that the project consumes.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.3.7. Known Limitations + +Restart functionality does not work well with objects that are deserialized by using a standard `ObjectInputStream`. +If you need to deserialize data, you may need to use Spring’s `ConfigurableObjectInputStream` in combination with `Thread.currentThread().getContextClassLoader()`. + +Unfortunately, several third-party libraries deserialize without considering the context classloader. +If you find such a problem, you need to request a fix with the original authors. + +### 8.4. LiveReload + +The `spring-boot-devtools` module includes an embedded LiveReload server that can be used to trigger a browser refresh when a resource is changed. +LiveReload browser extensions are freely available for Chrome, Firefox and Safari from [livereload.com](http://livereload.com/extensions/). + +If you do not want to start the LiveReload server when your application runs, you can set the `spring.devtools.livereload.enabled` property to `false`. + +| |You can only run one LiveReload server at a time.
Before starting your application, ensure that no other LiveReload servers are running.
If you start multiple applications from your IDE, only the first has LiveReload support.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |To trigger LiveReload when a file changes, [Automatic Restart](#using.devtools.restart) must be enabled.| +|---|--------------------------------------------------------------------------------------------------------| + +### 8.5. Global Settings + +You can configure global devtools settings by adding any of the following files to the `$HOME/.config/spring-boot` directory: + +1. `spring-boot-devtools.properties` + +2. `spring-boot-devtools.yaml` + +3. `spring-boot-devtools.yml` + +Any properties added to these files apply to *all* Spring Boot applications on your machine that use devtools. +For example, to configure restart to always use a [trigger file](#using.devtools.restart.triggerfile), you would add the following property to your `spring-boot-devtools` file: + +Properties + +``` +spring.devtools.restart.trigger-file=.reloadtrigger +``` + +Yaml + +``` +spring: + devtools: + restart: + trigger-file: ".reloadtrigger" +``` + +By default, `$HOME` is the user’s home directory. +To customize this location, set the `SPRING_DEVTOOLS_HOME` environment variable or the `spring.devtools.home` system property. + +| |If devtools configuration files are not found in `$HOME/.config/spring-boot`, the root of the `$HOME` directory is searched for the presence of a `.spring-boot-devtools.properties` file.
This allows you to share the devtools global configuration with applications that are on an older version of Spring Boot that does not support the `$HOME/.config/spring-boot` location.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Profiles are not supported in devtools properties/yaml files.

Any profiles activated in `.spring-boot-devtools.properties` will not affect the loading of [profile-specific configuration files](features.html#features.external-config.files.profile-specific).
Profile specific filenames (of the form `spring-boot-devtools-<profile>.properties`) and `spring.config.activate.on-profile` documents in both YAML and Properties files are not supported.| +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.5.1. Configuring File System Watcher + +[FileSystemWatcher](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/filewatch/FileSystemWatcher.java) works by polling the class changes with a certain time interval, and then waiting for a predefined quiet period to make sure there are no more changes. +Since Spring Boot relies entirely on the IDE to compile and copy files into the location from where Spring Boot can read them, you might find that there are times when certain changes are not reflected when devtools restarts the application. +If you observe such problems constantly, try increasing the `spring.devtools.restart.poll-interval` and `spring.devtools.restart.quiet-period` parameters to the values that fit your development environment: + +Properties + +``` +spring.devtools.restart.poll-interval=2s +spring.devtools.restart.quiet-period=1s +``` + +Yaml + +``` +spring: + devtools: + restart: + poll-interval: "2s" + quiet-period: "1s" +``` + +The monitored classpath directories are now polled every 2 seconds for changes, and a 1 second quiet period is maintained to make sure there are no additional class changes. + +### 8.6. Remote Applications + +The Spring Boot developer tools are not limited to local development. 
+You can also use several features when running applications remotely. +Remote support is opt-in as enabling it can be a security risk. +It should only be enabled when running on a trusted network or when secured with SSL. +If neither of these options is available to you, you should not use DevTools' remote support. +You should never enable support on a production deployment. + +To enable it, you need to make sure that `devtools` is included in the repackaged archive, as shown in the following listing: + +``` +<build> + <plugins> + <plugin> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-maven-plugin</artifactId> + <configuration> + <excludeDevtools>false</excludeDevtools> + </configuration> + </plugin> + </plugins> +</build> +``` + +Then you need to set the `spring.devtools.remote.secret` property. +Like any important password or secret, the value should be unique and strong such that it cannot be guessed or brute-forced. + +Remote devtools support is provided in two parts: a server-side endpoint that accepts connections and a client application that you run in your IDE. +The server component is automatically enabled when the `spring.devtools.remote.secret` property is set. +The client component must be launched manually. + +| |Remote devtools is not supported for Spring WebFlux applications.| +|---|-----------------------------------------------------------------| + +#### 8.6.1. Running the Remote Client Application + +The remote client application is designed to be run from within your IDE. +You need to run `org.springframework.boot.devtools.RemoteSpringApplication` with the same classpath as the remote project that you connect to. +The application’s single required argument is the remote URL to which it connects. + +For example, if you are using Eclipse or Spring Tools and you have a project named `my-app` that you have deployed to Cloud Foundry, you would do the following: + +* Select `Run Configurations…​` from the `Run` menu. + +* Create a new `Java Application` “launch configuration”. + +* Browse for the `my-app` project. 
+ +* Use `org.springframework.boot.devtools.RemoteSpringApplication` as the main class. + +* Add `https://myapp.cfapps.io` to the `Program arguments` (or whatever your remote URL is). + +A running remote client might resemble the following listing: + +``` + . ____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ ___ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | | _ \___ _ __ ___| |_ ___ \ \ \ \ + \\/ ___)| |_)| | | | | || (_| []::::::[] / -_) ' \/ _ \ _/ -_) ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | |_|_\___|_|_|_\___/\__\___|/ / / / + =========|_|==============|___/===================================/_/_/_/ + :: Spring Boot Remote :: 2.6.4 + +2015-06-10 18:25:06.632 INFO 14938 --- [ main] o.s.b.devtools.RemoteSpringApplication : Starting RemoteSpringApplication on pwmbp with PID 14938 (/Users/pwebb/projects/spring-boot/code/spring-boot-project/spring-boot-devtools/target/classes started by pwebb in /Users/pwebb/projects/spring-boot/code) +2015-06-10 18:25:06.671 INFO 14938 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.spring[email protected]2a17b7b6: startup date [Wed Jun 10 18:25:06 PDT 2015]; root of context hierarchy +2015-06-10 18:25:07.043 WARN 14938 --- [ main] o.s.b.d.r.c.RemoteClientConfiguration : The connection to http://localhost:8080 is insecure. You should use a URL starting with 'https://'. +2015-06-10 18:25:07.074 INFO 14938 --- [ main] o.s.b.d.a.OptionalLiveReloadServer : LiveReload server is running on port 35729 +2015-06-10 18:25:07.130 INFO 14938 --- [ main] o.s.b.devtools.RemoteSpringApplication : Started RemoteSpringApplication in 0.74 seconds (JVM running for 1.105) +``` + +| |Because the remote client is using the same classpath as the real application it can directly read application properties.
This is how the `spring.devtools.remote.secret` property is read and passed to the server for authentication.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |It is always advisable to use `https://` as the connection protocol, so that traffic is encrypted and passwords cannot be intercepted.| +|---|--------------------------------------------------------------------------------------------------------------------------------------| + +| |If you need to use a proxy to access the remote application, configure the `spring.devtools.remote.proxy.host` and `spring.devtools.remote.proxy.port` properties.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.6.2. Remote Update + +The remote client monitors your application classpath for changes in the same way as the [local restart](#using.devtools.restart). +Any updated resource is pushed to the remote application and (*if required*) triggers a restart. +This can be helpful if you iterate on a feature that uses a cloud service that you do not have locally. +Generally, remote updates and restarts are much quicker than a full rebuild and deploy cycle. + +On a slower development environment, it may happen that the quiet period is not enough, and the changes in the classes may be split into batches. +The server is restarted after the first batch of class changes is uploaded. +The next batch can’t be sent to the application, since the server is restarting. + +This is typically manifested by a warning in the `RemoteSpringApplication` logs about failing to upload some of the classes, and a consequent retry. 
+But it may also lead to application code inconsistency and failure to restart after the first batch of changes is uploaded. +If you observe such problems constantly, try increasing the `spring.devtools.restart.poll-interval` and `spring.devtools.restart.quiet-period` parameters to the values that fit your development environment. +See the [Configuring File System Watcher](#using.devtools.globalsettings.configuring-file-system-watcher) section for configuring these properties. + +| |Files are only monitored when the remote client is running.
If you change a file before starting the remote client, it is not pushed to the remote server.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 9. Packaging Your Application for Production + +Executable jars can be used for production deployment. +As they are self-contained, they are also ideally suited for cloud-based deployment. + +For additional “production ready” features, such as health, auditing, and metric REST or JMX end-points, consider adding `spring-boot-actuator`. +See *[actuator.html](actuator.html#actuator)* for details. + +## 10. What to Read Next + +You should now understand how you can use Spring Boot and some best practices that you should follow. +You can now go on to learn about specific *[Spring Boot features](features.html#features)* in depth, or you could skip ahead and read about the “[production ready](actuator.html#actuator)” aspects of Spring Boot. diff --git a/docs/en/spring-boot/web.md b/docs/en/spring-boot/web.md new file mode 100644 index 0000000000000000000000000000000000000000..caa98829252f321490a31c5ec016ed655f74af30 --- /dev/null +++ b/docs/en/spring-boot/web.md @@ -0,0 +1,2045 @@ +# Web + +Spring Boot is well suited for web application development. +You can create a self-contained HTTP server by using embedded Tomcat, Jetty, Undertow, or Netty. +Most web applications use the `spring-boot-starter-web` module to get up and running quickly. +You can also choose to build reactive web applications by using the `spring-boot-starter-webflux` module. + +If you have not yet developed a Spring Boot web application, you can follow the "Hello World!" example in the *[Getting started](getting-started.html#getting-started.first-application)* section. + +## 1. Servlet Web Applications + +If you want to build servlet-based web applications, you can take advantage of Spring Boot’s auto-configuration for Spring MVC or Jersey. 
+ +### 1.1. The “Spring Web MVC Framework” + +The [Spring Web MVC framework](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc) (often referred to as “Spring MVC”) is a rich “model view controller” web framework. +Spring MVC lets you create special `@Controller` or `@RestController` beans to handle incoming HTTP requests. +Methods in your controller are mapped to HTTP by using `@RequestMapping` annotations. + +The following code shows a typical `@RestController` that serves JSON data: + +``` +import java.util.List; + +import org.springframework.web.bind.annotation.DeleteMapping; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@RequestMapping("/users") +public class MyRestController { + + private final UserRepository userRepository; + + private final CustomerRepository customerRepository; + + public MyRestController(UserRepository userRepository, CustomerRepository customerRepository) { + this.userRepository = userRepository; + this.customerRepository = customerRepository; + } + + @GetMapping("/{user}") + public User getUser(@PathVariable Long userId) { + return this.userRepository.findById(userId).get(); + } + + @GetMapping("/{user}/customers") + public List getUserCustomers(@PathVariable Long userId) { + return this.userRepository.findById(userId).map(this.customerRepository::findByUser).get(); + } + + @DeleteMapping("/{user}") + public void deleteUser(@PathVariable Long userId) { + this.userRepository.deleteById(userId); + } + +} + +``` + +“WebMvc.fn”, the functional variant, separates the routing configuration from the actual handling of the requests, as shown in the following example: + +``` +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import 
org.springframework.http.MediaType; +import org.springframework.web.servlet.function.RequestPredicate; +import org.springframework.web.servlet.function.RouterFunction; +import org.springframework.web.servlet.function.ServerResponse; + +import static org.springframework.web.servlet.function.RequestPredicates.accept; +import static org.springframework.web.servlet.function.RouterFunctions.route; + +@Configuration(proxyBeanMethods = false) +public class MyRoutingConfiguration { + + private static final RequestPredicate ACCEPT_JSON = accept(MediaType.APPLICATION_JSON); + + @Bean + public RouterFunction<ServerResponse> routerFunction(MyUserHandler userHandler) { + return route() + .GET("/{user}", ACCEPT_JSON, userHandler::getUser) + .GET("/{user}/customers", ACCEPT_JSON, userHandler::getUserCustomers) + .DELETE("/{user}", ACCEPT_JSON, userHandler::deleteUser) + .build(); + } + +} + +``` + +``` +import org.springframework.stereotype.Component; +import org.springframework.web.servlet.function.ServerRequest; +import org.springframework.web.servlet.function.ServerResponse; + +@Component +public class MyUserHandler { + + public ServerResponse getUser(ServerRequest request) { + ... + return ServerResponse.ok().build(); + } + + public ServerResponse getUserCustomers(ServerRequest request) { + ... + return ServerResponse.ok().build(); + } + + public ServerResponse deleteUser(ServerRequest request) { + ... + return ServerResponse.ok().build(); + } + +} + +``` + +Spring MVC is part of the core Spring Framework, and detailed information is available in the [reference documentation](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc). +There are also several guides that cover Spring MVC available at [spring.io/guides](https://spring.io/guides). + +| |You can define as many `RouterFunction` beans as you like to modularize the definition of the router.
Beans can be ordered if you need to apply a precedence.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.1. Spring MVC Auto-configuration + +Spring Boot provides auto-configuration for Spring MVC that works well with most applications. + +The auto-configuration adds the following features on top of Spring’s defaults: + +* Inclusion of `ContentNegotiatingViewResolver` and `BeanNameViewResolver` beans. + +* Support for serving static resources, including support for WebJars (covered [later in this document](features.html#web.servlet.spring-mvc.static-content)). + +* Automatic registration of `Converter`, `GenericConverter`, and `Formatter` beans. + +* Support for `HttpMessageConverters` (covered [later in this document](features.html#web.servlet.spring-mvc.message-converters)). + +* Automatic registration of `MessageCodesResolver` (covered [later in this document](features.html#web.servlet.spring-mvc.message-codes)). + +* Static `index.html` support. + +* Automatic use of a `ConfigurableWebBindingInitializer` bean (covered [later in this document](features.html#web.servlet.spring-mvc.binding-initializer)). + +If you want to keep those Spring Boot MVC customizations and make more [MVC customizations](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc) (interceptors, formatters, view controllers, and other features), you can add your own `@Configuration` class of type `WebMvcConfigurer` but **without** `@EnableWebMvc`. + +If you want to provide custom instances of `RequestMappingHandlerMapping`, `RequestMappingHandlerAdapter`, or `ExceptionHandlerExceptionResolver`, and still keep the Spring Boot MVC customizations, you can declare a bean of type `WebMvcRegistrations` and use it to provide custom instances of those components. 
+ +If you want to take complete control of Spring MVC, you can add your own `@Configuration` annotated with `@EnableWebMvc`, or alternatively add your own `@Configuration`-annotated `DelegatingWebMvcConfiguration` as described in the Javadoc of `@EnableWebMvc`. + +| |Spring MVC uses a different `ConversionService` to the one used to convert values from your `application.properties` or `application.yaml` file.
It means that `Period`, `Duration` and `DataSize` converters are not available and that `@DurationUnit` and `@DataSizeUnit` annotations will be ignored.

If you want to customize the `ConversionService` used by Spring MVC, you can provide a `WebMvcConfigurer` bean with an `addFormatters` method.
From this method you can register any converter that you like, or you can delegate to the static methods available on `ApplicationConversionService`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.2. HttpMessageConverters + +Spring MVC uses the `HttpMessageConverter` interface to convert HTTP requests and responses. +Sensible defaults are included out of the box. +For example, objects can be automatically converted to JSON (by using the Jackson library) or XML (by using the Jackson XML extension, if available, or by using JAXB if the Jackson XML extension is not available). +By default, strings are encoded in `UTF-8`. 
+ +If you need to add or customize converters, you can use Spring Boot’s `HttpMessageConverters` class, as shown in the following listing: + +``` +import org.springframework.boot.autoconfigure.http.HttpMessageConverters; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.http.converter.HttpMessageConverter; + +@Configuration(proxyBeanMethods = false) +public class MyHttpMessageConvertersConfiguration { + + @Bean + public HttpMessageConverters customConverters() { + HttpMessageConverter additional = new AdditionalHttpMessageConverter(); + HttpMessageConverter another = new AnotherHttpMessageConverter(); + return new HttpMessageConverters(additional, another); + } + +} + +``` + +Any `HttpMessageConverter` bean that is present in the context is added to the list of converters. +You can also override default converters in the same way. + +#### 1.1.3. Custom JSON Serializers and Deserializers + +If you use Jackson to serialize and deserialize JSON data, you might want to write your own `JsonSerializer` and `JsonDeserializer` classes. +Custom serializers are usually [registered with Jackson through a module](https://github.com/FasterXML/jackson-docs/wiki/JacksonHowToCustomSerializers), but Spring Boot provides an alternative `@JsonComponent` annotation that makes it easier to directly register Spring Beans. + +You can use the `@JsonComponent` annotation directly on `JsonSerializer`, `JsonDeserializer` or `KeyDeserializer` implementations. 
+You can also use it on classes that contain serializers/deserializers as inner classes, as shown in the following example: + +``` +import java.io.IOException; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.ObjectCodec; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; + +import org.springframework.boot.jackson.JsonComponent; + +@JsonComponent +public class MyJsonComponent { + + public static class Serializer extends JsonSerializer { + + @Override + public void serialize(MyObject value, JsonGenerator jgen, SerializerProvider serializers) throws IOException { + jgen.writeStringField("name", value.getName()); + jgen.writeNumberField("age", value.getAge()); + } + + } + + public static class Deserializer extends JsonDeserializer { + + @Override + public MyObject deserialize(JsonParser jsonParser, DeserializationContext ctxt) + throws IOException, JsonProcessingException { + ObjectCodec codec = jsonParser.getCodec(); + JsonNode tree = codec.readTree(jsonParser); + String name = tree.get("name").textValue(); + int age = tree.get("age").intValue(); + return new MyObject(name, age); + } + + } + +} + +``` + +All `@JsonComponent` beans in the `ApplicationContext` are automatically registered with Jackson. +Because `@JsonComponent` is meta-annotated with `@Component`, the usual component-scanning rules apply. 
+ +Spring Boot also provides [`JsonObjectSerializer`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/jackson/JsonObjectSerializer.java) and [`JsonObjectDeserializer`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/jackson/JsonObjectDeserializer.java) base classes that provide useful alternatives to the standard Jackson versions when serializing objects. +See [`JsonObjectSerializer`](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/jackson/JsonObjectSerializer.html) and [`JsonObjectDeserializer`](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/jackson/JsonObjectDeserializer.html) in the Javadoc for details. + +The example above can be rewritten to use `JsonObjectSerializer`/`JsonObjectDeserializer` as follows: + +``` +import java.io.IOException; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.ObjectCodec; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.SerializerProvider; + +import org.springframework.boot.jackson.JsonComponent; +import org.springframework.boot.jackson.JsonObjectDeserializer; +import org.springframework.boot.jackson.JsonObjectSerializer; + +@JsonComponent +public class MyJsonComponent { + + public static class Serializer extends JsonObjectSerializer { + + @Override + protected void serializeObject(MyObject value, JsonGenerator jgen, SerializerProvider provider) + throws IOException { + jgen.writeStringField("name", value.getName()); + jgen.writeNumberField("age", value.getAge()); + } + + } + + public static class Deserializer extends JsonObjectDeserializer { + + @Override + protected MyObject deserializeObject(JsonParser jsonParser, 
DeserializationContext context, ObjectCodec codec, + JsonNode tree) throws IOException { + String name = nullSafeValue(tree.get("name"), String.class); + int age = nullSafeValue(tree.get("age"), Integer.class); + return new MyObject(name, age); + } + + } + +} + +``` + +#### 1.1.4. MessageCodesResolver + +Spring MVC has a strategy for generating error codes for rendering error messages from binding errors: `MessageCodesResolver`. +If you set the `spring.mvc.message-codes-resolver-format` property `PREFIX_ERROR_CODE` or `POSTFIX_ERROR_CODE`, Spring Boot creates one for you (see the enumeration in [`DefaultMessageCodesResolver.Format`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/validation/DefaultMessageCodesResolver.Format.html)). + +#### 1.1.5. Static Content + +By default, Spring Boot serves static content from a directory called `/static` (or `/public` or `/resources` or `/META-INF/resources`) in the classpath or from the root of the `ServletContext`. +It uses the `ResourceHttpRequestHandler` from Spring MVC so that you can modify that behavior by adding your own `WebMvcConfigurer` and overriding the `addResourceHandlers` method. + +In a stand-alone web application, the default servlet from the container is also enabled and acts as a fallback, serving content from the root of the `ServletContext` if Spring decides not to handle it. +Most of the time, this does not happen (unless you modify the default MVC configuration), because Spring can always handle requests through the `DispatcherServlet`. + +By default, resources are mapped on `/**`, but you can tune that with the `spring.mvc.static-path-pattern` property. 
+For instance, relocating all resources to `/resources/**` can be achieved as follows: + +Properties + +``` +spring.mvc.static-path-pattern=/resources/** +``` + +Yaml + +``` +spring: + mvc: + static-path-pattern: "/resources/**" +``` + +You can also customize the static resource locations by using the `spring.web.resources.static-locations` property (replacing the default values with a list of directory locations). +The root servlet context path, `"/"`, is automatically added as a location as well. + +In addition to the “standard” static resource locations mentioned earlier, a special case is made for [Webjars content](https://www.webjars.org/). +Any resources with a path in `/webjars/**` are served from jar files if they are packaged in the Webjars format. + +| |Do not use the `src/main/webapp` directory if your application is packaged as a jar.
Although this directory is a common standard, it works **only** with war packaging, and it is silently ignored by most build tools if you generate a jar.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Boot also supports the advanced resource handling features provided by Spring MVC, allowing use cases such as cache-busting static resources or using version agnostic URLs for Webjars. + +To use version agnostic URLs for Webjars, add the `webjars-locator-core` dependency. +Then declare your Webjar. +Using jQuery as an example, adding `"/webjars/jquery/jquery.min.js"` results in `"/webjars/jquery/x.y.z/jquery.min.js"` where `x.y.z` is the Webjar version. + +| |If you use JBoss, you need to declare the `webjars-locator-jboss-vfs` dependency instead of the `webjars-locator-core`.
Otherwise, all Webjars resolve as a `404`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To use cache busting, the following configuration configures a cache busting solution for all static resources, effectively adding a content hash, such as ``, in URLs: + +Properties + +``` +spring.web.resources.chain.strategy.content.enabled=true +spring.web.resources.chain.strategy.content.paths=/** +``` + +Yaml + +``` +spring: + web: + resources: + chain: + strategy: + content: + enabled: true + paths: "/**" +``` + +| |Links to resources are rewritten in templates at runtime, thanks to a `ResourceUrlEncodingFilter` that is auto-configured for Thymeleaf and FreeMarker.
You should manually declare this filter when using JSPs.
Other template engines are currently not automatically supported but can be with custom template macros/helpers and the use of the [`ResourceUrlProvider`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/resource/ResourceUrlProvider.html).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When loading resources dynamically with, for example, a JavaScript module loader, renaming files is not an option. +That is why other strategies are also supported and can be combined. +A "fixed" strategy adds a static version string in the URL without changing the file name, as shown in the following example: + +Properties + +``` +spring.web.resources.chain.strategy.content.enabled=true +spring.web.resources.chain.strategy.content.paths=/** +spring.web.resources.chain.strategy.fixed.enabled=true +spring.web.resources.chain.strategy.fixed.paths=/js/lib/ +spring.web.resources.chain.strategy.fixed.version=v12 +``` + +Yaml + +``` +spring: + web: + resources: + chain: + strategy: + content: + enabled: true + paths: "/**" + fixed: + enabled: true + paths: "/js/lib/" + version: "v12" +``` + +With this configuration, JavaScript modules located under `"/js/lib/"` use a fixed versioning strategy (`"/v12/js/lib/mymodule.js"`), while other resources still use the content one (``). 
+ +See [`WebProperties.Resources`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/WebProperties.java) for more supported options. + +| |This feature has been thoroughly described in a dedicated [blog post](https://spring.io/blog/2014/07/24/spring-framework-4-1-handling-static-web-resources) and in Spring Framework’s [reference documentation](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc-config-static-resources).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.6. Welcome Page + +Spring Boot supports both static and templated welcome pages. +It first looks for an `index.html` file in the configured static content locations. +If one is not found, it then looks for an `index` template. +If either is found, it is automatically used as the welcome page of the application. + +#### 1.1.7. Path Matching and Content Negotiation + +Spring MVC can map incoming HTTP requests to handlers by looking at the request path and matching it to the mappings defined in your application (for example, `@GetMapping` annotations on Controller methods). + +Spring Boot chooses to disable suffix pattern matching by default, which means that requests like `"GET /projects/spring-boot.json"` will not be matched to `@GetMapping("/projects/spring-boot")` mappings. +This is considered as a [best practice for Spring MVC applications](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc-ann-requestmapping-suffix-pattern-match). 
+This feature was mainly useful in the past for HTTP clients which did not send proper "Accept" request headers; we needed to make sure to send the correct Content Type to the client. +Nowadays, Content Negotiation is much more reliable. + +There are other ways to deal with HTTP clients that do not consistently send proper "Accept" request headers. +Instead of using suffix matching, we can use a query parameter to ensure that requests like `"GET /projects/spring-boot?format=json"` will be mapped to `@GetMapping("/projects/spring-boot")`: + +Properties + +``` +spring.mvc.contentnegotiation.favor-parameter=true +``` + +Yaml + +``` +spring: + mvc: + contentnegotiation: + favor-parameter: true +``` + +Or if you prefer to use a different parameter name: + +Properties + +``` +spring.mvc.contentnegotiation.favor-parameter=true +spring.mvc.contentnegotiation.parameter-name=myparam +``` + +Yaml + +``` +spring: + mvc: + contentnegotiation: + favor-parameter: true + parameter-name: "myparam" +``` + +Most standard media types are supported out-of-the-box, but you can also define new ones: + +Properties + +``` +spring.mvc.contentnegotiation.media-types.markdown=text/markdown +``` + +Yaml + +``` +spring: + mvc: + contentnegotiation: + media-types: + markdown: "text/markdown" +``` + +Suffix pattern matching is deprecated and will be removed in a future release. 
+If you understand the caveats and would still like your application to use suffix pattern matching, the following configuration is required: + +Properties + +``` +spring.mvc.contentnegotiation.favor-path-extension=true +spring.mvc.pathmatch.use-suffix-pattern=true +``` + +Yaml + +``` +spring: + mvc: + contentnegotiation: + favor-path-extension: true + pathmatch: + use-suffix-pattern: true +``` + +Alternatively, rather than open all suffix patterns, it is more secure to only support registered suffix patterns: + +Properties + +``` +spring.mvc.contentnegotiation.favor-path-extension=true +spring.mvc.pathmatch.use-registered-suffix-pattern=true +``` + +Yaml + +``` +spring: + mvc: + contentnegotiation: + favor-path-extension: true + pathmatch: + use-registered-suffix-pattern: true +``` + +As of Spring Framework 5.3, Spring MVC supports several implementation strategies for matching request paths to Controller handlers. +It was previously only supporting the `AntPathMatcher` strategy, but it now also offers `PathPatternParser`. +Spring Boot now provides a configuration property to choose and opt in the new strategy: + +Properties + +``` +spring.mvc.pathmatch.matching-strategy=path-pattern-parser +``` + +Yaml + +``` +spring: + mvc: + pathmatch: + matching-strategy: "path-pattern-parser" +``` + +For more details on why you should consider this new implementation, see the[dedicated blog post](https://spring.io/blog/2020/06/30/url-matching-with-pathpattern-in-spring-mvc). 
+ +| |`PathPatternParser` is an optimized implementation but restricts usage of [some path pattern variants](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc-ann-requestmapping-uri-templates) and is incompatible with suffix pattern matching (`spring.mvc.pathmatch.use-suffix-pattern`, `spring.mvc.pathmatch.use-registered-suffix-pattern`) or mapping the `DispatcherServlet` with a servlet prefix (`spring.mvc.servlet.path`).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.8. ConfigurableWebBindingInitializer + +Spring MVC uses a `WebBindingInitializer` to initialize a `WebDataBinder` for a particular request. +If you create your own `ConfigurableWebBindingInitializer` `@Bean`, Spring Boot automatically configures Spring MVC to use it. + +#### 1.1.9. Template Engines + +As well as REST web services, you can also use Spring MVC to serve dynamic HTML content. +Spring MVC supports a variety of templating technologies, including Thymeleaf, FreeMarker, and JSPs. +Also, many other templating engines include their own Spring MVC integrations. + +Spring Boot includes auto-configuration support for the following templating engines: + +* [FreeMarker](https://freemarker.apache.org/docs/) + +* [Groovy](https://docs.groovy-lang.org/docs/next/html/documentation/template-engines.html#_the_markuptemplateengine) + +* [Thymeleaf](https://www.thymeleaf.org) + +* [Mustache](https://mustache.github.io/) + +| |If possible, JSPs should be avoided.
There are several [known limitations](#web.servlet.embedded-container.jsp-limitations) when using them with embedded servlet containers.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When you use one of these templating engines with the default configuration, your templates are picked up automatically from `src/main/resources/templates`. + +| |Depending on how you run your application, your IDE may order the classpath differently.
Running your application in the IDE from its main method results in a different ordering than when you run your application by using Maven or Gradle or from its packaged jar.
This can cause Spring Boot to fail to find the expected template.
If you have this problem, you can reorder the classpath in the IDE to place the module’s classes and resources first.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.10. Error Handling + +By default, Spring Boot provides an `/error` mapping that handles all errors in a sensible way, and it is registered as a “global” error page in the servlet container. +For machine clients, it produces a JSON response with details of the error, the HTTP status, and the exception message. +For browser clients, there is a “whitelabel” error view that renders the same data in HTML format (to customize it, add a `View` that resolves to `error`). + +There are a number of `server.error` properties that can be set if you want to customize the default error handling behavior. +See the [“Server Properties”](application-properties.html#appendix.application-properties.server) section of the Appendix. + +To replace the default behavior completely, you can implement `ErrorController` and register a bean definition of that type or add a bean of type `ErrorAttributes` to use the existing mechanism but replace the contents. + +| |The `BasicErrorController` can be used as a base class for a custom `ErrorController`.
This is particularly useful if you want to add a handler for a new content type (the default is to handle `text/html` specifically and provide a fallback for everything else).
To do so, extend `BasicErrorController`, add a public method with a `@RequestMapping` that has a `produces` attribute, and create a bean of your new type.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also define a class annotated with `@ControllerAdvice` to customize the JSON document to return for a particular controller and/or exception type, as shown in the following example: + +``` +import javax.servlet.RequestDispatcher; +import javax.servlet.http.HttpServletRequest; + +import org.springframework.http.HttpStatus; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.ControllerAdvice; +import org.springframework.web.bind.annotation.ExceptionHandler; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.servlet.mvc.method.annotation.ResponseEntityExceptionHandler; + +@ControllerAdvice(basePackageClasses = SomeController.class) +public class MyControllerAdvice extends ResponseEntityExceptionHandler { + + @ResponseBody + @ExceptionHandler(MyException.class) + public ResponseEntity handleControllerException(HttpServletRequest request, Throwable ex) { + HttpStatus status = getStatus(request); + return new ResponseEntity<>(new MyErrorBody(status.value(), ex.getMessage()), status); + } + + private HttpStatus getStatus(HttpServletRequest request) { + Integer code = (Integer) request.getAttribute(RequestDispatcher.ERROR_STATUS_CODE); + HttpStatus status = HttpStatus.resolve(code); + return (status != null) ? 
status : HttpStatus.INTERNAL_SERVER_ERROR; + } + +} + +``` + +In the preceding example, if `MyException` is thrown by a controller defined in the same package as `SomeController`, a JSON representation of the `MyErrorBody` POJO is used instead of the `ErrorAttributes` representation. + +In some cases, errors handled at the controller level are not recorded by the [metrics infrastructure](actuator.html#actuator.metrics.supported.spring-mvc). +Applications can ensure that such exceptions are recorded with the request metrics by setting the handled exception as a request attribute: + +``` +import javax.servlet.http.HttpServletRequest; + +import org.springframework.boot.web.servlet.error.ErrorAttributes; +import org.springframework.stereotype.Controller; +import org.springframework.web.bind.annotation.ExceptionHandler; + +@Controller +public class MyController { + + @ExceptionHandler(CustomException.class) + String handleCustomException(HttpServletRequest request, CustomException ex) { + request.setAttribute(ErrorAttributes.ERROR_ATTRIBUTE, ex); + return "errorView"; + } + +} + +``` + +##### Custom Error Pages + +If you want to display a custom HTML error page for a given status code, you can add a file to an `/error` directory. +Error pages can either be static HTML (that is, added under any of the static resource directories) or be built by using templates. +The name of the file should be the exact status code or a series mask. 
+ +For example, to map `404` to a static HTML file, your directory structure would be as follows: + +``` +src/ + +- main/ + +- java/ + | + + +- resources/ + +- public/ + +- error/ + | +- 404.html + +- +``` + +To map all `5xx` errors by using a FreeMarker template, your directory structure would be as follows: + +``` +src/ + +- main/ + +- java/ + | + + +- resources/ + +- templates/ + +- error/ + | +- 5xx.ftlh + +- +``` + +For more complex mappings, you can also add beans that implement the `ErrorViewResolver` interface, as shown in the following example: + +``` +import java.util.Map; + +import javax.servlet.http.HttpServletRequest; + +import org.springframework.boot.autoconfigure.web.servlet.error.ErrorViewResolver; +import org.springframework.http.HttpStatus; +import org.springframework.web.servlet.ModelAndView; + +public class MyErrorViewResolver implements ErrorViewResolver { + + @Override + public ModelAndView resolveErrorView(HttpServletRequest request, HttpStatus status, Map model) { + // Use the request or status to optionally return a ModelAndView + if (status == HttpStatus.INSUFFICIENT_STORAGE) { + // We could add custom model values here + new ModelAndView("myview"); + } + return null; + } + +} + +``` + +You can also use regular Spring MVC features such as [`@ExceptionHandler` methods](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc-exceptionhandlers) and [`@ControllerAdvice`](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc-ann-controller-advice). +The `ErrorController` then picks up any unhandled exceptions. + +##### Mapping Error Pages outside of Spring MVC ##### + +For applications that do not use Spring MVC, you can use the `ErrorPageRegistrar` interface to directly register `ErrorPages`. +This abstraction works directly with the underlying embedded servlet container and works even if you do not have a Spring MVC `DispatcherServlet`. 
+ +``` +import org.springframework.boot.web.server.ErrorPage; +import org.springframework.boot.web.server.ErrorPageRegistrar; +import org.springframework.boot.web.server.ErrorPageRegistry; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.http.HttpStatus; + +@Configuration(proxyBeanMethods = false) +public class MyErrorPagesConfiguration { + + @Bean + public ErrorPageRegistrar errorPageRegistrar() { + return this::registerErrorPages; + } + + private void registerErrorPages(ErrorPageRegistry registry) { + registry.addErrorPages(new ErrorPage(HttpStatus.BAD_REQUEST, "/400")); + } + +} + +``` + +| |If you register an `ErrorPage` with a path that ends up being handled by a `Filter` (as is common with some non-Spring web frameworks, like Jersey and Wicket), then the `Filter` has to be explicitly registered as an `ERROR` dispatcher, as shown in the following example:| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +import java.util.EnumSet; + +import javax.servlet.DispatcherType; + +import org.springframework.boot.web.servlet.FilterRegistrationBean; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MyFilterConfiguration { + + @Bean + public FilterRegistrationBean myFilter() { + FilterRegistrationBean registration = new FilterRegistrationBean<>(new MyFilter()); + // ... + registration.setDispatcherTypes(EnumSet.allOf(DispatcherType.class)); + return registration; + } + +} + +``` + +Note that the default `FilterRegistrationBean` does not include the `ERROR` dispatcher type. 
+ +##### Error handling in a war deployment + +When deployed to a servlet container, Spring Boot uses its error page filter to forward a request with an error status to the appropriate error page. +This is necessary as the servlet specification does not provide an API for registering error pages. +Depending on the container that you are deploying your war file to and the technologies that your application uses, some additional configuration may be required. + +The error page filter can only forward the request to the correct error page if the response has not already been committed. +By default, WebSphere Application Server 8.0 and later commits the response upon successful completion of a servlet’s service method. +You should disable this behavior by setting `com.ibm.ws.webcontainer.invokeFlushAfterService` to `false`. + +If you are using Spring Security and want to access the principal in an error page, you must configure Spring Security’s filter to be invoked on error dispatches. +To do so, set the `spring.security.filter.dispatcher-types` property to `async, error, forward, request`. + +#### 1.1.11. CORS Support + +[Cross-origin resource sharing](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) (CORS) is a [W3C specification](https://www.w3.org/TR/cors/) implemented by [most browsers](https://caniuse.com/#feat=cors) that lets you specify in a flexible way what kind of cross-domain requests are authorized, instead of using some less secure and less powerful approaches such as IFRAME or JSONP. + +As of version 4.2, Spring MVC [supports CORS](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc-cors). 
+Using [controller method CORS configuration](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc-cors-controller) with [`@CrossOrigin`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/bind/annotation/CrossOrigin.html) annotations in your Spring Boot application does not require any specific configuration. [Global CORS configuration](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc-cors-global) can be defined by registering a `WebMvcConfigurer` bean with a customized `addCorsMappings(CorsRegistry)` method, as shown in the following example: + +``` +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.web.servlet.config.annotation.CorsRegistry; +import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; + +@Configuration(proxyBeanMethods = false) +public class MyCorsConfiguration { + + @Bean + public WebMvcConfigurer corsConfigurer() { + return new WebMvcConfigurer() { + + @Override + public void addCorsMappings(CorsRegistry registry) { + registry.addMapping("/api/**"); + } + + }; + } + +} + +``` + +### 1.2. JAX-RS and Jersey + +If you prefer the JAX-RS programming model for REST endpoints, you can use one of the available implementations instead of Spring MVC. [Jersey](https://jersey.github.io/) and [Apache CXF](https://cxf.apache.org/) work quite well out of the box. +CXF requires you to register its `Servlet` or `Filter` as a `@Bean` in your application context. +Jersey has some native Spring support, so we also provide auto-configuration support for it in Spring Boot, together with a starter. 
+ +To get started with Jersey, include the `spring-boot-starter-jersey` as a dependency and then you need one `@Bean` of type `ResourceConfig` in which you register all the endpoints, as shown in the following example: + +``` +import org.glassfish.jersey.server.ResourceConfig; + +import org.springframework.stereotype.Component; + +@Component +public class MyJerseyConfig extends ResourceConfig { + + public MyJerseyConfig() { + register(MyEndpoint.class); + } + +} + +``` + +| |Jersey’s support for scanning executable archives is rather limited.
For example, it cannot scan for endpoints in a package found in a [fully executable jar file](deployment.html#deployment.installing) or in `WEB-INF/classes` when running an executable war file.
To avoid this limitation, the `packages` method should not be used, and endpoints should be registered individually by using the `register` method, as shown in the preceding example.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For more advanced customizations, you can also register an arbitrary number of beans that implement `ResourceConfigCustomizer`. + +All the registered endpoints should be `@Components` with HTTP resource annotations (`@GET` and others), as shown in the following example: + +``` +import javax.ws.rs.GET; +import javax.ws.rs.Path; + +import org.springframework.stereotype.Component; + +@Component +@Path("/hello") +public class MyEndpoint { + + @GET + public String message() { + return "Hello"; + } + +} + +``` + +Since the `Endpoint` is a Spring `@Component`, its lifecycle is managed by Spring and you can use the `@Autowired` annotation to inject dependencies and use the `@Value` annotation to inject external configuration. +By default, the Jersey servlet is registered and mapped to `/*`. +You can change the mapping by adding `@ApplicationPath` to your `ResourceConfig`. + +By default, Jersey is set up as a servlet in a `@Bean` of type `ServletRegistrationBean` named `jerseyServletRegistration`. +By default, the servlet is initialized lazily, but you can customize that behavior by setting `spring.jersey.servlet.load-on-startup`. +You can disable or override that bean by creating one of your own with the same name. 
+You can also use a filter instead of a servlet by setting `spring.jersey.type=filter` (in which case, the `@Bean` to replace or override is `jerseyFilterRegistration`). +The filter has an `@Order`, which you can set with `spring.jersey.filter.order`. +When using Jersey as a filter, a servlet that will handle any requests that are not intercepted by Jersey must be present. +If your application does not contain such a servlet, you may want to enable the default servlet by setting `server.servlet.register-default-servlet` to `true`. +Both the servlet and the filter registrations can be given init parameters by using `spring.jersey.init.*` to specify a map of properties. + +### 1.3. Embedded Servlet Container Support + +For servlet application, Spring Boot includes support for embedded [Tomcat](https://tomcat.apache.org/), [Jetty](https://www.eclipse.org/jetty/), and [Undertow](https://github.com/undertow-io/undertow) servers. +Most developers use the appropriate “Starter” to obtain a fully configured instance. +By default, the embedded server listens for HTTP requests on port `8080`. + +#### 1.3.1. Servlets, Filters, and listeners + +When using an embedded servlet container, you can register servlets, filters, and all the listeners (such as `HttpSessionListener`) from the servlet spec, either by using Spring beans or by scanning for servlet components. + +##### Registering Servlets, Filters, and Listeners as Spring Beans ##### + +Any `Servlet`, `Filter`, or servlet `*Listener` instance that is a Spring bean is registered with the embedded container. +This can be particularly convenient if you want to refer to a value from your `application.properties` during configuration. + +By default, if the context contains only a single Servlet, it is mapped to `/`. +In the case of multiple servlet beans, the bean name is used as a path prefix. +Filters map to `/*`. 
+ +If convention-based mapping is not flexible enough, you can use the `ServletRegistrationBean`, `FilterRegistrationBean`, and `ServletListenerRegistrationBean` classes for complete control. + +It is usually safe to leave filter beans unordered. +If a specific order is required, you should annotate the `Filter` with `@Order` or make it implement `Ordered`. +You cannot configure the order of a `Filter` by annotating its bean method with `@Order`. +If you cannot change the `Filter` class to add `@Order` or implement `Ordered`, you must define a `FilterRegistrationBean` for the `Filter` and set the registration bean’s order using the `setOrder(int)` method. +Avoid configuring a filter that reads the request body at `Ordered.HIGHEST_PRECEDENCE`, since it might go against the character encoding configuration of your application. +If a servlet filter wraps the request, it should be configured with an order that is less than or equal to `OrderedFilter.REQUEST_WRAPPER_FILTER_MAX_ORDER`. + +| |To see the order of every `Filter` in your application, enable debug level logging for the `web` [logging group](features.html#features.logging.log-groups) (`logging.level.web=debug`).
Details of the registered filters, including their order and URL patterns, will then be logged at startup.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Take care when registering `Filter` beans since they are initialized very early in the application lifecycle.
If you need to register a `Filter` that interacts with other beans, consider using a [`DelegatingFilterProxyRegistrationBean`](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/web/servlet/DelegatingFilterProxyRegistrationBean.html) instead.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.3.2. Servlet Context Initialization + +Embedded servlet containers do not directly execute the servlet 3.0+ `javax.servlet.ServletContainerInitializer` interface or Spring’s `org.springframework.web.WebApplicationInitializer` interface. +This is an intentional design decision intended to reduce the risk that third party libraries designed to run inside a war may break Spring Boot applications. + +If you need to perform servlet context initialization in a Spring Boot application, you should register a bean that implements the `org.springframework.boot.web.servlet.ServletContextInitializer` interface. +The single `onStartup` method provides access to the `ServletContext` and, if necessary, can easily be used as an adapter to an existing `WebApplicationInitializer`. + +##### Scanning for Servlets, Filters, and listeners ##### + +When using an embedded container, automatic registration of classes annotated with `@WebServlet`, `@WebFilter`, and `@WebListener` can be enabled by using `@ServletComponentScan`. + +| |`@ServletComponentScan` has no effect in a standalone container, where the container’s built-in discovery mechanisms are used instead.| +|---|--------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.3.3. 
The ServletWebServerApplicationContext + +Under the hood, Spring Boot uses a different type of `ApplicationContext` for embedded servlet container support. +The `ServletWebServerApplicationContext` is a special type of `WebApplicationContext` that bootstraps itself by searching for a single `ServletWebServerFactory` bean. +Usually a `TomcatServletWebServerFactory`, `JettyServletWebServerFactory`, or `UndertowServletWebServerFactory` has been auto-configured. + +| |You usually do not need to be aware of these implementation classes.
Most applications are auto-configured, and the appropriate `ApplicationContext` and `ServletWebServerFactory` are created on your behalf.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 1.3.4. Customizing Embedded Servlet Containers
+
+Common servlet container settings can be configured by using Spring `Environment` properties.
+Usually, you would define the properties in your `application.properties` or `application.yaml` file.
+
+Common server settings include:
+
+* Network settings: Listen port for incoming HTTP requests (`server.port`), interface address to bind to (`server.address`), and so on.
+
+* Session settings: Whether the session is persistent (`server.servlet.session.persistent`), session timeout (`server.servlet.session.timeout`), location of session data (`server.servlet.session.store-dir`), and session-cookie configuration (`server.servlet.session.cookie.*`).
+
+* Error management: Location of the error page (`server.error.path`) and so on.
+
+* [SSL](howto.html#howto.webserver.configure-ssl)
+
+* [HTTP compression](howto.html#howto.webserver.enable-response-compression)
+
+Spring Boot tries as much as possible to expose common settings, but this is not always possible.
+For those cases, dedicated namespaces offer server-specific customizations (see `server.tomcat` and `server.undertow`).
+For instance, [access logs](howto.html#howto.webserver.configure-access-logs) can be configured with specific features of the embedded servlet container. 
+ +| |See the [`ServerProperties`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/ServerProperties.java) class for a complete list.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### SameSite Cookies + +The `SameSite` cookie attribute can be used by web browsers to control if and how cookies are submitted in cross-site requests. +The attribute is particularly relevant for modern web browsers which have started to change the default value that is used when the attribute is missing. + +If you want to change the `SameSite` attribute of your session cookie, you can use the `server.servlet.session.cookie.same-site` property. +This property is supported by auto-configured Tomcat, Jetty and Undertow servers. +It is also used to configure Spring Session servlet based `SessionRepository` beans. + +For example, if you want your session cookie to have a `SameSite` attribute of `None`, you can add the following to your `application.properties` or `application.yaml` file: + +Properties + +``` +server.servlet.session.cookie.same-site=none +``` + +Yaml + +``` +server: + servlet: + session: + cookie: + same-site: "none" +``` + +If you want to change the `SameSite` attribute on other cookies added to your `HttpServletResponse`, you can use a `CookieSameSiteSupplier`. +The `CookieSameSiteSupplier` is passed a `Cookie` and may return a `SameSite` value, or `null`. + +There are a number of convenience factory and filter methods that you can use to quickly match specific cookies. +For example, adding the following bean will automatically apply a `SameSite` of `Lax` for all cookies with a name that matches the regular expression `myapp.*`. 
+ +``` +import org.springframework.boot.web.servlet.server.CookieSameSiteSupplier; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration(proxyBeanMethods = false) +public class MySameSiteConfiguration { + + @Bean + public CookieSameSiteSupplier applicationCookieSameSiteSupplier() { + return CookieSameSiteSupplier.ofLax().whenHasNameMatching("myapp.*"); + } + +} + +``` + +##### Programmatic Customization + +If you need to programmatically configure your embedded servlet container, you can register a Spring bean that implements the `WebServerFactoryCustomizer` interface.`WebServerFactoryCustomizer` provides access to the `ConfigurableServletWebServerFactory`, which includes numerous customization setter methods. +The following example shows programmatically setting the port: + +``` +import org.springframework.boot.web.server.WebServerFactoryCustomizer; +import org.springframework.boot.web.servlet.server.ConfigurableServletWebServerFactory; +import org.springframework.stereotype.Component; + +@Component +public class MyWebServerFactoryCustomizer implements WebServerFactoryCustomizer { + + @Override + public void customize(ConfigurableServletWebServerFactory server) { + server.setPort(9000); + } + +} + +``` + +`TomcatServletWebServerFactory`, `JettyServletWebServerFactory` and `UndertowServletWebServerFactory` are dedicated variants of `ConfigurableServletWebServerFactory` that have additional customization setter methods for Tomcat, Jetty and Undertow respectively. 
+The following example shows how to customize `TomcatServletWebServerFactory` that provides access to Tomcat-specific configuration options: + +``` +import java.time.Duration; + +import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory; +import org.springframework.boot.web.server.WebServerFactoryCustomizer; +import org.springframework.stereotype.Component; + +@Component +public class MyTomcatWebServerFactoryCustomizer implements WebServerFactoryCustomizer { + + @Override + public void customize(TomcatServletWebServerFactory server) { + server.addConnectorCustomizers((connector) -> connector.setAsyncTimeout(Duration.ofSeconds(20).toMillis())); + } + +} + +``` + +##### Customizing ConfigurableServletWebServerFactory Directly ##### + +For more advanced use cases that require you to extend from `ServletWebServerFactory`, you can expose a bean of such type yourself. + +Setters are provided for many configuration options. +Several protected method “hooks” are also provided should you need to do something more exotic. +See the [source code documentation](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/web/servlet/server/ConfigurableServletWebServerFactory.html) for details. + +| |Auto-configured customizers are still applied on your custom factory, so use that option carefully.| +|---|---------------------------------------------------------------------------------------------------| + +#### 1.3.5. JSP Limitations + +When running a Spring Boot application that uses an embedded servlet container (and is packaged as an executable archive), there are some limitations in the JSP support. + +* With Jetty and Tomcat, it should work if you use war packaging. + An executable war will work when launched with `java -jar`, and will also be deployable to any standard container. + JSPs are not supported when using an executable jar. + +* Undertow does not support JSPs. 
+ +* Creating a custom `error.jsp` page does not override the default view for [error handling](#web.servlet.spring-mvc.error-handling).[Custom error pages](#web.servlet.spring-mvc.error-handling.error-pages) should be used instead. + +## 2. Reactive Web Applications + +Spring Boot simplifies development of reactive web applications by providing auto-configuration for Spring Webflux. + +### 2.1. The “Spring WebFlux Framework” + +Spring WebFlux is the new reactive web framework introduced in Spring Framework 5.0. +Unlike Spring MVC, it does not require the servlet API, is fully asynchronous and non-blocking, and implements the [Reactive Streams](https://www.reactive-streams.org/) specification through [the Reactor project](https://projectreactor.io/). + +Spring WebFlux comes in two flavors: functional and annotation-based. +The annotation-based one is quite close to the Spring MVC model, as shown in the following example: + +``` +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import org.springframework.web.bind.annotation.DeleteMapping; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +@RequestMapping("/users") +public class MyRestController { + + private final UserRepository userRepository; + + private final CustomerRepository customerRepository; + + public MyRestController(UserRepository userRepository, CustomerRepository customerRepository) { + this.userRepository = userRepository; + this.customerRepository = customerRepository; + } + + @GetMapping("/{user}") + public Mono getUser(@PathVariable Long userId) { + return this.userRepository.findById(userId); + } + + @GetMapping("/{user}/customers") + public Flux getUserCustomers(@PathVariable Long userId) { + return 
this.userRepository.findById(userId).flatMapMany(this.customerRepository::findByUser); + } + + @DeleteMapping("/{user}") + public void deleteUser(@PathVariable Long userId) { + this.userRepository.deleteById(userId); + } + +} + +``` + +“WebFlux.fn”, the functional variant, separates the routing configuration from the actual handling of the requests, as shown in the following example: + +``` +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.http.MediaType; +import org.springframework.web.reactive.function.server.RequestPredicate; +import org.springframework.web.reactive.function.server.RouterFunction; +import org.springframework.web.reactive.function.server.ServerResponse; + +import static org.springframework.web.reactive.function.server.RequestPredicates.accept; +import static org.springframework.web.reactive.function.server.RouterFunctions.route; + +@Configuration(proxyBeanMethods = false) +public class MyRoutingConfiguration { + + private static final RequestPredicate ACCEPT_JSON = accept(MediaType.APPLICATION_JSON); + + @Bean + public RouterFunction monoRouterFunction(MyUserHandler userHandler) { + return route() + .GET("/{user}", ACCEPT_JSON, userHandler::getUser) + .GET("/{user}/customers", ACCEPT_JSON, userHandler::getUserCustomers) + .DELETE("/{user}", ACCEPT_JSON, userHandler::deleteUser) + .build(); + } + +} + +``` + +``` +import reactor.core.publisher.Mono; + +import org.springframework.stereotype.Component; +import org.springframework.web.reactive.function.server.ServerRequest; +import org.springframework.web.reactive.function.server.ServerResponse; + +@Component +public class MyUserHandler { + + public Mono getUser(ServerRequest request) { + ... + } + + public Mono getUserCustomers(ServerRequest request) { + ... + } + + public Mono deleteUser(ServerRequest request) { + ... 
+ } + +} + +``` + +WebFlux is part of the Spring Framework and detailed information is available in its [reference documentation](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web-reactive.html#webflux-fn). + +| |You can define as many `RouterFunction` beans as you like to modularize the definition of the router.
Beans can be ordered if you need to apply a precedence.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To get started, add the `spring-boot-starter-webflux` module to your application. + +| |Adding both `spring-boot-starter-web` and `spring-boot-starter-webflux` modules in your application results in Spring Boot auto-configuring Spring MVC, not WebFlux.
This behavior has been chosen because many Spring developers add `spring-boot-starter-webflux` to their Spring MVC application to use the reactive `WebClient`.
You can still enforce your choice by setting the chosen application type to `SpringApplication.setWebApplicationType(WebApplicationType.REACTIVE)`.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 2.1.1. Spring WebFlux Auto-configuration
+
+Spring Boot provides auto-configuration for Spring WebFlux that works well with most applications.
+
+The auto-configuration adds the following features on top of Spring’s defaults:
+
+* Configuring codecs for `HttpMessageReader` and `HttpMessageWriter` instances (described [later in this document](#web.reactive.webflux.httpcodecs)).
+
+* Support for serving static resources, including support for WebJars (described [later in this document](#web.servlet.spring-mvc.static-content)).
+
+If you want to keep Spring Boot WebFlux features and you want to add additional [WebFlux configuration](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web-reactive.html#webflux-config), you can add your own `@Configuration` class of type `WebFluxConfigurer` but **without** `@EnableWebFlux`.
+
+If you want to take complete control of Spring WebFlux, you can add your own `@Configuration` annotated with `@EnableWebFlux`.
+
+#### 2.1.2. HTTP Codecs with HttpMessageReaders and HttpMessageWriters
+
+Spring WebFlux uses the `HttpMessageReader` and `HttpMessageWriter` interfaces to convert HTTP requests and responses.
+They are configured with `CodecConfigurer` to have sensible defaults by looking at the libraries available in your classpath. 
+ +Spring Boot provides dedicated configuration properties for codecs, `spring.codec.*`. +It also applies further customization by using `CodecCustomizer` instances. +For example, `spring.jackson.*` configuration keys are applied to the Jackson codec. + +If you need to add or customize codecs, you can create a custom `CodecCustomizer` component, as shown in the following example: + +``` +import org.springframework.boot.web.codec.CodecCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.http.codec.ServerSentEventHttpMessageReader; + +@Configuration(proxyBeanMethods = false) +public class MyCodecsConfiguration { + + @Bean + public CodecCustomizer myCodecCustomizer() { + return (configurer) -> { + configurer.registerDefaults(false); + configurer.customCodecs().register(new ServerSentEventHttpMessageReader()); + // ... + }; + } + +} + +``` + +You can also leverage [Boot’s custom JSON serializers and deserializers](#web.servlet.spring-mvc.json). + +#### 2.1.3. Static Content + +By default, Spring Boot serves static content from a directory called `/static` (or `/public` or `/resources` or `/META-INF/resources`) in the classpath. +It uses the `ResourceWebHandler` from Spring WebFlux so that you can modify that behavior by adding your own `WebFluxConfigurer` and overriding the `addResourceHandlers` method. + +By default, resources are mapped on `/**`, but you can tune that by setting the `spring.webflux.static-path-pattern` property. +For instance, relocating all resources to `/resources/**` can be achieved as follows: + +Properties + +``` +spring.webflux.static-path-pattern=/resources/** +``` + +Yaml + +``` +spring: + webflux: + static-path-pattern: "/resources/**" +``` + +You can also customize the static resource locations by using `spring.web.resources.static-locations`. +Doing so replaces the default values with a list of directory locations. 
+If you do so, the default welcome page detection switches to your custom locations. +So, if there is an `index.html` in any of your locations on startup, it is the home page of the application. + +In addition to the “standard” static resource locations listed earlier, a special case is made for [Webjars content](https://www.webjars.org/). +Any resources with a path in `/webjars/**` are served from jar files if they are packaged in the Webjars format. + +| |Spring WebFlux applications do not strictly depend on the servlet API, so they cannot be deployed as war files and do not use the `src/main/webapp` directory.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.4. Welcome Page + +Spring Boot supports both static and templated welcome pages. +It first looks for an `index.html` file in the configured static content locations. +If one is not found, it then looks for an `index` template. +If either is found, it is automatically used as the welcome page of the application. + +#### 2.1.5. Template Engines + +As well as REST web services, you can also use Spring WebFlux to serve dynamic HTML content. +Spring WebFlux supports a variety of templating technologies, including Thymeleaf, FreeMarker, and Mustache. + +Spring Boot includes auto-configuration support for the following templating engines: + +* [FreeMarker](https://freemarker.apache.org/docs/) + +* [Thymeleaf](https://www.thymeleaf.org) + +* [Mustache](https://mustache.github.io/) + +When you use one of these templating engines with the default configuration, your templates are picked up automatically from `src/main/resources/templates`. + +#### 2.1.6. Error Handling + +Spring Boot provides a `WebExceptionHandler` that handles all errors in a sensible way. +Its position in the processing order is immediately before the handlers provided by WebFlux, which are considered last. 
+For machine clients, it produces a JSON response with details of the error, the HTTP status, and the exception message. +For browser clients, there is a “whitelabel” error handler that renders the same data in HTML format. +You can also provide your own HTML templates to display errors (see the [next section](#web.reactive.webflux.error-handling.error-pages)). + +The first step to customizing this feature often involves using the existing mechanism but replacing or augmenting the error contents. +For that, you can add a bean of type `ErrorAttributes`. + +To change the error handling behavior, you can implement `ErrorWebExceptionHandler` and register a bean definition of that type. +Because a `ErrorWebExceptionHandler` is quite low-level, Spring Boot also provides a convenient `AbstractErrorWebExceptionHandler` to let you handle errors in a WebFlux functional way, as shown in the following example: + +``` +import reactor.core.publisher.Mono; + +import org.springframework.boot.autoconfigure.web.WebProperties.Resources; +import org.springframework.boot.autoconfigure.web.reactive.error.AbstractErrorWebExceptionHandler; +import org.springframework.boot.web.reactive.error.ErrorAttributes; +import org.springframework.context.ApplicationContext; +import org.springframework.http.HttpStatus; +import org.springframework.http.MediaType; +import org.springframework.stereotype.Component; +import org.springframework.web.reactive.function.server.RouterFunction; +import org.springframework.web.reactive.function.server.RouterFunctions; +import org.springframework.web.reactive.function.server.ServerRequest; +import org.springframework.web.reactive.function.server.ServerResponse; +import org.springframework.web.reactive.function.server.ServerResponse.BodyBuilder; + +@Component +public class MyErrorWebExceptionHandler extends AbstractErrorWebExceptionHandler { + + public MyErrorWebExceptionHandler(ErrorAttributes errorAttributes, Resources resources, + ApplicationContext 
applicationContext) { + super(errorAttributes, resources, applicationContext); + } + + @Override + protected RouterFunction getRoutingFunction(ErrorAttributes errorAttributes) { + return RouterFunctions.route(this::acceptsXml, this::handleErrorAsXml); + } + + private boolean acceptsXml(ServerRequest request) { + return request.headers().accept().contains(MediaType.APPLICATION_XML); + } + + public Mono handleErrorAsXml(ServerRequest request) { + BodyBuilder builder = ServerResponse.status(HttpStatus.INTERNAL_SERVER_ERROR); + // ... additional builder calls + return builder.build(); + } + +} + +``` + +For a more complete picture, you can also subclass `DefaultErrorWebExceptionHandler` directly and override specific methods. + +In some cases, errors handled at the controller or handler function level are not recorded by the [metrics infrastructure](actuator.html#actuator.metrics.supported.spring-webflux). +Applications can ensure that such exceptions are recorded with the request metrics by setting the handled exception as a request attribute: + +``` +import org.springframework.boot.web.reactive.error.ErrorAttributes; +import org.springframework.stereotype.Controller; +import org.springframework.web.bind.annotation.ExceptionHandler; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.reactive.result.view.Rendering; +import org.springframework.web.server.ServerWebExchange; + +@Controller +public class MyExceptionHandlingController { + + @GetMapping("/profile") + public Rendering userProfile() { + // ... 
+ throw new IllegalStateException(); + } + + @ExceptionHandler(IllegalStateException.class) + public Rendering handleIllegalState(ServerWebExchange exchange, IllegalStateException exc) { + exchange.getAttributes().putIfAbsent(ErrorAttributes.ERROR_ATTRIBUTE, exc); + return Rendering.view("errorView").modelAttribute("message", exc.getMessage()).build(); + } + +} + +``` + +##### Custom Error Pages + +If you want to display a custom HTML error page for a given status code, you can add a file to an `/error` directory. +Error pages can either be static HTML (that is, added under any of the static resource directories) or built with templates. +The name of the file should be the exact status code or a series mask. + +For example, to map `404` to a static HTML file, your directory structure would be as follows: + +``` +src/ + +- main/ + +- java/ + | + + +- resources/ + +- public/ + +- error/ + | +- 404.html + +- +``` + +To map all `5xx` errors by using a Mustache template, your directory structure would be as follows: + +``` +src/ + +- main/ + +- java/ + | + + +- resources/ + +- templates/ + +- error/ + | +- 5xx.mustache + +- +``` + +#### 2.1.7. Web Filters + +Spring WebFlux provides a `WebFilter` interface that can be implemented to filter HTTP request-response exchanges.`WebFilter` beans found in the application context will be automatically used to filter each exchange. + +Where the order of the filters is important they can implement `Ordered` or be annotated with `@Order`. +Spring Boot auto-configuration may configure web filters for you. +When it does so, the orders shown in the following table will be used: + +| Web Filter | Order | +|---------------------------------------|--------------------------------| +| `MetricsWebFilter` |`Ordered.HIGHEST_PRECEDENCE + 1`| +|`WebFilterChainProxy` (Spring Security)| `-100` | +| `HttpTraceWebFilter` |`Ordered.LOWEST_PRECEDENCE - 10`| + +### 2.2. 
Embedded Reactive Server Support + +Spring Boot includes support for the following embedded reactive web servers: Reactor Netty, Tomcat, Jetty, and Undertow. +Most developers use the appropriate “Starter” to obtain a fully configured instance. +By default, the embedded server listens for HTTP requests on port 8080. + +### 2.3. Reactive Server Resources Configuration + +When auto-configuring a Reactor Netty or Jetty server, Spring Boot will create specific beans that will provide HTTP resources to the server instance: `ReactorResourceFactory` or `JettyResourceFactory`. + +By default, those resources will be also shared with the Reactor Netty and Jetty clients for optimal performances, given: + +* the same technology is used for server and client + +* the client instance is built using the `WebClient.Builder` bean auto-configured by Spring Boot + +Developers can override the resource configuration for Jetty and Reactor Netty by providing a custom `ReactorResourceFactory` or `JettyResourceFactory` bean - this will be applied to both clients and servers. + +You can learn more about the resource configuration on the client side in the [WebClient Runtime section](io.html#io.rest-client.webclient.runtime). + +## 3. Graceful Shutdown + +Graceful shutdown is supported with all four embedded web servers (Jetty, Reactor Netty, Tomcat, and Undertow) and with both reactive and servlet-based web applications. +It occurs as part of closing the application context and is performed in the earliest phase of stopping `SmartLifecycle` beans. +This stop processing uses a timeout which provides a grace period during which existing requests will be allowed to complete but no new requests will be permitted. +The exact way in which new requests are not permitted varies depending on the web server that is being used. +Jetty, Reactor Netty, and Tomcat will stop accepting requests at the network layer. 
+Undertow will accept requests but respond immediately with a service unavailable (503) response. + +| |Graceful shutdown with Tomcat requires Tomcat 9.0.33 or later.| +|---|--------------------------------------------------------------| + +To enable graceful shutdown, configure the `server.shutdown` property, as shown in the following example: + +Properties + +``` +server.shutdown=graceful +``` + +Yaml + +``` +server: + shutdown: "graceful" +``` + +To configure the timeout period, configure the `spring.lifecycle.timeout-per-shutdown-phase` property, as shown in the following example: + +Properties + +``` +spring.lifecycle.timeout-per-shutdown-phase=20s +``` + +Yaml + +``` +spring: + lifecycle: + timeout-per-shutdown-phase: "20s" +``` + +| |Using graceful shutdown with your IDE may not work properly if it does not send a proper `SIGTERM` signal.
See the documentation of your IDE for more details.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 4. Spring Security + +If [Spring Security](https://spring.io/projects/spring-security) is on the classpath, then web applications are secured by default. +Spring Boot relies on Spring Security’s content-negotiation strategy to determine whether to use `httpBasic` or `formLogin`. +To add method-level security to a web application, you can also add `@EnableGlobalMethodSecurity` with your desired settings. +Additional information can be found in the [Spring Security Reference Guide](https://docs.spring.io/spring-security/reference/5.6.2/servlet/authorization/method-security.html). + +The default `UserDetailsService` has a single user. +The user name is `user`, and the password is random and is printed at INFO level when the application starts, as shown in the following example: + +``` +Using generated security password: 78fa095d-3f4c-48b1-ad50-e24c31d5cf35 +``` + +| |If you fine-tune your logging configuration, ensure that the `org.springframework.boot.autoconfigure.security` category is set to log `INFO`-level messages.
Otherwise, the default password is not printed.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can change the username and password by providing a `spring.security.user.name` and `spring.security.user.password`. + +The basic features you get by default in a web application are: + +* A `UserDetailsService` (or `ReactiveUserDetailsService` in case of a WebFlux application) bean with in-memory store and a single user with a generated password (see [`SecurityProperties.User`](https://docs.spring.io/spring-boot/docs/2.6.4/api/org/springframework/boot/autoconfigure/security/SecurityProperties.User.html) for the properties of the user). + +* Form-based login or HTTP Basic security (depending on the `Accept` header in the request) for the entire application (including actuator endpoints if actuator is on the classpath). + +* A `DefaultAuthenticationEventPublisher` for publishing authentication events. + +You can provide a different `AuthenticationEventPublisher` by adding a bean for it. + +### 4.1. MVC Security + +The default security configuration is implemented in `SecurityAutoConfiguration` and `UserDetailsServiceAutoConfiguration`.`SecurityAutoConfiguration` imports `SpringBootWebSecurityConfiguration` for web security and `UserDetailsServiceAutoConfiguration` configures authentication, which is also relevant in non-web applications. +To switch off the default web application security configuration completely or to combine multiple Spring Security components such as OAuth2 Client and Resource Server, add a bean of type `SecurityFilterChain` (doing so does not disable the `UserDetailsService` configuration or Actuator’s security). + +To also switch off the `UserDetailsService` configuration, you can add a bean of type `UserDetailsService`, `AuthenticationProvider`, or `AuthenticationManager`. 
+ +Access rules can be overridden by adding a custom `SecurityFilterChain` or `WebSecurityConfigurerAdapter` bean. +Spring Boot provides convenience methods that can be used to override access rules for actuator endpoints and static resources.`EndpointRequest` can be used to create a `RequestMatcher` that is based on the `management.endpoints.web.base-path` property.`PathRequest` can be used to create a `RequestMatcher` for resources in commonly used locations. + +### 4.2. WebFlux Security + +Similar to Spring MVC applications, you can secure your WebFlux applications by adding the `spring-boot-starter-security` dependency. +The default security configuration is implemented in `ReactiveSecurityAutoConfiguration` and `UserDetailsServiceAutoConfiguration`.`ReactiveSecurityAutoConfiguration` imports `WebFluxSecurityConfiguration` for web security and `UserDetailsServiceAutoConfiguration` configures authentication, which is also relevant in non-web applications. +To switch off the default web application security configuration completely, you can add a bean of type `WebFilterChainProxy` (doing so does not disable the `UserDetailsService` configuration or Actuator’s security). + +To also switch off the `UserDetailsService` configuration, you can add a bean of type `ReactiveUserDetailsService` or `ReactiveAuthenticationManager`. + +Access rules and the use of multiple Spring Security components such as OAuth 2 Client and Resource Server can be configured by adding a custom `SecurityWebFilterChain` bean. +Spring Boot provides convenience methods that can be used to override access rules for actuator endpoints and static resources.`EndpointRequest` can be used to create a `ServerWebExchangeMatcher` that is based on the `management.endpoints.web.base-path` property. + +`PathRequest` can be used to create a `ServerWebExchangeMatcher` for resources in commonly used locations. 
+ +For example, you can customize your security configuration by adding something like: + +``` +import org.springframework.boot.autoconfigure.security.reactive.PathRequest; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.security.config.web.server.ServerHttpSecurity; +import org.springframework.security.web.server.SecurityWebFilterChain; + +@Configuration(proxyBeanMethods = false) +public class MyWebFluxSecurityConfiguration { + + @Bean + public SecurityWebFilterChain springSecurityFilterChain(ServerHttpSecurity http) { + http.authorizeExchange((spec) -> { + spec.matchers(PathRequest.toStaticResources().atCommonLocations()).permitAll(); + spec.pathMatchers("/foo", "/bar").authenticated(); + }); + http.formLogin(); + return http.build(); + } + +} + +``` + +### 4.3. OAuth2 + +[OAuth2](https://oauth.net/2/) is a widely used authorization framework that is supported by Spring. + +#### 4.3.1. Client + +If you have `spring-security-oauth2-client` on your classpath, you can take advantage of some auto-configuration to set up an OAuth2/Open ID Connect clients. +This configuration makes use of the properties under `OAuth2ClientProperties`. +The same properties are applicable to both servlet and reactive applications. 
+ +You can register multiple OAuth2 clients and providers under the `spring.security.oauth2.client` prefix, as shown in the following example: + +Properties + +``` +spring.security.oauth2.client.registration.my-client-1.client-id=abcd +spring.security.oauth2.client.registration.my-client-1.client-secret=password +spring.security.oauth2.client.registration.my-client-1.client-name=Client for user scope +spring.security.oauth2.client.registration.my-client-1.provider=my-oauth-provider +spring.security.oauth2.client.registration.my-client-1.scope=user +spring.security.oauth2.client.registration.my-client-1.redirect-uri=https://my-redirect-uri.com +spring.security.oauth2.client.registration.my-client-1.client-authentication-method=basic +spring.security.oauth2.client.registration.my-client-1.authorization-grant-type=authorization-code + +spring.security.oauth2.client.registration.my-client-2.client-id=abcd +spring.security.oauth2.client.registration.my-client-2.client-secret=password +spring.security.oauth2.client.registration.my-client-2.client-name=Client for email scope +spring.security.oauth2.client.registration.my-client-2.provider=my-oauth-provider +spring.security.oauth2.client.registration.my-client-2.scope=email +spring.security.oauth2.client.registration.my-client-2.redirect-uri=https://my-redirect-uri.com +spring.security.oauth2.client.registration.my-client-2.client-authentication-method=basic +spring.security.oauth2.client.registration.my-client-2.authorization-grant-type=authorization_code + +spring.security.oauth2.client.provider.my-oauth-provider.authorization-uri=https://my-auth-server/oauth/authorize +spring.security.oauth2.client.provider.my-oauth-provider.token-uri=https://my-auth-server/oauth/token +spring.security.oauth2.client.provider.my-oauth-provider.user-info-uri=https://my-auth-server/userinfo +spring.security.oauth2.client.provider.my-oauth-provider.user-info-authentication-method=header 
+spring.security.oauth2.client.provider.my-oauth-provider.jwk-set-uri=https://my-auth-server/token_keys +spring.security.oauth2.client.provider.my-oauth-provider.user-name-attribute=name +``` + +Yaml + +``` +spring: + security: + oauth2: + client: + registration: + my-client-1: + client-id: "abcd" + client-secret: "password" + client-name: "Client for user scope" + provider: "my-oauth-provider" + scope: "user" + redirect-uri: "https://my-redirect-uri.com" + client-authentication-method: "basic" + authorization-grant-type: "authorization-code" + + my-client-2: + client-id: "abcd" + client-secret: "password" + client-name: "Client for email scope" + provider: "my-oauth-provider" + scope: "email" + redirect-uri: "https://my-redirect-uri.com" + client-authentication-method: "basic" + authorization-grant-type: "authorization_code" + + provider: + my-oauth-provider: + authorization-uri: "https://my-auth-server/oauth/authorize" + token-uri: "https://my-auth-server/oauth/token" + user-info-uri: "https://my-auth-server/userinfo" + user-info-authentication-method: "header" + jwk-set-uri: "https://my-auth-server/token_keys" + user-name-attribute: "name" +``` + +For OpenID Connect providers that support [OpenID Connect discovery](https://openid.net/specs/openid-connect-discovery-1_0.html), the configuration can be further simplified. +The provider needs to be configured with an `issuer-uri` which is the URI that the it asserts as its Issuer Identifier. +For example, if the `issuer-uri` provided is "https://example.com", then an `OpenID Provider Configuration Request` will be made to "https://example.com/.well-known/openid-configuration". +The result is expected to be an `OpenID Provider Configuration Response`. 
+The following example shows how an OpenID Connect Provider can be configured with the `issuer-uri`: + +Properties + +``` +spring.security.oauth2.client.provider.oidc-provider.issuer-uri=https://dev-123456.oktapreview.com/oauth2/default/ +``` + +Yaml + +``` +spring: + security: + oauth2: + client: + provider: + oidc-provider: + issuer-uri: "https://dev-123456.oktapreview.com/oauth2/default/" +``` + +By default, Spring Security’s `OAuth2LoginAuthenticationFilter` only processes URLs matching `/login/oauth2/code/*`. +If you want to customize the `redirect-uri` to use a different pattern, you need to provide configuration to process that custom pattern. +For example, for servlet applications, you can add your own `SecurityFilterChain` that resembles the following: + +``` +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.security.config.annotation.web.builders.HttpSecurity; +import org.springframework.security.web.SecurityFilterChain; + +@Configuration(proxyBeanMethods = false) +public class MyOAuthClientConfiguration { + + @Bean + public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception { + http.authorizeRequests().anyRequest().authenticated(); + http.oauth2Login().redirectionEndpoint().baseUri("custom-callback"); + return http.build(); + } + +} + +``` + +| |Spring Boot auto-configures an `InMemoryOAuth2AuthorizedClientService` which is used by Spring Security for the management of client registrations.
The `InMemoryOAuth2AuthorizedClientService` has limited capabilities and we recommend using it only for development environments.
For production environments, consider using a `JdbcOAuth2AuthorizedClientService` or creating your own implementation of `OAuth2AuthorizedClientService`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### OAuth2 client registration for common providers + +For common OAuth2 and OpenID providers, including Google, Github, Facebook, and Okta, we provide a set of provider defaults (`google`, `github`, `facebook`, and `okta`, respectively). + +If you do not need to customize these providers, you can set the `provider` attribute to the one for which you need to infer defaults. +Also, if the key for the client registration matches a default supported provider, Spring Boot infers that as well. + +In other words, the two configurations in the following example use the Google provider: + +Properties + +``` +spring.security.oauth2.client.registration.my-client.client-id=abcd +spring.security.oauth2.client.registration.my-client.client-secret=password +spring.security.oauth2.client.registration.my-client.provider=google +spring.security.oauth2.client.registration.google.client-id=abcd +spring.security.oauth2.client.registration.google.client-secret=password +``` + +Yaml + +``` +spring: + security: + oauth2: + client: + registration: + my-client: + client-id: "abcd" + client-secret: "password" + provider: "google" + google: + client-id: "abcd" + client-secret: "password" +``` + +#### 4.3.2. Resource Server + +If you have `spring-security-oauth2-resource-server` on your classpath, Spring Boot can set up an OAuth2 Resource Server. 
+For JWT configuration, a JWK Set URI or OIDC Issuer URI needs to be specified, as shown in the following examples: + +Properties + +``` +spring.security.oauth2.resourceserver.jwt.jwk-set-uri=https://example.com/oauth2/default/v1/keys +``` + +Yaml + +``` +spring: + security: + oauth2: + resourceserver: + jwt: + jwk-set-uri: "https://example.com/oauth2/default/v1/keys" +``` + +Properties + +``` +spring.security.oauth2.resourceserver.jwt.issuer-uri=https://dev-123456.oktapreview.com/oauth2/default/ +``` + +Yaml + +``` +spring: + security: + oauth2: + resourceserver: + jwt: + issuer-uri: "https://dev-123456.oktapreview.com/oauth2/default/" +``` + +| |If the authorization server does not support a JWK Set URI, you can configure the resource server with the Public Key used for verifying the signature of the JWT.
This can be done using the `spring.security.oauth2.resourceserver.jwt.public-key-location` property, where the value needs to point to a file containing the public key in the PEM-encoded x509 format.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The same properties are applicable for both servlet and reactive applications. + +Alternatively, you can define your own `JwtDecoder` bean for servlet applications or a `ReactiveJwtDecoder` for reactive applications. + +In cases where opaque tokens are used instead of JWTs, you can configure the following properties to validate tokens through introspection: + +Properties + +``` +spring.security.oauth2.resourceserver.opaquetoken.introspection-uri=https://example.com/check-token +spring.security.oauth2.resourceserver.opaquetoken.client-id=my-client-id +spring.security.oauth2.resourceserver.opaquetoken.client-secret=my-client-secret +``` + +Yaml + +``` +spring: + security: + oauth2: + resourceserver: + opaquetoken: + introspection-uri: "https://example.com/check-token" + client-id: "my-client-id" + client-secret: "my-client-secret" +``` + +Again, the same properties are applicable for both servlet and reactive applications. + +Alternatively, you can define your own `OpaqueTokenIntrospector` bean for servlet applications or a `ReactiveOpaqueTokenIntrospector` for reactive applications. + +#### 4.3.3. Authorization Server + +Currently, Spring Security does not provide support for implementing an OAuth 2.0 Authorization Server. 
+However, this functionality is available from the [Spring Security OAuth](https://spring.io/projects/spring-security-oauth) project, which will eventually be superseded by Spring Security completely. +Until then, you can use the `spring-security-oauth2-autoconfigure` module to easily set up an OAuth 2.0 authorization server; see its [documentation](https://docs.spring.io/spring-security-oauth2-boot/) for instructions. + +### 4.4. SAML 2.0 + +#### 4.4.1. Relying Party + +If you have `spring-security-saml2-service-provider` on your classpath, you can take advantage of some auto-configuration to set up a SAML 2.0 Relying Party. +This configuration makes use of the properties under `Saml2RelyingPartyProperties`. + +A relying party registration represents a paired configuration between an Identity Provider, IDP, and a Service Provider, SP. +You can register multiple relying parties under the `spring.security.saml2.relyingparty` prefix, as shown in the following example: + +Properties + +``` +spring.security.saml2.relyingparty.registration.my-relying-party1.signing.credentials[0].private-key-location=path-to-private-key +spring.security.saml2.relyingparty.registration.my-relying-party1.signing.credentials[0].certificate-location=path-to-certificate +spring.security.saml2.relyingparty.registration.my-relying-party1.decryption.credentials[0].private-key-location=path-to-private-key +spring.security.saml2.relyingparty.registration.my-relying-party1.decryption.credentials[0].certificate-location=path-to-certificate +spring.security.saml2.relyingparty.registration.my-relying-party1.identityprovider.verification.credentials[0].certificate-location=path-to-verification-cert +spring.security.saml2.relyingparty.registration.my-relying-party1.identityprovider.entity-id=remote-idp-entity-id1 +spring.security.saml2.relyingparty.registration.my-relying-party1.identityprovider.sso-url=https://remoteidp1.sso.url + 
+spring.security.saml2.relyingparty.registration.my-relying-party2.signing.credentials[0].private-key-location=path-to-private-key +spring.security.saml2.relyingparty.registration.my-relying-party2.signing.credentials[0].certificate-location=path-to-certificate +spring.security.saml2.relyingparty.registration.my-relying-party2.decryption.credentials[0].private-key-location=path-to-private-key +spring.security.saml2.relyingparty.registration.my-relying-party2.decryption.credentials[0].certificate-location=path-to-certificate +spring.security.saml2.relyingparty.registration.my-relying-party2.identityprovider.verification.credentials[0].certificate-location=path-to-other-verification-cert +spring.security.saml2.relyingparty.registration.my-relying-party2.identityprovider.entity-id=remote-idp-entity-id2 +spring.security.saml2.relyingparty.registration.my-relying-party2.identityprovider.sso-url=https://remoteidp2.sso.url +``` + +Yaml + +``` +spring: + security: + saml2: + relyingparty: + registration: + my-relying-party1: + signing: + credentials: + - private-key-location: "path-to-private-key" + certificate-location: "path-to-certificate" + decryption: + credentials: + - private-key-location: "path-to-private-key" + certificate-location: "path-to-certificate" + identityprovider: + verification: + credentials: + - certificate-location: "path-to-verification-cert" + entity-id: "remote-idp-entity-id1" + sso-url: "https://remoteidp1.sso.url" + + my-relying-party2: + signing: + credentials: + - private-key-location: "path-to-private-key" + certificate-location: "path-to-certificate" + decryption: + credentials: + - private-key-location: "path-to-private-key" + certificate-location: "path-to-certificate" + identityprovider: + verification: + credentials: + - certificate-location: "path-to-other-verification-cert" + entity-id: "remote-idp-entity-id2" + sso-url: "https://remoteidp2.sso.url" +``` + +## 5. 
Spring Session + +Spring Boot provides [Spring Session](https://spring.io/projects/spring-session) auto-configuration for a wide range of data stores. +When building a servlet web application, the following stores can be auto-configured: + +* JDBC + +* Redis + +* Hazelcast + +* MongoDB + +The servlet auto-configuration replaces the need to use `@Enable*HttpSession`. + +When building a reactive web application, the following stores can be auto-configured: + +* Redis + +* MongoDB + +The reactive auto-configuration replaces the need to use `@Enable*WebSession`. + +If a single Spring Session module is present on the classpath, Spring Boot uses that store implementation automatically. +If you have more than one implementation, you must choose the [`StoreType`](https://github.com/spring-projects/spring-boot/tree/v2.6.4/spring-boot-project/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/session/StoreType.java) that you wish to use to store the sessions. +For instance, to use JDBC as the back-end store, you can configure your application as follows: + +Properties + +``` +spring.session.store-type=jdbc +``` + +Yaml + +``` +spring: + session: + store-type: "jdbc" +``` + +| |You can disable Spring Session by setting the `store-type` to `none`.| +|---|---------------------------------------------------------------------| + +Each store has specific additional settings. +For instance, it is possible to customize the name of the table for the JDBC store, as shown in the following example: + +Properties + +``` +spring.session.jdbc.table-name=SESSIONS +``` + +Yaml + +``` +spring: + session: + jdbc: + table-name: "SESSIONS" +``` + +For setting the timeout of the session you can use the `spring.session.timeout` property. +If that property is not set with a servlet web application, the auto-configuration falls back to the value of `server.servlet.session.timeout`. 
+ +You can take control over Spring Session’s configuration using `@Enable*HttpSession` (servlet) or `@Enable*WebSession` (reactive). +This will cause the auto-configuration to back off. +Spring Session can then be configured using the annotation’s attributes rather than the previously described configuration properties. + +## 6. Spring HATEOAS + +If you develop a RESTful API that makes use of hypermedia, Spring Boot provides auto-configuration for Spring HATEOAS that works well with most applications. +The auto-configuration replaces the need to use `@EnableHypermediaSupport` and registers a number of beans to ease building hypermedia-based applications, including a `LinkDiscoverers` (for client side support) and an `ObjectMapper` configured to correctly marshal responses into the desired representation. +The `ObjectMapper` is customized by setting the various `spring.jackson.*` properties or, if one exists, by a `Jackson2ObjectMapperBuilder` bean. + +You can take control of Spring HATEOAS’s configuration by using `@EnableHypermediaSupport`. +Note that doing so disables the `ObjectMapper` customization described earlier. + +| |`spring-boot-starter-hateoas` is specific to Spring MVC and should not be combined with Spring WebFlux.
In order to use Spring HATEOAS with Spring WebFlux, you can add a direct dependency on `org.springframework.hateoas:spring-hateoas` along with `spring-boot-starter-webflux`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 7. What to Read Next + +You should now have a good understanding of how to develop web applications with Spring Boot. +The next few sections describe how Spring Boot integrates with various [data technologies](data.html#data), [messaging systems](messaging.html#messaging), and other IO capabilities. +You can pick any of these based on your application’s needs. diff --git a/docs/en/spring-cloud-data-flow/README.md b/docs/en/spring-cloud-data-flow/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-cloud-data-flow/spring-cloud-dataflow.md b/docs/en/spring-cloud-data-flow/spring-cloud-dataflow.md new file mode 100644 index 0000000000000000000000000000000000000000..b7d44ed08aff07617ef518b4f11db14b1c613066 --- /dev/null +++ b/docs/en/spring-cloud-data-flow/spring-cloud-dataflow.md @@ -0,0 +1,12991 @@ +# Spring Cloud Data Flow Reference Guid + +# Preface + +## 1. About the documentation + +The documentation for this release is available in [HTML](https://docs.spring.io/spring-cloud-dataflow/docs/2.9.2/reference/htmlsingle). + +The latest copy of the Spring Cloud Data Flow reference guide can be found [here](https://docs.spring.io/spring-cloud-dataflow/docs/current-SNAPSHOT/reference/html/). 
+ +Copies of this document may be made for your own use and for +distribution to others, provided that you do not charge any fee for such copies and +further provided that each copy contains this Copyright Notice, whether distributed in +print or electronically. + +## 2. Getting help + +Having trouble with Spring Cloud Data Flow? We would like to help! + +* Ask a question. We monitor [stackoverflow.com](https://stackoverflow.com) for questions + tagged with [`spring-cloud-dataflow`](https://stackoverflow.com/tags/spring-cloud-dataflow). + +* Report bugs with Spring Cloud Data Flow at [github.com/spring-cloud/spring-cloud-dataflow/issues](https://github.com/spring-cloud/spring-cloud-dataflow/issues). + +* Chat with the community and developers on [Gitter](https://gitter.im/spring-cloud/spring-cloud-dataflow). + +| |All of Spring Cloud Data Flow is open source, including the documentation! If you find problems
with the docs or if you just want to improve them, please [get involved](https://github.com/spring-cloud/spring-cloud-dataflow).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +# Getting Started + +## 3. Getting Started - Local + +See the [Local Machine](https://dataflow.spring.io/docs/installation/local/) section of the microsite for more information on setting up docker compose and manual installation. + +Once you have the Data Flow server installed locally, you probably want to get started with orchestrating the deployment of readily available pre-built applications into coherent streaming or batch data pipelines. We have guides to help you get started with both [Stream](https://dataflow.spring.io/docs/stream-developer-guides/) and [Batch](https://dataflow.spring.io/docs/batch-developer-guides/) processing. + +## 4. Getting Started - Cloud Foundry + +This section covers how to get started with Spring Cloud Data Flow on Cloud Foundry. See the [Cloud Foundry](https://dataflow.spring.io/docs/installation/cloudfoundry/) section of the microsite for more information on installing Spring Cloud Data Flow on Cloud Foundry. + +Once you have the Data Flow server installed on Cloud Foundry, you probably want to get started with orchestrating the deployment of readily available pre-built applications into coherent streaming or batch data pipelines. We have guides to help you get started with both [Stream](https://dataflow.spring.io/docs/stream-developer-guides/) and [Batch](https://dataflow.spring.io/docs/batch-developer-guides/) processing. + +## 5. Getting Started - Kubernetes + +[Spring Cloud Data Flow](https://cloud.spring.io/spring-cloud-dataflow/) is a toolkit for building data integration and real-time data-processing pipelines. 
+ +Pipelines consist of Spring Boot applications built with the Spring Cloud Stream or Spring Cloud Task microservice frameworks. +This makes Spring Cloud Data Flow suitable for a range of data-processing use cases, from import-export to event streaming and predictive analytics. + +This project provides support for using Spring Cloud Data Flow with Kubernetes as the runtime for these pipelines, with applications packaged as Docker images. + +See the [Kubernetes](https://dataflow.spring.io/docs/installation/kubernetes/) section of the microsite for more information on installing Spring Cloud Data Flow on Kubernetes. + +Once you have the Data Flow server installed on Kubernetes, you probably want to get started with orchestrating the deployment of readily available pre-built applications into coherent streaming or batch data pipelines. We have guides to help you get started with both [Stream](https://dataflow.spring.io/docs/stream-developer-guides/) and [Batch](https://dataflow.spring.io/docs/batch-developer-guides/) processing. + +### 5.1. Application and Server Properties + +This section covers how you can customize the deployment of your applications. You can use a number of properties to influence settings for the applications that are deployed. Properties can be applied on a per-application basis or in the appropriate server configuration for all deployed applications. + +| |Properties set on a per-application basis always take precedence over properties set as the server configuration. 
This arrangement lets you override global server level properties on a per-application basis.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Properties to be applied for all deployed Tasks are defined in the `src/kubernetes/server/server-config-[binder].yaml` file and for Streams in `src/kubernetes/skipper/skipper-config-[binder].yaml`. Replace `[binder]` with the messaging middleware you are using — for example, `rabbit` or `kafka`. + +#### 5.1.1. Memory and CPU Settings + +Applications are deployed with default memory and CPU settings. If you need to, you can adjust these values. The following example shows how to set `Limits` to `1000m` for `CPU` and `1024Mi` for memory and `Requests` to `800m` for CPU and `640Mi` for memory: + +``` +deployer..kubernetes.limits.cpu=1000m +deployer..kubernetes.limits.memory=1024Mi +deployer..kubernetes.requests.cpu=800m +deployer..kubernetes.requests.memory=640Mi +``` + +Those values result in the following container settings being used: + +``` +Limits: + cpu: 1 + memory: 1Gi +Requests: + cpu: 800m + memory: 640Mi +``` + +You can also control the default values to which to set the `cpu` and `memory` globally. + +The following example shows how to set the CPU and memory for streams: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + limits: + memory: 640Mi + cpu: 500m +``` + +The following example shows how to set the CPU and memory for tasks: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + limits: + memory: 640Mi + cpu: 500m +``` + +The settings we have used so far affect only the settings for the container. They do not affect the memory setting for the JVM process in the container. 
If you would like to set JVM memory settings, you can set an environment variable to do so. See the next section for details. + +#### 5.1.2. Environment Variables + +To influence the environment settings for a given application, you can use the `spring.cloud.deployer.kubernetes.environmentVariables` deployer property. +For example, a common requirement in production settings is to influence the JVM memory arguments. +You can do so by using the `JAVA_TOOL_OPTIONS` environment variable, as the following example shows: + +``` +deployer..kubernetes.environmentVariables=JAVA_TOOL_OPTIONS=-Xmx1024m +``` + +| |The `environmentVariables` property accepts a comma-delimited string. If an environment variable contains a value
that is also a comma-delimited string, it must be enclosed in single quotation marks — for example, `spring.cloud.deployer.kubernetes.environmentVariables=spring.cloud.stream.kafka.binder.brokers='somehost:9092,
anotherhost:9093'`| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +This overrides the JVM memory setting for the desired `` (replace `` with the name of your application). + +#### 5.1.3. Liveness and Readiness Probes + +The `liveness` and `readiness` probes use paths called `/health` and `/info`, respectively. They use a `delay` of `10` for both and a `period` of `60` and `10` respectively. You can change these defaults when you deploy the stream by using deployer properties. The liveness and readiness probes are applied only to streams. + +The following example changes the `liveness` probe (replace `` with the name of your application) by setting deployer properties: + +``` +deployer..kubernetes.livenessProbePath=/health +deployer..kubernetes.livenessProbeDelay=120 +deployer..kubernetes.livenessProbePeriod=20 +``` + +You can declare the same as part of the server global configuration for streams, as the following example shows: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + livenessProbePath: /health + livenessProbeDelay: 120 + livenessProbePeriod: 20 +``` + +Similarly, you can swap `liveness` for `readiness` to override the default `readiness` settings. + +By default, port 8080 is used as the probe port. 
You can change the defaults for both `liveness` and `readiness` probe ports by using deployer properties, as the following example shows: + +``` +deployer..kubernetes.readinessProbePort=7000 +deployer..kubernetes.livenessProbePort=7000 +``` + +You can declare the same as part of the global configuration for streams, as the following example shows: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + readinessProbePort: 7000 + livenessProbePort: 7000 +``` + +| |By default, the `liveness` and `readiness` probe paths use Spring Boot 2.x+ actuator endpoints. To use Spring Boot 1.x actuator endpoint paths, you must adjust the `liveness` and `readiness` values, as the following example shows (replace `` with the name of your application):

```
deployer..kubernetes.livenessProbePath=/health
deployer..kubernetes.readinessProbePath=/info
```

To automatically set both `liveness` and `readiness` endpoints on a per-application basis to the default Spring Boot 1.x paths, you can set the following property:

```
deployer..kubernetes.bootMajorVersion=1
```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can access secured probe endpoints by using credentials stored in a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/). You can use an existing secret, provided the credentials are contained under the `credentials` key name of the secret’s `data` block. You can configure probe authentication on a per-application basis. When enabled, it is applied to both the `liveness` and `readiness` probe endpoints by using the same credentials and authentication type. Currently, only `Basic` authentication is supported. + +To create a new secret: + +1. Generate the base64 string with the credentials used to access the secured probe endpoints. + + Basic authentication encodes a username and a password as a base64 string in the format of `username:password`. + + The following example (which includes output and in which you should replace `user` and `pass` with your values) shows how to generate a base64 string: + + ``` + $ echo -n "user:pass" | base64 + dXNlcjpwYXNz + ``` + +2. With the encoded credentials, create a file (for example, `myprobesecret.yml`) with the following contents: + + ``` + apiVersion: v1 + kind: Secret + metadata: + name: myprobesecret + type: Opaque + data: + credentials: GENERATED_BASE64_STRING + ``` + +3. 
Replace `GENERATED_BASE64_STRING` with the base64-encoded value generated earlier. + +4. Create the secret by using `kubectl`, as the following example shows: + + ``` + $ kubectl create -f ./myprobesecret.yml + secret "myprobesecret" created + ``` + +5. Set the following deployer properties to use authentication when accessing probe endpoints, as the following example shows: + + ``` + deployer..kubernetes.probeCredentialsSecret=myprobesecret + ``` + + Replace `` with the name of the application to which to apply authentication. + +#### 5.1.4. Using `SPRING_APPLICATION_JSON` + +You can use a `SPRING_APPLICATION_JSON` environment variable to set Data Flow server properties (including the configuration of Maven repository settings) that are common across all of the Data Flow server implementations. These settings go at the server level in the container `env` section of a deployment YAML. The following example shows how to do so: + +``` +env: +- name: SPRING_APPLICATION_JSON + value: "{ \"maven\": { \"local-repository\": null, \"remote-repositories\": { \"repo1\": { \"url\": \"https://repo.spring.io/libs-snapshot\"} } } }" +``` + +#### 5.1.5. Private Docker Registry + +You can pull Docker images from a private registry on a per-application basis. First, you must create a secret in the cluster. Follow the [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) guide to create the secret. + +Once you have created the secret, you can use the `imagePullSecret` property to set the secret to use, as the following example shows: + +``` +deployer..kubernetes.imagePullSecret=mysecret +``` + +Replace `` with the name of your application and `mysecret` with the name of the secret you created earlier. + +You can also configure the image pull secret at the global server level. 
+ +The following example shows how to do so for streams: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + imagePullSecret: mysecret +``` + +The following example shows how to do so for tasks: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + imagePullSecret: mysecret +``` + +Replace `mysecret` with the name of the secret you created earlier. + +#### 5.1.6. Annotations + +You can add annotations to Kubernetes objects on a per-application basis. The supported object types are pod `Deployment`, `Service`, and `Job`. Annotations are defined in a `key:value` format, allowing for multiple annotations separated by a comma. For more information and use cases on annotations, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/). + +The following example shows how you can configure applications to use annotations: + +``` +deployer..kubernetes.podAnnotations=annotationName:annotationValue +deployer..kubernetes.serviceAnnotations=annotationName:annotationValue,annotationName2:annotationValue2 +deployer..kubernetes.jobAnnotations=annotationName:annotationValue +``` + +Replace `` with the name of your application and the value of your annotations. + +#### 5.1.7. Entry Point Style + +An entry point style affects how application properties are passed to the container to be deployed. Currently, three styles are supported: + +* `exec` (default): Passes all application properties and command line arguments in the deployment request as container arguments. Application properties are transformed into the format of `--key=value`. + +* `shell`: Passes all application properties and command line arguments as environment variables. Each of the application or command-line argument properties is transformed into an uppercase string and `.` characters are replaced with `_`. 
+ +* `boot`: Creates an environment variable called `SPRING_APPLICATION_JSON` that contains a JSON representation of all application properties. Command line arguments from the deployment request are set as container args. + +| |In all cases, environment variables defined at the server-level configuration and on a per-application basis are sent on to the container as is.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------| + +You can configure an application as follows: + +``` +deployer..kubernetes.entryPointStyle= +``` + +Replace `` with the name of your application and `` with your desired entry point style. + +You can also configure the entry point style at the global server level. + +The following example shows how to do so for streams: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + entryPointStyle: entryPointStyle +``` + +The following example shows how to do so for tasks: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + entryPointStyle: entryPointStyle +``` + +Replace `entryPointStyle` with the desired entry point style. + +You should choose an Entry Point Style of either `exec` or `shell`, to correspond to how the `ENTRYPOINT` syntax is defined in the container’s `Dockerfile`. For more information and uses cases on `exec` versus `shell`, see the [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) section of the Docker documentation. + +Using the `boot` entry point style corresponds to using the `exec` style `ENTRYPOINT`. Command line arguments from the deployment request are passed to the container, with the addition of application properties being mapped into the `SPRING_APPLICATION_JSON` environment variable rather than command line arguments. 
+ +| |When you use the `boot` Entry Point Style, the `deployer..kubernetes.environmentVariables` property must not contain `SPRING_APPLICATION_JSON`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.1.8. Deployment Service Account + +You can configure a custom service account for application deployments through properties. You can use an existing service account or create a new one. One way to create a service account is by using `kubectl`, as the following example shows: + +``` +$ kubectl create serviceaccount myserviceaccountname +serviceaccount "myserviceaccountname" created +``` + +Then you can configure individual applications as follows: + +``` +deployer..kubernetes.deploymentServiceAccountName=myserviceaccountname +``` + +Replace `` with the name of your application and `myserviceaccountname` with your service account name. + +You can also configure the service account name at the global server level. + +The following example shows how to do so for streams: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + deploymentServiceAccountName: myserviceaccountname +``` + +The following example shows how to do so for tasks: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + deploymentServiceAccountName: myserviceaccountname +``` + +Replace `myserviceaccountname` with the service account name to be applied to all deployments. + +#### 5.1.9. Image Pull Policy + +An image pull policy defines when a Docker image should be pulled to the local registry. Currently, three policies are supported: + +* `IfNotPresent` (default): Do not pull an image if it already exists. + +* `Always`: Always pull the image regardless of whether it already exists. + +* `Never`: Never pull an image. 
Use only an image that already exists. + +The following example shows how you can individually configure applications: + +``` +deployer..kubernetes.imagePullPolicy=Always +``` + +Replace `` with the name of your application and `Always` with your desired image pull policy. + +You can configure an image pull policy at the global server level. + +The following example shows how to do so for streams: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + imagePullPolicy: Always +``` + +The following example shows how to do so for tasks: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + imagePullPolicy: Always +``` + +Replace `Always` with your desired image pull policy. + +#### 5.1.10. Deployment Labels + +You can set custom labels on objects related to [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/). See [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) for more information on labels. Labels are specified in `key:value` format. + +The following example shows how you can individually configure applications: + +``` +deployer..kubernetes.deploymentLabels=myLabelName:myLabelValue +``` + +Replace `` with the name of your application, `myLabelName` with your label name, and `myLabelValue` with the value of your label. + +Additionally, you can apply multiple labels, as the following example shows: + +``` +deployer..kubernetes.deploymentLabels=myLabelName:myLabelValue,myLabelName2:myLabelValue2 +``` + +#### 5.1.11. Tolerations + +Tolerations work with taints to ensure pods are not scheduled onto particular nodes. +Tolerations are set into the pod configuration while taints are set onto nodes. +See the [Taints and Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) section of the Kubernetes reference for more information. 
+ +The following example shows how you can individually configure applications: + +``` +deployer..kubernetes.tolerations=[{key: 'mykey', operator: 'Equal', value: 'myvalue', effect: 'NoSchedule'}] +``` + +Replace `` with the name of your application and the key-value pairs according to your desired toleration configuration. + +You can configure tolerations at the global server level as well. + +The following example shows how to do so for streams: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + tolerations: + - key: mykey + operator: Equal + value: myvalue + effect: NoSchedule +``` + +The following example shows how to do so for tasks: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + tolerations: + - key: mykey + operator: Equal + value: myvalue + effect: NoSchedule +``` + +Replace the `tolerations` key-value pairs according to your desired toleration configuration. + +#### 5.1.12. Secret References + +Secrets can be referenced and their entire data contents can be decoded and inserted into the pod environment as individual variables. +See the [Configure all key-value pairs in a Secret as container environment variables](https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables) section of the Kubernetes reference for more information. + +The following example shows how you can individually configure applications: + +``` +deployer..kubernetes.secretRefs=testsecret +``` + +You can also specify multiple secrets, as follows: + +``` +deployer..kubernetes.secretRefs=[testsecret,anothersecret] +``` + +Replace `` with the name of your application and the `secretRefs` attribute with the appropriate values for your application environment and secret. 
+ +You can configure secret references at the global server level as well. + +The following example shows how to do so for streams: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + secretRefs: + - testsecret + - anothersecret +``` + +The following example shows how to do so for tasks: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + secretRefs: + - testsecret + - anothersecret +``` + +Replace the items of `secretRefs` with one or more secret names. + +#### 5.1.13. Secret Key References + +Secrets can be referenced and their decoded value can be inserted into the pod environment. +See the [Using Secrets as Environment Variables](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables) section of the Kubernetes reference for more information. + +The following example shows how you can individually configure applications: + +``` +deployer..kubernetes.secretKeyRefs=[{envVarName: 'MY_SECRET', secretName: 'testsecret', dataKey: 'password'}] +``` + +Replace `` with the name of your application and the `envVarName`, `secretName`, and `dataKey` attributes with the appropriate values for your application environment and secret. + +You can configure secret key references at the global server level as well. 
+ +The following example shows how to do so for streams: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + secretKeyRefs: + - envVarName: MY_SECRET + secretName: testsecret + dataKey: password +``` + +The following example shows how to do so for tasks: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + secretKeyRefs: + - envVarName: MY_SECRET + secretName: testsecret + dataKey: password +``` + +Replace the `envVarName`, `secretName`, and `dataKey` attributes with the appropriate values for your secret. + +#### 5.1.14. ConfigMap References + +A ConfigMap can be referenced and its entire data contents can be decoded and inserted into the pod environment as individual variables. +See the [Configure all key-value pairs in a ConfigMap as container environment variables](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables) section of the Kubernetes reference for more information. + +The following example shows how you can individually configure applications: + +``` +deployer..kubernetes.configMapRefs=testcm +``` + +You can also specify multiple ConfigMap instances, as follows: + +``` +deployer..kubernetes.configMapRefs=[testcm,anothercm] +``` + +Replace `` with the name of your application and the `configMapRefs` attribute with the appropriate values for your application environment and ConfigMap. + +You can configure ConfigMap references at the global server level as well. + +The following example shows how to do so for streams. 
Edit the appropriate `skipper-config-(binder).yaml`, replacing `(binder)` with the corresponding binder in use: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + configMapRefs: + - testcm + - anothercm +``` + +The following example shows how to do so for tasks by editing the `server-config.yaml` file: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + configMapRefs: + - testcm + - anothercm +``` + +Replace the items of `configMapRefs` with one or more secret names. + +#### 5.1.15. ConfigMap Key References + +A ConfigMap can be referenced and its associated key value inserted into the pod environment. +See the [Define container environment variables using ConfigMap data](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#define-container-environment-variables-using-configmap-data) section of the Kubernetes reference for more information. + +The following example shows how you can individually configure applications: + +``` +deployer..kubernetes.configMapKeyRefs=[{envVarName: 'MY_CM', configMapName: 'testcm', dataKey: 'platform'}] +``` + +Replace `` with the name of your application and the `envVarName`, `configMapName`, and `dataKey` attributes with the appropriate values for your application environment and ConfigMap. + +You can configure ConfigMap references at the global server level as well. + +The following example shows how to do so for streams. 
Edit the appropriate `skipper-config-(binder).yaml`, replacing `(binder)` with the corresponding binder in use: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + configMapKeyRefs: + - envVarName: MY_CM + configMapName: testcm + dataKey: platform +``` + +The following example shows how to do so for tasks by editing the `server-config.yaml` file: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + configMapKeyRefs: + - envVarName: MY_CM + configMapName: testcm + dataKey: platform +``` + +Replace the `envVarName`, `configMapName`, and `dataKey` attributes with the appropriate values for your ConfigMap. + +#### 5.1.16. Pod Security Context + +You can configure the pod security context to run processes under the specified UID (user ID) or GID (group ID). +This is useful when you want to not run processes under the default `root` UID and GID. +You can define either the `runAsUser` (UID) or `fsGroup` (GID), and you can configure them to work together. +See the [Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) section of the Kubernetes reference for more information. + +The following example shows how you can individually configure application pods: + +``` +deployer..kubernetes.podSecurityContext={runAsUser: 65534, fsGroup: 65534} +``` + +Replace `` with the name of your application and the `runAsUser` and/or `fsGroup` attributes with the appropriate values for your container environment. + +You can configure the pod security context at the global server level as well. + +The following example shows how to do so for streams. 
Edit the appropriate `skipper-config-(binder).yaml`, replacing `(binder)` with the corresponding binder in use: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + podSecurityContext: + runAsUser: 65534 + fsGroup: 65534 +``` + +The following example shows how to do so for tasks by editing the `server-config.yaml` file: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + podSecurityContext: + runAsUser: 65534 + fsGroup: 65534 +``` + +Replace the `runAsUser` and/or `fsGroup` attributes with the appropriate values for your container environment. + +#### 5.1.17. Service Ports + +When you deploy applications, a Kubernetes Service object is created with a default port of `8080`. If the `server.port` property is set, it overrides the default port value. You can add additional ports to the Service object on a per-application basis. You can add multiple ports with a comma delimiter. + +The following example shows how you can configure additional ports on a Service object for an application: + +``` +deployer..kubernetes.servicePorts=5000 +deployer..kubernetes.servicePorts=5000,9000 +``` + +Replace `` with the name of your application and the value of your ports. + +#### 5.1.18. StatefulSet Init Container + +When deploying an application by using a StatefulSet, an Init Container is used to set the instance index in the pod. +By default, the image used is `busybox`, which you can customize. + +The following example shows how you can individually configure application pods: + +``` +deployer..kubernetes.statefulSetInitContainerImageName=myimage:mylabel +``` + +Replace `` with the name of your application and the `statefulSetInitContainerImageName` attribute with the appropriate value for your environment. + +You can configure the StatefulSet Init Container at the global server level as well. 
+ +The following example shows how to do so for streams. Edit the appropriate `skipper-config-(binder).yaml`, replacing `(binder)` with the corresponding binder in use: + +``` +data: + application.yaml: |- + spring: + cloud: + skipper: + server: + platform: + kubernetes: + accounts: + default: + statefulSetInitContainerImageName: myimage:mylabel +``` + +The following example shows how to do so for tasks by editing the `server-config.yaml` file: + +``` +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + statefulSetInitContainerImageName: myimage:mylabel +``` + +Replace the `statefulSetInitContainerImageName` attribute with the appropriate value for your environment. + +#### 5.1.19. Init Containers + +When you deploy applications, you can set a custom Init Container on a per-application basis. +Refer to the [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) section of the Kubernetes reference for more information. + +The following example shows how you can configure an Init Container for an application: + +``` +deployer..kubernetes.initContainer={containerName: 'test', imageName: 'busybox:latest', commands: ['sh', '-c', 'echo hello']} +``` + +Replace `` with the name of your application and set the values of the `initContainer` attributes appropriate for your Init Container. + +#### 5.1.20. Lifecycle Support + +When you deploy applications, you may attach `postStart` and `preStop` [Lifecycle handlers](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/) to execute commands. +The Kubernetes API supports other types of handlers besides `exec`. This feature may be extended to support additional actions in a future release. 
+To configure the Lifecycle handlers as shown in the linked page above, specify each command as a comma-delimited list, using the following property keys: + +``` +deployer..kubernetes.lifecycle.postStart.exec.command=/bin/sh,-c,'echo Hello from the postStart handler > /usr/share/message' +deployer..kubernetes.lifecycle.preStop.exec.command=/bin/sh,-c,'nginx -s quit; while killall -0 nginx; do sleep 1; done' +``` + +#### 5.1.21. Additional Containers + +When you deploy applications, you may need one or more containers to be deployed along with the main container. +This would allow you to adapt some deployment patterns, such as sidecar or adapter, in the case of a multi-container pod setup. + +The following example shows how you can configure additional containers for an application: + +``` +deployer..kubernetes.additionalContainers=[{name: 'c1', image: 'busybox:latest', command: ['sh', '-c', 'echo hello1'], volumeMounts: [{name: 'test-volume', mountPath: '/tmp', readOnly: true}]},{name: 'c2', image: 'busybox:1.26.1', command: ['sh', '-c', 'echo hello2']}] +``` + +# Applications + +A selection of pre-built [stream](https://cloud.spring.io/spring-cloud-stream-app-starters/) and [task or batch](https://cloud.spring.io/spring-cloud-task-app-starters/) starter applications for various data integration and processing scenarios to facilitate learning and experimentation. The table in the next section includes the pre-built applications at a glance. For more details, review how to [register supported applications](#supported-apps-and-tasks). + +## 6. 
Available Applications + +| Source | Processor | Sink | Task | +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------| +| [sftp](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-sftp-source) | [tcp-client](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-tcp-client-processor) | [mqtt](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-mqtt-sink) | [timestamp](https://docs.spring.io/spring-cloud-task-app-starters/docs/current/reference/htmlsingle/#_timestamp_task) | +| [jms](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-jms-source) | [scriptable-transform](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-scriptable-transform) | [log](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-log-sink) |[composed-task-runner](https://docs.spring.io/spring-cloud-task-app-starters/docs/current/reference/htmlsingle/#_composed_task_runner)| +| 
[ftp](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-ftp-source) | [transform](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-clound-stream-modules-transform-processor) | [throughput](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-throughput-sink) | [timestamp-batch](https://docs.spring.io/spring-cloud-task-app-starters/docs/current/reference/htmlsingle/#_timestamp_batch_task) | +| [time](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-time-source) | [header-enricher](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-header-enricher-processor) | [mongodb](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-mongodb-sink) | | +| [load-generator](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-load-generator-source) | [python-http](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-python-http-processor) | [ftp](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-ftp-sink) | | +| [syslog](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-syslog-source) | [twitter-sentiment](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-twitter-sentiment-processor) | [jdbc](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-jdbc-sink) | | +| 
[s3](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-aws-s3-source) | [splitter](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-splitter) | [cassandra](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-cassandra-sink) | | +| [loggregator](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-loggregator-source) | [image-recognition](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-image-recognition-processor) | [router](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-router-sink) | | +|[triggertask (deprecated)](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-trigger-source)| [bridge](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-bridge-processor) | [redis-pubsub](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-redis-sink) | | +| [twitterstream](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-twitterstream-source) | [pmml](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-pmml-processor) | [file](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-file-sink) | | +| [mongodb](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-mongodb-source) | 
[python-jython](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-python-jython-processor) | [websocket](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-websocket-sink) | | +| [gemfire-cq](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-gemfire-cq-source) | [groovy-transform](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-groovy-transform-processor) | [s3](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-aws-s3-sink) | | +| [http](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-http-source) | [httpclient](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-httpclient-processor) | [rabbit](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-rabbit-sink) | | +| [rabbit](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-rabbit-source) | [filter](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-filter-processor) | [counter](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-counter-sink) | | +| [tcp](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-tcp-source) | [pose-estimation](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-pose-estimation-processor) | 
[pgcopy](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-pgcopy-sink) | | +| [trigger](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-trigger-source) | [grpc](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-grpc-processor) | [gpfdist](https://github.com/spring-cloud-stream-app-starters/gpfdist) | | +| [mqtt](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-mqtt-source) | [groovy-filter](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-groovy-filter-processor) | [sftp](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-sftp-sink) | | +| [tcp-client](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-tcp-client-source) | [aggregator](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-aggregator-processor) |[task-launcher-dataflow](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-task-launcher-dataflow-sink)| | +| [mail](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-mail-source) | [counter](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-counter-processor) | [hdfs](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-hdfs-sink) | | +| 
[jdbc](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-jdbc-source) | [tensorflow](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-tensorflow-processor) | [tcp](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-tcp-sink) | | +| [gemfire](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-gemfire-source) |[tasklaunchrequest-transform (deprecated)](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-tasklaunchrequest-transform)| [gemfire](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-gemfire-sink) | | +| [file](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-file-source) | [object-detection](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-object-detection-processor) | | | +| [sftp-dataflow](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/htmlsingle/#spring-cloud-stream-modules-sftp-dataflow-source) | | | | + +# Architecture + +## 7. Introduction + +Spring Cloud Data Flow simplifies the development and deployment of applications that are focused on data-processing use cases. + +The [Architecture](https://dataflow.spring.io/docs/concepts/architecture/) section of the microsite describes Data Flow’s architecture. + +# Configuration + +## 8. 
Maven + +If you want to override specific Maven configuration properties (remote repositories, proxies, and others) or run the Data Flow Server behind a proxy, +you need to specify those properties as command-line arguments when you start the Data Flow Server, as shown in the following example: + +``` +$ java -jar spring-cloud-dataflow-server-2.9.2.jar --spring.config.additional-location=/home/joe/maven.yml +``` + +The preceding command assumes a `maven.yml` similar to the following: + +``` +maven: + localRepository: mylocal + remote-repositories: + repo1: + url: https://repo1 + auth: + username: user1 + password: pass1 + snapshot-policy: + update-policy: daily + checksum-policy: warn + release-policy: + update-policy: never + checksum-policy: fail + repo2: + url: https://repo2 + policy: + update-policy: always + checksum-policy: fail + proxy: + host: proxy1 + port: "9010" + auth: + username: proxyuser1 + password: proxypass1 +``` + +By default, the protocol is set to `http`. You can omit the auth properties if the proxy does not need a username and password. Also, by default, the maven `localRepository` is set to `${user.home}/.m2/repository/`. +As shown in the preceding example, you can specify the remote repositories along with their authentication (if needed). If the remote repositories are behind a proxy, you can specify the proxy properties, as shown in the preceding example. + +You can specify the repository policies for each remote repository configuration, as shown in the preceding example. +The key `policy` is applicable to both the `snapshot` and the `release` repository policies. + +See the [Repository Policies](https://github.com/apache/maven-resolver/blob/master/maven-resolver-api/src/main/java/org/eclipse/aether/repository/RepositoryPolicy.java) topic for the list of +supported repository policies. + +As these are Spring Boot `@ConfigurationProperties`, you can specify them by adding them to the `SPRING_APPLICATION_JSON` environment variable. 
The following example shows how the JSON is structured: + +``` +$ SPRING_APPLICATION_JSON=' +{ + "maven": { + "local-repository": null, + "remote-repositories": { + "repo1": { + "url": "https://repo1", + "auth": { + "username": "repo1user", + "password": "repo1pass" + } + }, + "repo2": { + "url": "https://repo2" + } + }, + "proxy": { + "host": "proxyhost", + "port": 9018, + "auth": { + "username": "proxyuser", + "password": "proxypass" + } + } + } +} +' +``` + +### 8.1. Wagon + +There is a limited support for using `Wagon` transport with Maven. Currently, this +exists to support *preemptive* authentication with `http`-based repositories +and needs to be enabled manually. + +Wagon-based `http` transport is enabled by setting the `maven.use-wagon` property +to `true`. Then you can enable *preemptive* authentication for each remote +repository. Configuration loosely follows the similar patterns found in[HttpClient HTTP Wagon](https://maven.apache.org/guides/mini/guide-http-settings.html). +At the time of this writing, documentation in Maven’s own site is slightly misleading +and missing most of the possible configuration options. + +The `maven.remote-repositories..wagon.http` namespace contains all Wagon`http` related settings, and the keys directly under it map to supported `http` methods — namely, `all`, `put`, `get` and `head`, as in Maven’s own configuration. +Under these method configurations, you can then set various options, such as`use-preemptive`. 
A simple *preemptive* configuration to send an auth +header with all requests to a specified remote repository would look like the following example: + +``` +maven: + use-wagon: true + remote-repositories: + springRepo: + url: https://repo.example.org + wagon: + http: + all: + use-preemptive: true + auth: + username: user + password: password +``` + +Instead of configuring `all` methods, you can tune settings for `get` and `head` requests only, as follows: + +``` +maven: + use-wagon: true + remote-repositories: + springRepo: + url: https://repo.example.org + wagon: + http: + get: + use-preemptive: true + head: + use-preemptive: true + use-default-headers: true + connection-timeout: 1000 + read-timeout: 1000 + headers: + sample1: sample2 + params: + http.socket.timeout: 1000 + http.connection.stalecheck: true + auth: + username: user + password: password +``` + +There are settings for `use-default-headers`, `connection-timeout`, `read-timeout`, request `headers`, and HttpClient `params`. For more about parameters, +see [Wagon ConfigurationUtils](https://github.com/apache/maven-wagon/blob/master/wagon-providers/wagon-http-shared/src/main/java/org/apache/maven/wagon/shared/http/ConfigurationUtils.java). + +## 9. Security + +By default, the Data Flow server is unsecured and runs on an unencrypted HTTP connection. +You can secure your REST endpoints as well as the Data Flow Dashboard by enabling HTTPS +and requiring clients to authenticate with [OAuth 2.0](https://oauth.net/2/). + +| |Appendix [Azure](#appendix-identity-provider-azure) contains more information on how to
setup *Azure Active Directory* integration.| +|---|------------------------------------------------------------------------------------------------------------------------------------| + +| |By default, the REST endpoints (administration, management, and health) as well as the Dashboard UI do not require authenticated access.| +|---|----------------------------------------------------------------------------------------------------------------------------------------| + +While you can theoretically choose any OAuth provider in conjunction with +Spring Cloud Data Flow, we recommend using the[CloudFoundry User Account and Authentication (UAA) Server](https://github.com/cloudfoundry/uaa). + +Not only is the UAA OpenID certified and is used by Cloud Foundry, but you can +also use it in local stand-alone deployment scenarios. Furthermore, the UAA not +only provides its own user store, but it also provides comprehensive LDAP integration. + +#### 9.1. Enabling HTTPS + +By default, the dashboard, management, and health endpoints use HTTP as a transport. +You can switch to HTTPS by adding a certificate to your configuration in`application.yml`, as shown in the following example: + +``` +server: + port: 8443 (1) + ssl: + key-alias: yourKeyAlias (2) + key-store: path/to/keystore (3) + key-store-password: yourKeyStorePassword (4) + key-password: yourKeyPassword (5) + trust-store: path/to/trust-store (6) + trust-store-password: yourTrustStorePassword (7) +``` + +|**1**| As the default port is `9393`, you may choose to change the port to a more common HTTPs-typical port. | +|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| The alias (or name) under which the key is stored in the keystore. | +|**3**| The path to the keystore file. You can also specify classpath resources, by using the classpath prefix - for example: `classpath:path/to/keystore`. 
| +|**4**| The password of the keystore. | +|**5**| The password of the key. | +|**6**|The path to the truststore file. You can also specify classpath resources, by using the classpath prefix - for example: `classpath:path/to/trust-store`| +|**7**| The password of the trust store. | + +| |If HTTPS is enabled, it completely replaces HTTP as the protocol over
which the REST endpoints and the Data Flow Dashboard interact. Plain HTTP requests
fail. Therefore, make sure that you configure your Shell accordingly.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Using Self-Signed Certificates + +For testing purposes or during development, it might be convenient to create self-signed certificates. +To get started, execute the following command to create a certificate: + +``` +$ keytool -genkey -alias dataflow -keyalg RSA -keystore dataflow.keystore \ + -validity 3650 -storetype JKS \ + -dname "CN=localhost, OU=Spring, O=Pivotal, L=Kailua-Kona, ST=HI, C=US" (1) + -keypass dataflow -storepass dataflow +``` + +|**1**|`CN` is the important parameter here. It should match the domain you are trying to access - for example, `localhost`.| +|-----|---------------------------------------------------------------------------------------------------------------------| + +Then add the following lines to your `application.yml` file: + +``` +server: + port: 8443 + ssl: + enabled: true + key-alias: dataflow + key-store: "/your/path/to/dataflow.keystore" + key-store-type: jks + key-store-password: dataflow + key-password: dataflow +``` + +This is all you need to do for the Data Flow Server. Once you start the server, +you should be able to access it at `[localhost:8443/](https://localhost:8443/)`. +As this is a self-signed certificate, you should hit a warning in your browser, which +you need to ignore. + +| |*Never* use self-signed certificates in production.| +|---|---------------------------------------------------| + +##### Self-Signed Certificates and the Shell + +By default, self-signed certificates are an issue for the shell, and additional steps +are necessary to make the shell work with self-signed certificates. Two options +are available: + +* Add the self-signed certificate to the JVM truststore. 
+ +* Skip certificate validation. + +###### Adding the Self-signed Certificate to the JVM Truststore + +In order to use the JVM truststore option, you need to +export the previously created certificate from the keystore, as follows: + +``` +$ keytool -export -alias dataflow -keystore dataflow.keystore -file dataflow_cert -storepass dataflow +``` + +Next, you need to create a truststore that the shell can use, as follows: + +``` +$ keytool -importcert -keystore dataflow.truststore -alias dataflow -storepass dataflow -file dataflow_cert -noprompt +``` + +Now you are ready to launch the Data Flow Shell with the following JVM arguments: + +``` +$ java -Djavax.net.ssl.trustStorePassword=dataflow \ + -Djavax.net.ssl.trustStore=/path/to/dataflow.truststore \ + -Djavax.net.ssl.trustStoreType=jks \ + -jar spring-cloud-dataflow-shell-2.9.2.jar +``` + +| |If you run into trouble establishing a connection over SSL, you can enable additional
logging by using and setting the `javax.net.debug` JVM argument to `ssl`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Do not forget to target the Data Flow Server with the following command: + +``` +dataflow:> dataflow config server https://localhost:8443/ +``` + +###### Skipping Certificate Validation + +Alternatively, you can also bypass the certification validation by providing the +optional `--dataflow.skip-ssl-validation=true` command-line parameter. + +If you set this command-line parameter, the shell accepts any (self-signed) SSL +certificate. + +| |If possible, you should avoid using this option. Disabling the trust manager
defeats the purpose of SSL and makes your application vulnerable to man-in-the-middle attacks.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 9.2. Authentication by using OAuth 2.0 + +To support authentication and authorization, Spring Cloud Data +Flow uses [OAuth 2.0](https://oauth.net/2/). +It lets you integrate Spring Cloud Data Flow into Single Sign On (SSO) +environments. + +| |As of Spring Cloud Data Flow 2.0, OAuth2 is the only mechanism
for providing authentication and authorization.| +|---|------------------------------------------------------------------------------------------------------------------| + +The following OAuth2 Grant Types are used: + +* **Authorization Code**: Used for the GUI (browser) integration. Visitors are redirected to your OAuth Service for authentication + +* **Password**: Used by the shell (and the REST integration), so visitors can log in with username and password + +* **Client Credentials**: Retrieves an access token directly from your OAuth provider and passes it to the Data Flow server by using the Authorization HTTP header + +| |Currently, Spring Cloud Data Flow uses opaque tokens and not transparent
tokens (JWT).| +|---|------------------------------------------------------------------------------------------| + +You can access the REST endpoints in two ways: + +* **Basic authentication**, which uses the *Password Grant Type* to authenticate with your OAuth2 service + +* **Access token**, which uses the Client *Credentials Grant Type* + +| |When you set up authentication, you really should enable HTTPS
as well, especially in production environments.| +|---|------------------------------------------------------------------------------------------------------------------| + +You can turn on OAuth2 authentication by adding the following to `application.yml` or by setting +environment variables. The following example shows the minimal setup needed for[CloudFoundry User Account and Authentication (UAA) Server](https://github.com/cloudfoundry/uaa): + +``` +spring: + security: + oauth2: (1) + client: + registration: + uaa: (2) + client-id: myclient + client-secret: mysecret + redirect-uri: '{baseUrl}/login/oauth2/code/{registrationId}' + authorization-grant-type: authorization_code + scope: + - openid (3) + provider: + uaa: + jwk-set-uri: http://uaa.local:8080/uaa/token_keys + token-uri: http://uaa.local:8080/uaa/oauth/token + user-info-uri: http://uaa.local:8080/uaa/userinfo (4) + user-name-attribute: user_name (5) + authorization-uri: http://uaa.local:8080/uaa/oauth/authorize + resourceserver: + opaquetoken: + introspection-uri: http://uaa.local:8080/uaa/introspect (6) + client-id: dataflow + client-secret: dataflow +``` + +|**1**| Providing this property activates OAuth2 security. | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| The provider ID. You can specify more than one provider. | +|**3**|As the UAA is an OpenID provider, you must at least specify the `openid` scope.
If your provider also provides additional scopes to control the role assignments,
you must specify those scopes here as well.| +|**4**| OpenID endpoint. Used to retrieve user information such as the username. Mandatory. | +|**5**| The JSON property of the response that contains the username. | +|**6**| Used to introspect and validate a directly passed-in token. Mandatory. | + +You can verify that basic authentication is working properly by using curl, as follows: + +``` +curl -u myusername:mypassword http://localhost:9393/ -H 'Accept: application/json' +``` + +As a result, you should see a list of available REST endpoints. + +| |When you access the Root URL with a web browser and
security enabled, you are redirected to the Dashboard UI. To see the
list of REST endpoints, specify the `application/json` `Accept` header. Also be sure
to add the `Accept` header by using tools such as[Postman](https://chrome.google.com/webstore/detail/postman/fhbjgbiflinjbdggehcddcbncdddomop?hl=en) (Chrome)
or [RESTClient](https://addons.mozilla.org/en-GB/firefox/addon/restclient/) (Firefox).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Besides Basic Authentication, you can also provide an access token, to +access the REST API. To do so, retrieve an +OAuth2 Access Token from your OAuth2 provider and pass that access token to +the REST Api by using the **Authorization** HTTP header, as follows: + +``` +$ curl -H "Authorization: Bearer " http://localhost:9393/ -H 'Accept: application/json' +``` + +#### 9.3. Customizing Authorization + +The preceding content mostly deals with authentication — that is, how to assess +the identity of the user. In this section, we discuss the available**authorization** options — that is, who can do what. + +The authorization rules are defined in `dataflow-server-defaults.yml` (part of +the Spring Cloud Data Flow Core module). + +Because the determination of security roles is environment-specific, +Spring Cloud Data Flow, by default, assigns all roles to authenticated OAuth2 +users. The `DefaultDataflowAuthoritiesExtractor` class is used for that purpose. + +Alternatively, you can have Spring Cloud Data Flow map OAuth2 scopes to Data Flow roles by +setting the boolean property `map-oauth-scopes` for your provider to `true` (the default is `false`). +For example, if your provider’s ID is `uaa`, the property would be`spring.cloud.dataflow.security.authorization.provider-role-mappings.uaa.map-oauth-scopes`. + +For more details, see the chapter on [Role Mappings](#configuration-security-role-mapping). 
+ +You can also customize the role-mapping behavior by providing your own Spring bean definition that +extends Spring Cloud Data Flow’s `AuthorityMapper` interface. In that case, +the custom bean definition takes precedence over the default one provided by +Spring Cloud Data Flow. + +The default scheme uses seven roles to protect the [REST endpoints](#api-guide)that Spring Cloud Data Flow exposes: + +* **ROLE\_CREATE**: For anything that involves creating, such as creating streams or tasks + +* **ROLE\_DEPLOY**: For deploying streams or launching tasks + +* **ROLE\_DESTROY**: For anything that involves deleting streams, tasks, and so on. + +* **ROLE\_MANAGE**: For Boot management endpoints + +* **ROLE\_MODIFY**: For anything that involves mutating the state of the system + +* **ROLE\_SCHEDULE**: For scheduling related operation (such as scheduling a task) + +* **ROLE\_VIEW**: For anything that relates to retrieving state + +As mentioned earlier in this section, all authorization-related default settings are specified +in `dataflow-server-defaults.yml`, which is part of the Spring Cloud Data Flow Core +Module. Nonetheless, you can override those settings, if desired — for example, +in `application.yml`. The configuration takes the form of a YAML list (as some +rules may have precedence over others). Consequently, you need to copy and paste +the whole list and tailor it to your needs (as there is no way to merge lists). 
+ +| |Always refer to your version of the `application.yml` file, as the following snippet may be outdated.| +|---|-----------------------------------------------------------------------------------------------------| + +The default rules are as follows: + +``` +spring: + cloud: + dataflow: + security: + authorization: + enabled: true + loginUrl: "/" + permit-all-paths: "/authenticate,/security/info,/assets/**,/dashboard/logout-success-oauth.html,/favicon.ico" + rules: + # About + + - GET /about => hasRole('ROLE_VIEW') + + # Audit + + - GET /audit-records => hasRole('ROLE_VIEW') + - GET /audit-records/** => hasRole('ROLE_VIEW') + + # Boot Endpoints + + - GET /management/** => hasRole('ROLE_MANAGE') + + # Apps + + - GET /apps => hasRole('ROLE_VIEW') + - GET /apps/** => hasRole('ROLE_VIEW') + - DELETE /apps/** => hasRole('ROLE_DESTROY') + - POST /apps => hasRole('ROLE_CREATE') + - POST /apps/** => hasRole('ROLE_CREATE') + - PUT /apps/** => hasRole('ROLE_MODIFY') + + # Completions + + - GET /completions/** => hasRole('ROLE_VIEW') + + # Job Executions & Batch Job Execution Steps && Job Step Execution Progress + + - GET /jobs/executions => hasRole('ROLE_VIEW') + - PUT /jobs/executions/** => hasRole('ROLE_MODIFY') + - GET /jobs/executions/** => hasRole('ROLE_VIEW') + - GET /jobs/thinexecutions => hasRole('ROLE_VIEW') + + # Batch Job Instances + + - GET /jobs/instances => hasRole('ROLE_VIEW') + - GET /jobs/instances/* => hasRole('ROLE_VIEW') + + # Running Applications + + - GET /runtime/streams => hasRole('ROLE_VIEW') + - GET /runtime/streams/** => hasRole('ROLE_VIEW') + - GET /runtime/apps => hasRole('ROLE_VIEW') + - GET /runtime/apps/** => hasRole('ROLE_VIEW') + + # Stream Definitions + + - GET /streams/definitions => hasRole('ROLE_VIEW') + - GET /streams/definitions/* => hasRole('ROLE_VIEW') + - GET /streams/definitions/*/related => hasRole('ROLE_VIEW') + - POST /streams/definitions => hasRole('ROLE_CREATE') + - DELETE /streams/definitions/* => hasRole('ROLE_DESTROY') 
+ - DELETE /streams/definitions => hasRole('ROLE_DESTROY') + + # Stream Deployments + + - DELETE /streams/deployments/* => hasRole('ROLE_DEPLOY') + - DELETE /streams/deployments => hasRole('ROLE_DEPLOY') + - POST /streams/deployments/** => hasRole('ROLE_MODIFY') + - GET /streams/deployments/** => hasRole('ROLE_VIEW') + + # Stream Validations + + - GET /streams/validation/ => hasRole('ROLE_VIEW') + - GET /streams/validation/* => hasRole('ROLE_VIEW') + + # Stream Logs + - GET /streams/logs/* => hasRole('ROLE_VIEW') + + # Task Definitions + + - POST /tasks/definitions => hasRole('ROLE_CREATE') + - DELETE /tasks/definitions/* => hasRole('ROLE_DESTROY') + - GET /tasks/definitions => hasRole('ROLE_VIEW') + - GET /tasks/definitions/* => hasRole('ROLE_VIEW') + + # Task Executions + + - GET /tasks/executions => hasRole('ROLE_VIEW') + - GET /tasks/executions/* => hasRole('ROLE_VIEW') + - POST /tasks/executions => hasRole('ROLE_DEPLOY') + - POST /tasks/executions/* => hasRole('ROLE_DEPLOY') + - DELETE /tasks/executions/* => hasRole('ROLE_DESTROY') + + # Task Schedules + + - GET /tasks/schedules => hasRole('ROLE_VIEW') + - GET /tasks/schedules/* => hasRole('ROLE_VIEW') + - GET /tasks/schedules/instances => hasRole('ROLE_VIEW') + - GET /tasks/schedules/instances/* => hasRole('ROLE_VIEW') + - POST /tasks/schedules => hasRole('ROLE_SCHEDULE') + - DELETE /tasks/schedules/* => hasRole('ROLE_SCHEDULE') + + # Task Platform Account List */ + + - GET /tasks/platforms => hasRole('ROLE_VIEW') + + # Task Validations + + - GET /tasks/validation/ => hasRole('ROLE_VIEW') + - GET /tasks/validation/* => hasRole('ROLE_VIEW') + + # Task Logs + - GET /tasks/logs/* => hasRole('ROLE_VIEW') + + # Tools + + - POST /tools/** => hasRole('ROLE_VIEW') +``` + +The format of each line is the following: + +``` +HTTP_METHOD URL_PATTERN '=>' SECURITY_ATTRIBUTE +``` + +where: + +* HTTP\_METHOD is one HTTP method (such as PUT or GET), capital case. + +* URL\_PATTERN is an Ant-style URL pattern. 
+
+* SECURITY\_ATTRIBUTE is a SpEL expression. See [Expression-Based Access Control](https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#el-access).
+
+* Each of those is separated by one or more whitespace characters (spaces, tabs, and so on).
+
+Be mindful that the above is a YAML list, not a map (thus the use of '-' dashes
+at the start of each line), and that it lives under the `spring.cloud.dataflow.security.authorization.rules` key.
+
+##### Authorization — Shell and Dashboard Behavior
+
+When security is enabled, the dashboard and the shell are role-aware,
+meaning that, depending on the assigned roles, not all functionality may be visible.
+
+For instance, shell commands for which the user does not have the necessary roles
+are marked as unavailable.
+
+| |Currently, the shell’s `help` command lists commands that are unavailable.
Please track the following issue: [github.com/spring-projects/spring-shell/issues/115](https://github.com/spring-projects/spring-shell/issues/115)| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Conversely, for the Dashboard, the UI does not show pages or page elements for +which the user is not authorized. + +##### Securing the Spring Boot Management Endpoints + +When security is enabled, the[Spring Boot HTTP Management Endpoints](https://docs.spring.io/spring-boot/docs/2.1.1.RELEASE/reference/html/production-ready-monitoring.html)are secured in the same way as the other REST endpoints. The management REST endpoints +are available under `/management` and require the `MANAGEMENT` role. + +The default configuration in `dataflow-server-defaults.yml` is as follows: + +``` +management: + endpoints: + web: + base-path: /management + security: + roles: MANAGE +``` + +| |Currently, you should not customize the default management path.| +|---|----------------------------------------------------------------| + +#### 9.4. Setting up UAA Authentication + +For local deployment scenarios, we recommend using the [CloudFoundry User +Account and Authentication (UAA) Server](https://github.com/cloudfoundry/uaa), which is [OpenID certified](https://openid.net/certification/). +While the UAA is used by [Cloud Foundry](https://www.cloudfoundry.org/), +it is also a fully featured stand alone OAuth2 server with enterprise features, such as[LDAP integration](https://github.com/cloudfoundry/uaa/blob/develop/docs/UAA-LDAP.md). + +##### Requirements + +You need to check out, build and run UAA. To do so, make sure that you: + +* Use Java 8. + +* Have [Git](https://git-scm.com/) installed. + +* Have the [CloudFoundry UAA Command Line Client](https://github.com/cloudfoundry/cf-uaac) installed. 
+ +* Use a different host name for UAA when running on the same machine — for example, `[uaa/](http://uaa/)`. + +If you run into issues installing *uaac*, you may have to set the `GEM_HOME` environment +variable: + +``` +export GEM_HOME="$HOME/.gem" +``` + +You should also ensure that `~/.gem/gems/cf-uaac-4.2.0/bin` has been added to your path. + +##### Prepare UAA for JWT + +As the UAA is an OpenID provider and uses JSON Web Tokens (JWT), it needs to have +a private key for signing those JWTs: + +``` +openssl genrsa -out signingkey.pem 2048 +openssl rsa -in signingkey.pem -pubout -out verificationkey.pem +export JWT_TOKEN_SIGNING_KEY=$(cat signingkey.pem) +export JWT_TOKEN_VERIFICATION_KEY=$(cat verificationkey.pem) +``` + +Later, once the UAA is started, you can see the keys when you access `[uaa:8080/uaa/token_keys](http://uaa:8080/uaa/token_keys)`. + +| |Here, the `uaa` in the URL `[uaa:8080/uaa/token_keys](http://uaa:8080/uaa/token_keys)` is the hostname.| +|---|-------------------------------------------------------------------------------------------------------| + +##### Download and Start UAA + +To download and install UAA, run the following commands: + +``` +git clone https://github.com/pivotal/uaa-bundled.git +cd uaa-bundled +./mvnw clean install +java -jar target/uaa-bundled-1.0.0.BUILD-SNAPSHOT.jar +``` + +The configuration of the UAA is driven by a YAML file `uaa.yml`, or you can script the configuration +using the UAA Command Line Client: + +``` +uaac target http://uaa:8080/uaa +uaac token client get admin -s adminsecret +uaac client add dataflow \ + --name dataflow \ + --secret dataflow \ + --scope cloud_controller.read,cloud_controller.write,openid,password.write,scim.userids,sample.create,sample.view,dataflow.create,dataflow.deploy,dataflow.destroy,dataflow.manage,dataflow.modify,dataflow.schedule,dataflow.view \ + --authorized_grant_types password,authorization_code,client_credentials,refresh_token \ + --authorities 
uaa.resource,dataflow.create,dataflow.deploy,dataflow.destroy,dataflow.manage,dataflow.modify,dataflow.schedule,dataflow.view,sample.view,sample.create\
+ --redirect_uri http://localhost:9393/login \
+ --autoapprove openid
+
+uaac group add "sample.view"
+uaac group add "sample.create"
+uaac group add "dataflow.view"
+uaac group add "dataflow.create"
+
+uaac user add springrocks -p mysecret --emails [email protected]
+uaac user add vieweronly -p mysecret --emails [email protected]
+
+uaac member add "sample.view" springrocks
+uaac member add "sample.create" springrocks
+uaac member add "dataflow.view" springrocks
+uaac member add "dataflow.create" springrocks
+uaac member add "sample.view" vieweronly
+```
+
+The preceding script sets up the dataflow client as well as two users:
+
+* User *springrocks* has both scopes: `sample.view` and `sample.create`.
+
+* User *vieweronly* has only one scope: `sample.view`.
+
+Once added, you can quickly double-check that the UAA has the users created:
+
+```
+curl -v -d"username=springrocks&password=mysecret&client_id=dataflow&grant_type=password" -u "dataflow:dataflow" http://uaa:8080/uaa/oauth/token -d 'token_format=opaque'
+```
+
+The preceding command should produce output similar to the following:
+
+```
+* Trying 127.0.0.1...
+* TCP_NODELAY set +* Connected to uaa (127.0.0.1) port 8080 (#0) +* Server auth using Basic with user 'dataflow' +> POST /uaa/oauth/token HTTP/1.1 +> Host: uaa:8080 +> Authorization: Basic ZGF0YWZsb3c6ZGF0YWZsb3c= +> User-Agent: curl/7.54.0 +> Accept: */* +> Content-Length: 97 +> Content-Type: application/x-www-form-urlencoded +> +* upload completely sent off: 97 out of 97 bytes +< HTTP/1.1 200 +< Cache-Control: no-store +< Pragma: no-cache +< X-XSS-Protection: 1; mode=block +< X-Frame-Options: DENY +< X-Content-Type-Options: nosniff +< Content-Type: application/json;charset=UTF-8 +< Transfer-Encoding: chunked +< Date: Thu, 31 Oct 2019 21:22:59 GMT +< +* Connection #0 to host uaa left intact +{"access_token":"0329c8ecdf594ee78c271e022138be9d","token_type":"bearer","id_token":"eyJhbGciOiJSUzI1NiIsImprdSI6Imh0dHBzOi8vbG9jYWxob3N0OjgwODAvdWFhL3Rva2VuX2tleXMiLCJraWQiOiJsZWdhY3ktdG9rZW4ta2V5IiwidHlwIjoiSldUIn0.eyJzdWIiOiJlZTg4MDg4Ny00MWM2LTRkMWQtYjcyZC1hOTQ4MmFmNGViYTQiLCJhdWQiOlsiZGF0YWZsb3ciXSwiaXNzIjoiaHR0cDovL2xvY2FsaG9zdDo4MDkwL3VhYS9vYXV0aC90b2tlbiIsImV4cCI6MTU3MjYwMDE3OSwiaWF0IjoxNTcyNTU2OTc5LCJhbXIiOlsicHdkIl0sImF6cCI6ImRhdGFmbG93Iiwic2NvcGUiOlsib3BlbmlkIl0sImVtYWlsIjoic3ByaW5ncm9ja3NAc29tZXBsYWNlLmNvbSIsInppZCI6InVhYSIsIm9yaWdpbiI6InVhYSIsImp0aSI6IjAzMjljOGVjZGY1OTRlZTc4YzI3MWUwMjIxMzhiZTlkIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsImNsaWVudF9pZCI6ImRhdGFmbG93IiwiY2lkIjoiZGF0YWZsb3ciLCJncmFudF90eXBlIjoicGFzc3dvcmQiLCJ1c2VyX25hbWUiOiJzcHJpbmdyb2NrcyIsInJldl9zaWciOiJlOTkyMDQxNSIsInVzZXJfaWQiOiJlZTg4MDg4Ny00MWM2LTRkMWQtYjcyZC1hOTQ4MmFmNGViYTQiLCJhdXRoX3RpbWUiOjE1NzI1NTY5Nzl9.bqYvicyCPB5cIIu_2HEe5_c7nSGXKw7B8-reTvyYjOQ2qXSMq7gzS4LCCQ-CMcb4IirlDaFlQtZJSDE-_UsM33-ThmtFdx--TujvTR1u2nzot4Pq5A_ThmhhcCB21x6-RNNAJl9X9uUcT3gKfKVs3gjE0tm2K1vZfOkiGhjseIbwht2vBx0MnHteJpVW6U0pyCWG_tpBjrNBSj9yLoQZcqrtxYrWvPHaa9ljxfvaIsOnCZBGT7I552O1VRHWMj1lwNmRNZy5koJFPF7SbhiTM8eLkZVNdR3GEiofpzLCfoQXrr52YbiqjkYT94t3wz5C6u1JtBtgc2vq60HmR45bvg","refresh_token":"6ee95d017ada408697f2d19b04f7aa6c-r","expires_i
n":43199,"scope":"scim.userids openid sample.create cloud_controller.read password.write cloud_controller.write sample.view","jti":"0329c8ecdf594ee78c271e022138be9d"}
+```
+
+By using the `token_format` parameter, you can request the token to be either:
+
+* opaque
+
+* jwt
+
+## 10. Configuration - Local
+
+### 10.1. Feature Toggles
+
+Spring Cloud Data Flow Server offers a specific set of features that can be enabled/disabled when launching. These features include all the lifecycle operations and REST endpoints (server and client implementations, including the shell and the UI) for:
+
+* Streams (requires Skipper)
+
+* Tasks
+
+* Task Scheduler
+
+One can enable and disable these features by setting the following boolean properties when launching the Data Flow server:
+
+* `spring.cloud.dataflow.features.streams-enabled`
+
+* `spring.cloud.dataflow.features.tasks-enabled`
+
+* `spring.cloud.dataflow.features.schedules-enabled`
+
+By default, streams (which require Skipper) and tasks are enabled, and Task Scheduler is disabled.
+
+The REST `/about` endpoint provides information on the features that have been enabled and disabled.
+
+### 10.2. Database
+
+A relational database is used to store stream and task definitions as well as the state of executed tasks.
+Spring Cloud Data Flow provides schemas for **H2**, **MySQL**, **Oracle**, **PostgreSQL**, **Db2**, and **SQL Server**. The schema is automatically created when the server starts.
+
+By default, Spring Cloud Data Flow offers an embedded instance of the **H2** database. The **H2** database is good
+for development purposes but is not recommended for production use.
+
+| |**H2** database is not supported as an external mode.|
+|---|-----------------------------------------------------|
+
+The JDBC drivers for **MySQL** (through the MariaDB driver), **PostgreSQL**, **SQL Server**, and embedded **H2** are available without additional configuration.
+If you are using any other database, then you need to put the corresponding JDBC driver jar on the classpath of the server. + +The database properties can be passed as environment variables or command-line arguments to the Data Flow Server. + +#### 10.2.1. MySQL + +The following example shows how to define a MySQL database connection using MariaDB driver. + +``` +java -jar spring-cloud-dataflow-server/target/spring-cloud-dataflow-server-2.9.2.jar \ + --spring.datasource.url=jdbc:mysql://localhost:3306/mydb \ + --spring.datasource.username= \ + --spring.datasource.password= \ + --spring.datasource.driver-class-name=org.mariadb.jdbc.Driver +``` + +MySQL versions up to *5.7* can be used with a MariaDB driver. Starting from version *8.0* MySQL’s own driver has to be used. + +``` +java -jar spring-cloud-dataflow-server/target/spring-cloud-dataflow-server-2.9.2.jar \ + --spring.datasource.url=jdbc:mysql://localhost:3306/mydb \ + --spring.datasource.username= \ + --spring.datasource.password= \ + --spring.datasource.driver-class-name=com.mysql.jdbc.Driver +``` + +| |Due to licensing restrictions we’re unable to bundle MySQL driver. You need to add it to
server’s classpath yourself.|
+|---|-------------------------------------------------------------------------------------------------------------------------|
+
+#### 10.2.2. MariaDB
+
+The following example shows how to define a MariaDB database connection with command-line arguments:
+
+```
+java -jar spring-cloud-dataflow-server/target/spring-cloud-dataflow-server-2.9.2.jar \
+ --spring.datasource.url=jdbc:mariadb://localhost:3306/mydb?useMysqlMetadata=true \
+ --spring.datasource.username= \
+ --spring.datasource.password= \
+ --spring.datasource.driver-class-name=org.mariadb.jdbc.Driver
+```
+
+Starting with the MariaDB v2.4.1 connector release, it is required to also add `useMysqlMetadata=true` to the JDBC URL. This is a required workaround until MySQL and MariaDB are entirely treated as two
+different databases.
+
+MariaDB version *10.3* introduced support for real database sequences, which is yet another breaking
+change, while the tooling around these databases fully supports MySQL and MariaDB as separate database
+types. The workaround is to use an older Hibernate dialect that does not try to use sequences.
+
+```
+java -jar spring-cloud-dataflow-server/target/spring-cloud-dataflow-server-2.9.2.jar \
+ --spring.datasource.url=jdbc:mariadb://localhost:3306/mydb?useMysqlMetadata=true \
+ --spring.datasource.username= \
+ --spring.datasource.password= \
+ --spring.jpa.properties.hibernate.dialect=org.hibernate.dialect.MariaDB102Dialect \
+ --spring.datasource.driver-class-name=org.mariadb.jdbc.Driver
+```
+
+#### 10.2.3. PostgreSQL
+
+The following example shows how to define a PostgreSQL database connection with command line arguments:
+
+```
+java -jar spring-cloud-dataflow-server/target/spring-cloud-dataflow-server-2.9.2.jar \
+ --spring.datasource.url=jdbc:postgresql://localhost:5432/mydb \
+ --spring.datasource.username= \
+ --spring.datasource.password= \
+ --spring.datasource.driver-class-name=org.postgresql.Driver
+```
+
+#### 10.2.4. 
SQL Server + +The following example shows how to define a SQL Server database connection with command line arguments: + +``` +java -jar spring-cloud-dataflow-server/target/spring-cloud-dataflow-server-2.9.2.jar \ + --spring.datasource.url='jdbc:sqlserver://localhost:1433;databaseName=mydb' \ + --spring.datasource.username= \ + --spring.datasource.password= \ + --spring.datasource.driver-class-name=com.microsoft.sqlserver.jdbc.SQLServerDriver +``` + +#### 10.2.5. Db2 + +The following example shows how to define a Db2 database connection with command line arguments: + +``` +java -jar spring-cloud-dataflow-server/target/spring-cloud-dataflow-server-2.9.2.jar \ + --spring.datasource.url=jdbc:db2://localhost:50000/mydb \ + --spring.datasource.username= \ + --spring.datasource.password= \ + --spring.datasource.driver-class-name=com.ibm.db2.jcc.DB2Driver +``` + +| |Due to licensing restrictions we’re unable to bundle Db2 driver. You need to add it to
server’s classpath yourself.|
+|---|-----------------------------------------------------------------------------------------------------------------------|
+
+#### 10.2.6. Oracle
+
+The following example shows how to define an Oracle database connection with command line arguments:
+
+```
+java -jar spring-cloud-dataflow-server/target/spring-cloud-dataflow-server-2.9.2.jar \
+ --spring.datasource.url=jdbc:oracle:thin:@localhost:1521/MYDB \
+ --spring.datasource.username= \
+ --spring.datasource.password= \
+ --spring.datasource.driver-class-name=oracle.jdbc.OracleDriver
+```
+
+| |Due to licensing restrictions we’re unable to bundle the Oracle driver. You need to add it to
server’s classpath yourself.|
+|---|--------------------------------------------------------------------------------------------------------------------------|
+
+#### 10.2.7. Adding a Custom JDBC Driver
+
+To add a custom driver for the database (for example, Oracle), you should rebuild the Data Flow Server and add the dependency to the Maven `pom.xml` file.
+You need to modify the Maven `pom.xml` of the `spring-cloud-dataflow-server` module.
+There are GA release tags in the GitHub repository, so you can switch to the desired GA tag to add the drivers on the production-ready codebase.
+
+To add a custom JDBC driver dependency for the Spring Cloud Data Flow server:
+
+1. Select the tag that corresponds to the version of the server you want to rebuild and clone the github repository.
+
+2. Edit the spring-cloud-dataflow-server/pom.xml and, in the `dependencies` section, add the dependency for the database driver required. In the following example, an Oracle driver has been chosen:
+
+```
+
+...
+
+ com.oracle.jdbc
+ ojdbc8
+ 12.2.0.1
+
+...
+
+```
+
+1. Build the application as described in [Building Spring Cloud Data Flow](#building).
+
+You can also provide default values when rebuilding the server by adding the necessary properties to the dataflow-server.yml file,
+as shown in the following example for PostgreSQL:
+
+```
+spring:
+  datasource:
+    url: jdbc:postgresql://localhost:5432/mydb
+    username: myuser
+    password: mypass
+    driver-class-name: org.postgresql.Driver
+```
+
+1. Alternatively, you can build a custom Spring Cloud Data Flow server with your build files.
+ There are examples of custom server builds in our [samples repo](https://github.com/spring-cloud/spring-cloud-dataflow-samples/tree/master/custom-dataflow-builds) if there is a need to add driver jars.
+
+#### 10.2.8. Schema Handling
+
+By default, the database schema is managed with *Flyway*, which is convenient if it’s
+possible to give enough permissions to a database user.
+
+Here’s a description of what happens when the *Skipper* server is started:
+
+* Flyway checks if the `flyway_schema_history` table exists.
+
+* Does a baseline (to version 1) if the schema is not empty, as *Dataflow* tables
+ may be in place if a shared DB is used.
+
+* If the schema is empty, Flyway assumes it is starting from scratch.
+
+* Goes through all needed schema migrations.
+
+Here’s a description of what happens when the *Dataflow* server is started:
+
+* Flyway checks if the `flyway_schema_history_dataflow` table exists.
+
+* Does a baseline (to version 1) if the schema is not empty, as *Skipper* tables
+ may be in place if a shared DB is used.
+
+* If the schema is empty, Flyway assumes it is starting from scratch.
+
+* Goes through all needed schema migrations.
+
+* Due to historical reasons, if we detect that the schema is from the *1.7.x* line,
+ we convert it to the structures needed from *2.0.x* onwards and fully
+ continue with Flyway.
+
+| |We have schema DDLs in our source code [schemas](https://github.com/spring-cloud/spring-cloud-dataflow/tree/master/spring-cloud-dataflow-server-core/src/main/resources/schemas) which can be used manually if *Flyway* is disabled by using the configuration `spring.flyway.enabled=false`. This is a good option if a company’s databases
are restricted and i.e. applications itself cannot create schemas.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 10.3. Deployer Properties + +You can use the following configuration properties of the [Local deployer](https://github.com/spring-cloud/spring-cloud-deployer-local) to customize how Streams and Tasks are deployed. +When deploying using the Data Flow shell, you can use the syntax `deployer..local.`. See below for an example shell usage. +These properties are also used when configuring [Local Task Platforms](#configuration-local-tasks) in the Data Flow server and local platforms in Skipper for deploying Streams. + +|Deployer Property Name| Description | Default Value | +|----------------------|--------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------| +|workingDirectoriesRoot| Directory in which all created processes will run and create log files. | java.io.tmpdir | +| envVarsToInherit |Array of regular expression patterns for environment variables that are passed to launched applications.|\<"TMP", "LANG", "LANGUAGE", "LC\_.\*", "PATH", "SPRING\_APPLICATION\_JSON"\> on windows and \<"TMP", "LANG", "LANGUAGE", "LC\_.\*", "PATH"\> on Unix| +| deleteFilesOnExit | Whether to delete created files and directories on JVM exit. | true | +| javaCmd | Command to run java | java | +| shutdownTimeout | Max number of seconds to wait for app shutdown. 
| 30 | +| javaOpts | The Java Options to pass to the JVM, e.g -Dtest=foo | \ | +| inheritLogging | allow logging to be redirected to the output stream of the process that triggered child process. | false | +| debugPort | Port for remote debugging | \ | + +As an example, to set Java options for the time application in the `ticktock` stream, use the following stream deployment properties. + +``` +dataflow:> stream create --name ticktock --definition "time --server.port=9000 | log" +dataflow:> stream deploy --name ticktock --properties "deployer.time.local.javaOpts=-Xmx2048m -Dtest=foo" +``` + +As a convenience, you can set the `deployer.memory` property to set the Java option `-Xmx`, as shown in the following example: + +``` +dataflow:> stream deploy --name ticktock --properties "deployer.time.memory=2048m" +``` + +At deployment time, if you specify an `-Xmx` option in the `deployer..local.javaOpts` property in addition to a value of the `deployer..local.memory` option, the value in the `javaOpts` property has precedence. Also, the `javaOpts` property set when deploying the application has precedence over the Data Flow Server’s `spring.cloud.deployer.local.javaOpts` property. + +### 10.4. Logging + +Spring Cloud Data Flow `local` server is automatically configured to use `RollingFileAppender` for logging. +The logging configuration is located on the classpath contained in a file named `logback-spring.xml`. 
+ +By default, the log file is configured to use: + +``` + +``` + +with the logback configuration for the `RollingPolicy`: + +``` + + ${LOG_FILE}.log + + + ${LOG_FILE}.${LOG_FILE_ROLLING_FILE_NAME_PATTERN:-%d{yyyy-MM-dd}}.%i.gz + ${LOG_FILE_MAX_SIZE:-100MB} + ${LOG_FILE_MAX_HISTORY:-30} + ${LOG_FILE_TOTAL_SIZE_CAP:-500MB} + + + ${FILE_LOG_PATTERN} + + +``` + +To check the `java.io.tmpdir` for the current Spring Cloud Data Flow Server `local` server, + +``` +jinfo | grep "java.io.tmpdir" +``` + +If you want to change or override any of the properties `LOG_FILE`, `LOG_PATH`, `LOG_TEMP`, `LOG_FILE_MAX_SIZE`, `LOG_FILE_MAX_HISTORY` and `LOG_FILE_TOTAL_SIZE_CAP`, please set them as system properties. + +### 10.5. Streams + +Data Flow Server delegates to the Skipper server the management of the Stream’s lifecycle. Set the configuration property `spring.cloud.skipper.client.serverUri` to the location of Skipper, e.g. + +``` +$ java -jar spring-cloud-dataflow-server-2.9.2.jar --spring.cloud.skipper.client.serverUri=https://192.51.100.1:7577/api +``` + +The configuration of show streams are deployed and to which platforms, is done by configuration of `platform accounts` on the Skipper server. +See the documentation on [platforms](https://docs.spring.io/spring-cloud-skipper/docs/current/reference/htmlsingle/#platforms) for more information. + +### 10.6. Tasks + +The Data Flow server is responsible for deploying Tasks. +Tasks that are launched by Data Flow write their state to the same database that is used by the Data Flow server. +For Tasks which are Spring Batch Jobs, the job and step execution data is also stored in this database. +As with streams launched by Skipper, Tasks can be launched to multiple platforms. 
+If no platform is defined, a platform named `default` is created using the default values of the class [LocalDeployerProperties](https://github.com/spring-cloud/spring-cloud-deployer-local/blob/master/spring-cloud-deployer-local/src/main/java/org/springframework/cloud/deployer/spi/local/LocalDeployerProperties.java), which is summarized in the table [Local Deployer Properties](#configuration-local-deployer).
+
+To configure new platform accounts for the local platform, provide an entry under the `spring.cloud.dataflow.task.platform.local` section in your `application.yaml` file or via another Spring Boot supported mechanism.
+In the following example, two local platform accounts named `localDev` and `localDevDebug` are created.
+The keys such as `shutdownTimeout` and `javaOpts` are local deployer properties.
+
+```
+spring:
+  cloud:
+    dataflow:
+      task:
+        platform:
+          local:
+            accounts:
+              localDev:
+                shutdownTimeout: 60
+                javaOpts: "-Dtest=foo -Xmx1024m"
+              localDevDebug:
+                javaOpts: "-Xdebug -Xmx2048m"
+```
+
+| |Defining one platform as `default` allows you to skip using `platformName` where its use would otherwise be required.|
+|---|------------------------------------------------------------------------------------------------------------------------|
+
+When launching a task, pass the value of the platform account name using the task launch option `--platformName`. If you do not pass a value for `platformName`, the value `default` will be used.
+
+| |When deploying a task to multiple platforms, the configuration of the task needs to connect to the same database as the Data Flow Server.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------|
+
+You can configure the Data Flow server that is running locally to deploy tasks to Cloud Foundry or Kubernetes.
See the sections on [Cloud Foundry Task Platform Configuration](#configuration-cloudfoundry-tasks) and [Kubernetes Task Platform Configuration](#configuration-kubernetes-tasks) for more information. + +Detailed examples for launching and scheduling tasks across multiple platforms, are available in this section [Multiple Platform Support for Tasks](https://dataflow.spring.io/docs/recipes/multi-platform-deployment/) on [dataflow.spring.io](http://dataflow.spring.io). + +##### Start Skipper + +``` +git clone https://github.com/spring-cloud/spring-cloud-skipper.git +cd spring-cloud/spring-cloud-skipper +./mvnw clean package -DskipTests=true +java -jar spring-cloud-skipper-server/target/spring-cloud-skipper-server-2.2.0.BUILD-SNAPSHOT.jar +``` + +##### Start Spring Cloud Data Flow + +``` +git clone https://github.com/spring-cloud/spring-cloud-dataflow.git +cd spring-cloud-dataflow +./mvnw clean package -DskipTests=true +cd .. +``` + +Create a yaml file scdf.yml with the following contents: + +``` +spring: + cloud: + dataflow: + security: + authorization: + provider-role-mappings: + uaa: + map-oauth-scopes: true + role-mappings: + ROLE_CREATE: foo.create + ROLE_DEPLOY: foo.create + ROLE_DESTROY: foo.create + ROLE_MANAGE: foo.create + ROLE_MODIFY: foo.create + ROLE_SCHEDULE: foo.create + ROLE_VIEW: foo.view + security: + oauth2: + client: + registration: + uaa: + redirect-uri: '{baseUrl}/login/oauth2/code/{registrationId}' + authorization-grant-type: authorization_code + client-id: dataflow + client-secret: dataflow + scope: (1) + - openid + - foo.create + - foo.view + provider: + uaa: + jwk-set-uri: http://uaa:8080/uaa/token_keys + token-uri: http://uaa:8080/uaa/oauth/token + user-info-uri: http://uaa:8080/uaa/userinfo (2) + user-name-attribute: user_name + authorization-uri: http://uaa:8080/uaa/oauth/authorize + resourceserver: + opaquetoken: (3) + introspection-uri: http://uaa:8080/uaa/introspect + client-id: dataflow + client-secret: dataflow +``` + +|**1**|If you use 
scopes to identify roles, please make sure to also request
the relevant scopes, e.g `dataflow.view`, `dataflow.create` and don’t forget to request the `openid` scope| +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Used to retrieve profile information, e.g. username for display purposes (mandatory) | +|**3**| Used for token introspection and validation (mandatory) | + +The `introspection-uri` property is especially important when passing an externally retrieved (opaque) +OAuth Access Token to Spring Cloud Data Flow. In that case Spring Cloud Data Flow will take the OAuth Access, +and use the UAA’s [Introspect Token Endpoint](https://docs.cloudfoundry.org/api/uaa/version/74.4.0/index.html#introspect-token)to not only check the validity of the token but also retrieve the associated OAuth scopes from the UAA + +Finally startup Spring Cloud Data Flow: + +``` +java -jar spring-cloud-dataflow/spring-cloud-dataflow-server/target/spring-cloud-dataflow-server-2.4.0.BUILD-SNAPSHOT.jar --spring.config.additional-location=scdf.yml +``` + +##### Role Mappings + +By default all roles are assigned to users that login to Spring Cloud Data Flow. +However, you can set the property: + +`spring.cloud.dataflow.security.authorization.provider-role-mappings.uaa.map-oauth-scopes: true` + +This will instruct the underlying `DefaultAuthoritiesExtractor` to map +OAuth scopes to the respective authorities. 
The following scopes are supported:
+
+* Scope `dataflow.create` maps to the `CREATE` role
+
+* Scope `dataflow.deploy` maps to the `DEPLOY` role
+
+* Scope `dataflow.destroy` maps to the `DESTROY` role
+
+* Scope `dataflow.manage` maps to the `MANAGE` role
+
+* Scope `dataflow.modify` maps to the `MODIFY` role
+
+* Scope `dataflow.schedule` maps to the `SCHEDULE` role
+
+* Scope `dataflow.view` maps to the `VIEW` role
+
+Additionally, you can also map arbitrary scopes to each of the Data Flow roles:
+
+```
+spring:
+  cloud:
+    dataflow:
+      security:
+        authorization:
+          provider-role-mappings:
+            uaa:
+              map-oauth-scopes: true (1)
+              role-mappings:
+                ROLE_CREATE: dataflow.create (2)
+                ROLE_DEPLOY: dataflow.deploy
+                ROLE_DESTROY: dataflow.destroy
+                ROLE_MANAGE: dataflow.manage
+                ROLE_MODIFY: dataflow.modify
+                ROLE_SCHEDULE: dataflow.schedule
+                ROLE_VIEW: dataflow.view
+```
+
+|**1**| Enables explicit mapping support from OAuth scopes to Data Flow roles |
+|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|**2**|When role mapping support is enabled, you must provide a mapping for
all 7 Spring Cloud Data Flow roles **ROLE\_CREATE**, **ROLE\_DEPLOY**, **ROLE\_DESTROY**, **ROLE\_MANAGE**, **ROLE\_MODIFY**, **ROLE\_SCHEDULE**, **ROLE\_VIEW**.|
+
+| |You can assign an OAuth scope to multiple Spring Cloud Data Flow roles, giving you flexibility regarding the granularity of your authorization configuration.|
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 10.6.4. LDAP Authentication
+
+LDAP Authentication (Lightweight Directory Access Protocol) is indirectly
+provided by Spring Cloud Data Flow using the UAA. The UAA itself provides [comprehensive LDAP support](https://github.com/cloudfoundry/uaa/blob/develop/docs/UAA-LDAP.md).
+
+| |While you may use your own OAuth2 authentication server, the LDAP support
documented here requires using the UAA as authentication server. For any
other provider, please consult the documentation for that particular provider.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The UAA supports authentication against an LDAP (Lightweight Directory Access Protocol) +server using the following modes: + +* [Direct bind](https://github.com/cloudfoundry/uaa/blob/develop/docs/UAA-LDAP.md#ldap-search-and-bind) + +* [Search and bind](https://github.com/cloudfoundry/uaa/blob/develop/docs/UAA-LDAP.md#ldap-bind) + +* [Search and Compare](https://github.com/cloudfoundry/uaa/blob/develop/docs/UAA-LDAP.md#ldap-search-and-compare) + +| |When integrating with an external identity provider such as LDAP, authentication
within the UAA becomes **chained**. UAA first attempts to authenticate with
a user’s credentials against the UAA user store before the external provider,
LDAP. For more information, see[Chained Authentication](https://github.com/cloudfoundry/uaa/blob/develop/docs/UAA-LDAP.md#chained-authentication)in the *User Account and Authentication LDAP Integration* GitHub documentation.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### LDAP Role Mapping + +The OAuth2 authentication server (UAA), provides comprehensive support +for [mapping LDAP groups to OAuth scopes](https://github.com/cloudfoundry/uaa/blob/develop/docs/UAA-LDAP.md#scopes). + +The following options exist: + +* `ldap/ldap-groups-null.xml` No groups will be mapped + +* `ldap/ldap-groups-as-scopes.xml` Group names will be retrieved from an LDAP attribute. E.g. `CN` + +* `ldap/ldap-groups-map-to-scopes.xml` Groups will be mapped to UAA groups using the external\_group\_mapping table + +These values are specified via the configuration property `ldap.groups.file controls`. Under the covers +these values reference a Spring XML configuration file. + +| |During test and development it might be necessary to make frequent changes
to LDAP groups and users and see those reflected in the UAA. However, user
information is cached for the duration of the login. The following script
helps to retrieve the updated information quickly:

```
#!/bin/bash
uaac token delete --all
uaac target http://localhost:8080/uaa
uaac token owner get cf -s "" -p
uaac token client get admin -s adminsecret
uaac user get
```| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### LDAP Security and UAA Example Application + +In order to get up and running quickly and to help you understand the security architecture, we +provide the [LDAP Security and UAA Example](https://github.com/spring-cloud/spring-cloud-dataflow-samples/tree/master/security-ldap-uaa-example)on GitHub. + +| |This is solely a demo/example application and shall not be used in production.| +|---|------------------------------------------------------------------------------| + +The setup consists of: + +* Spring Cloud Data Flow Server + +* Skipper Server + +* CloudFoundry User Account and Authentication (UAA) Server + +* Lightweight Directory Access Protocol (LDAP) Server (provided by [Apache Directory Server](https://directory.apache.org/) (ApacheDS)) + +Ultimately, as part of this example, you will learn how to configure and launch +a Composed Task using this security setup. + +#### 10.6.5. Spring Security OAuth2 Resource/Authorization Server Sample + +For local testing and development, you may also use the Resource and Authorization +Server support provided by[Spring Security OAuth](https://projects.spring.io/spring-security-oauth/). It +allows you to easily create your own (very basic) OAuth2 Server with the following simple annotations: + +* `@EnableResourceServer` + +* `@EnableAuthorizationServer` + +| |In fact the UAA uses Spring Security OAuth2 under the covers, thus the basic endpoints
are the same.| +|---|--------------------------------------------------------------------------------------------------------| + +A working example application can be found at:[https://github.com/ghillert/oauth-test-server/](https://github.com/ghillert/oauth-test-server/) + +Clone the project and configure Spring Cloud Data Flow with the respective Client ID and Client Secret: + +``` +security: + oauth2: + client: + client-id: myclient + client-secret: mysecret + access-token-uri: http://127.0.0.1:9999/oauth/token + user-authorization-uri: http://127.0.0.1:9999/oauth/authorize + resource: + user-info-uri: http://127.0.0.1:9999/me + token-info-uri: http://127.0.0.1:9999/oauth/check_token +``` + +| |This sample application is not intended for production use| +|---|----------------------------------------------------------| + +#### 10.6.6. Data Flow Shell Authentication + +When using the Shell, the credentials can either be provided via username and password +or by specifying a *credentials-provider* command. If your OAuth2 provider supports +the *Password* Grant Type you can start the *Data Flow Shell* with: + +``` +$ java -jar spring-cloud-dataflow-shell-2.9.2.jar \ + --dataflow.uri=http://localhost:9393 \ (1) + --dataflow.username=my_username \ (2) + --dataflow.password=my_password \ (3) + --skip-ssl-validation true \ (4) +``` + +|**1**| Optional, defaults to [localhost:9393](http://localhost:9393). | +|-----|----------------------------------------------------------------------------------------------------------------| +|**2**| Mandatory. | +|**3**| If the password is not provided, the user is prompted for it. | +|**4**|Optional, defaults to `false`, ignores certificate errors (when using self-signed certificates). Use cautiously!| + +| |Keep in mind that when authentication for Spring Cloud Data Flow is enabled,
the underlying OAuth2 provider **must** support the *Password* OAuth2 Grant Type
if you want to use the Shell via username/password authentication.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +From within the Data Flow Shell you can also provide credentials by using the following command: + +``` +server-unknown:>dataflow config server \ + --uri http://localhost:9393 \ (1) + --username myuser \ (2) + --password mysecret \ (3) + --skip-ssl-validation true \ (4) +``` + +|**1**| Optional, defaults to [localhost:9393](http://localhost:9393). | +|-----|-------------------------------------------------------------------------------------------| +|**2**| Mandatory.. | +|**3**| If security is enabled, and the password is not provided, the user is prompted for it. | +|**4**|Optional, ignores certificate errors (when using self-signed certificates). Use cautiously!| + +The following image shows a typical shell command to connect to and authenticate a Data +Flow Server: + +![Target and Authenticate with the Data Flow Server from within the Shell](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-security-shell-target.png) + +Figure 1. Target and Authenticate with the Data Flow Server from within the Shell + +Once successfully targeted, you should see the following output: + +``` +dataflow:>dataflow config info +dataflow config info + +╔═══════════╤═══════════════════════════════════════╗ +║Credentials│[username='my_username, password=****']║ +╠═══════════╪═══════════════════════════════════════╣ +║Result │ ║ +║Target │http://localhost:9393 ║ +╚═══════════╧═══════════════════════════════════════╝ +``` + +Alternatively, you can specify the *credentials-provider* command in order to +pass-in a bearer token directly, instead of providing a username and password. 
+This works from within the shell or by providing the `--dataflow.credentials-provider-command` command-line argument when starting the Shell. + +| |When using the *credentials-provider* command, please be aware that your
specified command **must** return a *Bearer token* (Access Token prefixed with *Bearer*).
For instance, in Unix environments the following simplistic command can be used:

```
$ java -jar spring-cloud-dataflow-shell-2.9.2.jar \
--dataflow.uri=http://localhost:9393 \
--dataflow.credentials-provider-command="echo Bearer 123456789"
```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 10.7. About Configuration + +The Spring Cloud Data Flow About Restful API result contains a display name, +version, and, if specified, a URL for each of the major dependencies that +comprise Spring Cloud Data Flow. The result (if enabled) also contains the +sha1 and or sha256 checksum values for the shell dependency. The information +that is returned for each of the dependencies is configurable by setting the following +properties: + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-core.name: the + name to be used for the core. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-core.version: + the version to be used for the core. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-dashboard.name: the + name to be used for the dashboard. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-dashboard.version: + the version to be used for the dashboard. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-implementation.name: the + name to be used for the implementation. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-implementation.version: + the version to be used for the implementation. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-shell.name: the + name to be used for the shell. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-shell.version: + the version to be used for the shell. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-shell.url: + the URL to be used for downloading the shell dependency. 
+ +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-shell.checksum-sha1: the sha1 + checksum value that is returned with the shell dependency info. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-shell.checksum-sha256: + the sha256 checksum value that is returned with the shell dependency info. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-shell.checksum-sha1-url: + if the `spring.cloud.dataflow.version-info.spring-cloud-dataflow-shell.checksum-sha1`is not specified, SCDF uses the contents of the file specified at this URL for the checksum. + +* spring.cloud.dataflow.version-info.spring-cloud-dataflow-shell.checksum-sha256-url: + if the `spring.cloud.dataflow.version-info.spring-cloud-dataflow-shell.checksum-sha256`is not specified, SCDF uses the contents of the file specified at this URL for the checksum. + +#### 10.7.1. Enabling Shell Checksum values + +By default, checksum values are not displayed for the shell dependency. If +you need this feature enabled, set the`spring.cloud.dataflow.version-info.dependency-fetch.enabled` property to true. + +#### 10.7.2. Reserved Values for URLs + +There are reserved values (surrounded by curly braces) that you can insert into +the URL that will make sure that the links are up to date: + +* repository: if using a build-snapshot, milestone, or release candidate of + Data Flow, the repository refers to the repo-spring-io repository. Otherwise, it + refers to Maven Central. + +* version: Inserts the version of the jar/pom. 
+ +For example,`[myrepository/org/springframework/cloud/spring-cloud-dataflow-shell/{version}/spring-cloud-dataflow-shell-{version}.jar](https://myrepository/org/springframework/cloud/spring-cloud-dataflow-shell/{version}/spring-cloud-dataflow-shell-{version}.jar)`produces`[myrepository/org/springframework/cloud/spring-cloud-dataflow-shell/1.2.3.RELEASE/spring-cloud-dataflow-shell-1.2.3.RELEASE.jar](https://myrepository/org/springframework/cloud/spring-cloud-dataflow-shell/1.2.3.RELEASE/spring-cloud-dataflow-shell-1.2.3.RELEASE.jar)`if you were using the 1.2.3.RELEASE version of the Spring Cloud Data Flow Shell + +## 11. Configuration - Cloud Foundry + +This section describes how to configure Spring Cloud Data Flow server’s features, such as security and which relational database to use. +It also describes how to configure Spring Cloud Data Flow shell’s features. + +### 11.1. Feature Toggles + +Data Flow server offers a specific set of features that you can enable or disable when launching. These features include all the lifecycle operations and REST endpoints (server, client implementations including Shell and the UI) for: + +* Streams + +* Tasks + +You can enable or disable these features by setting the following boolean properties when you launch the Data Flow server: + +* `spring.cloud.dataflow.features.streams-enabled` + +* `spring.cloud.dataflow.features.tasks-enabled` + +By default, all features are enabled. + +The REST endpoint (`/features`) provides information on the enabled and disabled features. + +### 11.2. Deployer Properties + +You can use the following configuration properties of the Data Flow server’s [Cloud Foundry deployer](https://github.com/spring-cloud/spring-cloud-deployer-cloudfoundry) to customize how applications are deployed. +When deploying with the Data Flow shell, you can use the syntax `deployer..cloudfoundry.`. See below for an example shell usage. 
+These properties are also used when configuring the [Cloud Foundry Task platforms](#configuration-cloudfoundry-tasks) in the Data Flow server and and Kubernetes platforms in Skipper for deploying Streams. + +| Deployer Property Name | Description | Default Value | +|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| services | The names of services to bind to the deployed application. | \ | +| host | The host name to use as part of the route. | hostname derived by Cloud Foundry | +| domain | The domain to use when mapping routes for the application. | \ | +| routes | The list of routes that the application should be bound to. Mutually exclusive with host and domain. | \ | +| buildpack | The buildpack to use for deploying the application. Deprecated use buildpacks. | [github.com/cloudfoundry/java-buildpack.git#v4.29.1](https://github.com/cloudfoundry/java-buildpack.git#v4.29.1) | +| buildpacks | The list of buildpacks to use for deploying the application. | [github.com/cloudfoundry/java-buildpack.git#v4.29.1](https://github.com/cloudfoundry/java-buildpack.git#v4.29.1) | +| memory | The amount of memory to allocate. Default unit is mebibytes, 'M' and 'G" suffixes supported | 1024m | +| disk | The amount of disk space to allocate. Default unit is mebibytes, 'M' and 'G" suffixes supported. | 1024m | +| healthCheck | The type of health check to perform on deployed application. 
Values can be HTTP, NONE, PROCESS, and PORT | PORT | +| healthCheckHttpEndpoint | The path that the http health check will use, | /health | +| healthCheckTimeout | The timeout value for health checks in seconds. | 120 | +| instances | The number of instances to run. | 1 | +|enableRandomAppNamePrefix| Flag to enable prefixing the app name with a random prefix. | true | +| apiTimeout | Timeout for blocking API calls, in seconds. | 360 | +| statusTimeout | Timeout for status API operations in milliseconds | 5000 | +|useSpringApplicationJson | Flag to indicate whether application properties are fed into `SPRING_APPLICATION_JSON` or as separate environment variables. | true | +| stagingTimeout | Timeout allocated for staging the application. | 15 minutes | +| startupTimeout | Timeout allocated for starting the application. | 5 minutes | +| appNamePrefix | String to use as prefix for name of deployed application | The Spring Boot property `spring.application.name` of the application that is using the deployer library. | +| deleteRoutes | Whether to also delete routes when un-deploying an application. | true | +| javaOpts | The Java Options to pass to the JVM, e.g -Dtest=foo | \ | +| pushTasksEnabled | Whether to push task applications or assume that the application already exists when launched. | true | +|autoDeleteMavenArtifacts | Whether to automatically delete Maven artifacts from the local repository when deployed. | true | +| env.\ |Defines a top level environment variable. This is useful for customizing [Java build pack configuration](https://github.com/cloudfoundry/java-buildpack#configuration-and-extension) which must be included as top level environment variables in the application manifest, as the Java build pack does not recognize `SPRING_APPLICATION_JSON`.|The deployer determines if the app has [Java CfEnv](https://github.com/pivotal-cf/java-cfenv) in its classpath. 
If so, it applies the required [configuration](https://github.com/pivotal-cf/java-cfenv#pushing-your-application-to-cloud-foundry).| + +Here are some examples using the Cloud Foundry deployment properties: + +* You can set the buildpack that is used to deploy each application. For example, to use the Java offline buildback, + set the following environment variable: + +``` +cf set-env dataflow-server SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_BUILDPACKS java_buildpack_offline +``` + +* Setting `buildpack` is now deprecated in favour of `buildpacks` which allows you to pass on more than one if needed. More about this can be found from [How Buildpacks Work](https://docs.cloudfoundry.org/buildpacks/understand-buildpacks.html). + +* You can customize the health check mechanism used by Cloud Foundry to assert whether apps are running by using the `SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_HEALTH_CHECK` environment variable. The current supported options + are `http` (the default), `port`, and `none`. + +You can also set environment variables that specify the HTTP-based health check endpoint and timeout: `SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_HEALTH_CHECK_ENDPOINT` and `SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_HEALTH_CHECK_TIMEOUT`, respectively. These default to `/health` (the Spring Boot default location) and `120` seconds. + +* You can also specify deployment properties by using the DSL. 
For instance, if you want to set the allocated memory for the `http` application to 512m and also bind a mysql service to the `jdbc` application, you can run the following commands: + +``` +dataflow:> stream create --name mysqlstream --definition "http | jdbc --tableName=names --columns=name" +dataflow:> stream deploy --name mysqlstream --properties "deployer.http.memory=512, deployer.jdbc.cloudfoundry.services=mysql" +``` + +| |You can configure these settings separately for stream and task apps. To alter settings for tasks,
substitute `TASK` for `STREAM` in the property name, as the following example shows:

```
cf set-env dataflow-server SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_MEMORY 512
```| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 11.3. Tasks + +The Data Flow server is responsible for deploying Tasks. +Tasks that are launched by Data Flow write their state to the same database that is used by the Data Flow server. +For Tasks which are Spring Batch Jobs, the job and step execution data is also stored in this database. +As with Skipper, Tasks can be launched to multiple platforms. +When Data Flow is running on Cloud Foundry, a Task platform must be defined. +To configure new platform accounts that target Cloud Foundry, provide an entry under the `spring.cloud.dataflow.task.platform.cloudfoundry` section in your `application.yaml` file or via another Spring Boot supported mechanism. +In the following example, two Cloud Foundry platform accounts named `dev` and `qa` are created. +The keys such as `memory` and `disk` are [Cloud Foundry Deployer Properties](#configuration-cloudfoundry-deployer). 
+ +``` +spring: + cloud: + dataflow: + task: + platform: + cloudfoundry: + accounts: + dev: + connection: + url: https://api.run.pivotal.io + org: myOrg + space: mySpace + domain: cfapps.io + username: [email protected] + password: drowssap + skipSslValidation: false + deployment: + memory: 512m + disk: 2048m + instances: 4 + services: rabbit,mysql + appNamePrefix: dev1 + qa: + connection: + url: https://api.run.pivotal.io + org: myOrgQA + space: mySpaceQA + domain: cfapps.io + username: [email protected] + password: drowssap + skipSslValidation: true + deployment: + memory: 756m + disk: 724m + instances: 2 + services: rabbitQA,mysqlQA + appNamePrefix: qa1 +``` + +| |By defining one platform as `default` allows you to skip using `platformName` where its use would otherwise be required.| +|---|------------------------------------------------------------------------------------------------------------------------| + +When launching a task, pass the value of the platform account name using the task launch option `--platformName` If you do not pass a value for `platformName`, the value `default` will be used. + +| |When deploying a task to multiple platforms, the configuration of the task needs to connect to the same database as the Data Flow Server.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------| + +You can configure the Data Flow server that is on Cloud Foundry to deploy tasks to Cloud Foundry or Kubernetes. See the section on [Kubernetes Task Platform Configuration](#configuration-kubernetes-tasks) for more information. + +Detailed examples for launching and scheduling tasks across multiple platforms, are available in this section [Multiple Platform Support for Tasks](https://dataflow.spring.io/docs/recipes/multi-platform-deployment/) on [dataflow.spring.io](http://dataflow.spring.io). + +### 11.4. 
Application Names and Prefixes + +To help avoid clashes with routes across spaces in Cloud Foundry, a naming strategy that provides a random prefix to a +deployed application is available and is enabled by default. You can override the [default configurations](https://github.com/spring-cloud/spring-cloud-deployer-cloudfoundry#application-name-settings-and-deployments)and set the respective properties by using `cf set-env` commands. + +For instance, if you want to disable the randomization, you can override it by using the following command: + +``` +cf set-env dataflow-server SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_ENABLE_RANDOM_APP_NAME_PREFIX false +``` + +### 11.5. Custom Routes + +As an alternative to a random name or to get even more control over the hostname used by the deployed apps, you can use +custom deployment properties, as the following example shows: + +``` +dataflow:>stream create foo --definition "http | log" + +sdataflow:>stream deploy foo --properties "deployer.http.cloudfoundry.domain=mydomain.com, + deployer.http.cloudfoundry.host=myhost, + deployer.http.cloudfoundry.route-path=my-path" +``` + +The preceding example binds the `http` app to the `[myhost.mydomain.com/my-path](https://myhost.mydomain.com/my-path)` URL. Note that this +example shows **all** of the available customization options. In practice, you can use only one or two out of the three. + +### 11.6. Docker Applications + +Starting with version 1.2, it is possible to register and deploy Docker based apps as part of streams and tasks by using +Data Flow for Cloud Foundry. + +If you use Spring Boot and RabbitMQ-based Docker images, you can provide a common deployment property +to facilitate binding the apps to the RabbitMQ service. 
Assuming your RabbitMQ service is named `rabbit`, you can provide the following: + +``` +cf set-env dataflow-server SPRING_APPLICATION_JSON '{"spring.cloud.dataflow.applicationProperties.stream.spring.rabbitmq.addresses": "${vcap.services.rabbit.credentials.protocols.amqp.uris}"}' +``` + +For Spring Cloud Task apps, you can use something similar to the following, if you use a database service instance named `mysql`: + +``` +cf set-env SPRING_DATASOURCE_URL '${vcap.services.mysql.credentials.jdbcUrl}' +cf set-env SPRING_DATASOURCE_USERNAME '${vcap.services.mysql.credentials.username}' +cf set-env SPRING_DATASOURCE_PASSWORD '${vcap.services.mysql.credentials.password}' +cf set-env SPRING_DATASOURCE_DRIVER_CLASS_NAME 'org.mariadb.jdbc.Driver' +``` + +For non-Java or non-Boot applications, your Docker app must parse the `VCAP_SERVICES` variable in order to bind to any available services. + +| |Passing application properties

When using non-Boot applications, chances are that you want to pass the application properties by using traditional
environment variables, as opposed to using the special `SPRING_APPLICATION_JSON` variable. To do so, set the
following variables for streams and tasks, respectively:

```
SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_USE_SPRING_APPLICATION_JSON=false
```| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 11.7. Application-level Service Bindings + +When deploying streams in Cloud Foundry, you can take advantage of application-specific service bindings, so not all +services are globally configured for all the apps orchestrated by Spring Cloud Data Flow. + +For instance, if you want to provide a `mysql` service binding only for the `jdbc` application in the following stream +definition, you can pass the service binding as a deployment property: + +``` +dataflow:>stream create --name httptojdbc --definition "http | jdbc" +dataflow:>stream deploy --name httptojdbc --properties "deployer.jdbc.cloudfoundry.services=mysqlService" +``` + +where `mysqlService` is the name of the service specifically bound only to the `jdbc` application and the `http`application does not get the binding by this method. + +If you have more than one service to bind, they can be passed as comma-separated items +(for example: `deployer.jdbc.cloudfoundry.services=mysqlService,someService`). + +### 11.8. Configuring Service binding parameters + +The CloudFoundry API supports providing configuration parameters when binding a service instance. Some service brokers require or +recommend binding configuration. 
+For example, binding the [Google Cloud Platform service](https://docs.pivotal.io/partners/gcp-sb/using.html) using the CF CLI looks something like: + +``` +cf bind-service my-app my-google-bigquery-example -c '{"role":"bigquery.user"}' +``` + +Likewise the [NFS Volume Service](https://docs.cloudfoundry.org/devguide/services/using-vol-services.html) supports binding configuration such as: + +``` +cf bind-service my-app nfs_service_instance -c '{"uid":"1000","gid":"1000","mount":"/var/volume1","readonly":true}' +``` + +Starting with version 2.0, Data Flow for Cloud Foundry allows binding configuration parameters to be provided in the app level or server level `cloudfoundry.services` deployment property. For example, to bind to the nfs service, as above: + +``` +dataflow:> stream deploy --name mystream --properties "deployer..cloudfoundry.services='nfs_service_instance uid:1000,gid:1000,mount:/var/volume1,readonly:true'" +``` + +The format is intended to be compatible with the Data Flow DSL parser. +Generally, the `cloudfoundry.services` deployment property accepts a comma delimited value. +Since a comma is also used to separate configuration parameters, and to avoid white space issues, any item including configuration parameters must be enclosed in single quotes. Valid values include things like: + +``` +rabbitmq,'nfs_service_instance uid:1000,gid:1000,mount:/var/volume1,readonly:true',mysql,'my-google-bigquery-example role:bigquery.user' +``` + +| |Spaces are permitted within single quotes and `=` may be used instead of `:` to delimit key-value pairs.| +|---|--------------------------------------------------------------------------------------------------------| + +### 11.9. User-provided Services + +In addition to marketplace services, Cloud Foundry supports [User-provided Services](https://docs.cloudfoundry.org/devguide/services/user-provided.html) (UPS). 
Throughout this reference manual, +regular services have been mentioned, but there is nothing precluding the use of User-provided Services as well, whether for use as the +messaging middleware (for example, if you want to use an external Apache Kafka installation) or for use by some +of the stream applications (for example, an Oracle Database). + +Now we review an example of extracting and supplying the connection credentials from a UPS. + +The following example shows a sample UPS setup for Apache Kafka: + +``` +cf create-user-provided-service kafkacups -p '{”brokers":"HOST:PORT","zkNodes":"HOST:PORT"}' +``` + +The UPS credentials are wrapped within `VCAP_SERVICES`, and they can be supplied directly in the stream definition, as +the following example shows. + +``` +stream create fooz --definition "time | log" +stream deploy fooz --properties "app.time.spring.cloud.stream.kafka.binder.brokers=${vcap.services.kafkacups.credentials.brokers},app.time.spring.cloud.stream.kafka.binder.zkNodes=${vcap.services.kafkacups.credentials.zkNodes},app.log.spring.cloud.stream.kafka.binder.brokers=${vcap.services.kafkacups.credentials.brokers},app.log.spring.cloud.stream.kafka.binder.zkNodes=${vcap.services.kafkacups.credentials.zkNodes}" +``` + +### 11.10. Database Connection Pool + +As of Data Flow 2.0, the Spring Cloud Connector library is no longer used to create the DataSource. +The library [java-cfenv](https://github.com/pivotal-cf/java-cfenv) is now used which allows you to set [Spring Boot properties](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-connect-to-production-database) to configure the connection pool. + +### 11.11. Maximum Disk Quota + +By default, every application in Cloud Foundry starts with 1G disk quota and this can be adjusted to a default maximum of +2G. The default maximum can also be overridden up to 10G by using Pivotal Cloud Foundry’s (PCF) Ops Manager GUI. 
+ +This configuration is relevant for Spring Cloud Data Flow because every task deployment is composed of applications +(typically Spring Boot uber-jar’s), and those applications are resolved from a remote maven repository. After resolution, +the application artifacts are downloaded to the local Maven Repository for caching and reuse. With this happening in the background, +the default disk quota (1G) can fill up rapidly, especially when we experiment with streams that +are made up of unique applications. In order to overcome this disk limitation and depending +on your scaling requirements, you may want to change the default maximum from 2G to 10G. Let’s review the +steps to change the default maximum disk quota allocation. + +#### 11.11.1. PCF’s Operations Manager + +From PCF’s Ops Manager, select the “Pivotal Elastic Runtime” tile and navigate to the “Application Developer Controls” tab. +Change the “Maximum Disk Quota per App (MB)” setting from 2048 (2G) to 10240 (10G). Save the disk quota update and click +“Apply Changes” to complete the configuration override. + +### 11.12. Scale Application + +Once the disk quota change has been successfully applied and assuming you have a [running application](#running-on-cloudfoundry), +you can scale the application with a new `disk_limit` through the CF CLI, as the following example shows: + +``` +→ cf scale dataflow-server -k 10GB + +Scaling app dataflow-server in org ORG / space SPACE as user... +OK + +.... +.... +.... +.... + + state since cpu memory disk details +#0 running 2016-10-31 03:07:23 PM 1.8% 497.9M of 1.1G 193.9M of 10G +``` + +You can then list the applications and see the new maximum disk space, as the following example shows: + +``` +→ cf apps +Getting apps in org ORG / space SPACE as user... +OK + +name requested state instances memory disk urls +dataflow-server started 1/1 1.1G 10G dataflow-server.apps.io +``` + +### 11.13. 
Managing Disk Use + +Even when configuring the Data Flow server to use 10G of space, there is the possibility of exhausting +the available space on the local disk. To prevent this, `jar` artifacts downloaded from external sources, i.e., apps registered as `http` or `maven` resources, are automatically deleted whenever the application is deployed, whether or not the deployment request succeeds. +This behavior is optimal for production environments in which container runtime stability is more critical than I/O latency incurred during deployment. +In development environments deployment happens more frequently. Additionally, the `jar` artifact (or a lighter `metadata` jar) contains metadata describing application configuration properties +which is used by various operations related to application configuration, more frequently performed during pre-production activities (see [Application Metadata](https://dataflow.spring.io/docs/applications/application-metadata) for details). +To provide a more responsive interactive developer experience at the expense of more disk usage in pre-production environments, you can set the CloudFoundry deployer property `autoDeleteMavenArtifacts` to `false`. + +If you deploy the Data Flow server by using the default `port` health check type, you must explicitly monitor the disk space on the server in order to avoid running out space. +If you deploy the server by using the `http` health check type (see the next example), the Data Flow server is restarted if there is low disk space. +This is due to Spring Boot’s [Disk Space Health Indicator](https://github.com/spring-projects/spring-boot/blob/v1.5.14.RELEASE/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/DiskSpaceHealthIndicator.java). 
+You can [configure](https://docs.spring.io/spring-boot/docs/1.5.14.RELEASE/reference/htmlsingle/#common-application-properties) the settings of the Disk Space Health Indicator by using the properties that have the `management.health.diskspace` prefix. + +For version 1.7, we are investigating the use of [Volume Services](https://docs.cloudfoundry.org/devguide/services/using-vol-services.html) for the Data Flow server to store `.jar` artifacts before pushing them to Cloud Foundry. + +The following example shows how to deploy the `http` health check type to an endpoint called `/management/health`: + +``` +--- + ... + health-check-type: http + health-check-http-endpoint: /management/health +``` + +### 11.14. Application Resolution Alternatives + +Though we recommend using a Maven Artifactory for application [Register a Stream Application](#spring-cloud-dataflow-register-stream-apps), +there might be situations where one of the following alternative approaches would make sense. + +* We have custom-built and maintain a [SCDF APP Tool](https://github.com/spring-cloud-stream-app-starters/scdf-app-tool)that can run as a regular Spring Boot application in Cloud Foundry, but it will in turn host and serve the application + JARs for SCDF at runtime. + +* With the help of Spring Boot, we can serve [static content](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-developing-web-applications.html#boot-features-spring-mvc-static-content)in Cloud Foundry. A simple Spring Boot application can bundle all the required stream and task applications. By having it + run on Cloud Foundry, the static application can then serve the über-jar’s. From the shell, you can, for example, register the + application with the name `http-source.jar` by using `--uri=http:///http-source.jar`. + +* The über-jar’s can be hosted on any external server that’s reachable over HTTP. They can be resolved from raw GitHub URIs + as well. 
From the shell, you can, for example, register the app with the name `http-source.jar` by using `--uri=http:///http-source.jar`. + +* [Static Buildpack](https://docs.cloudfoundry.org/buildpacks/staticfile/index.html) support in Cloud Foundry is another + option. A similar HTTP resolution works on this model, too. + +* [Volume Services](https://docs.cloudfoundry.org/devguide/services/using-vol-services.html) is another great option. + The required über-jars can be hosted in an external file system. With the help of volume-services, you can, for + example, register the application with the name `http-source.jar` by using `--uri=file:///http-source.jar`. + +### 11.15. Security + +By default, the Data Flow server is unsecured and runs on an unencrypted HTTP connection. You can secure your REST endpoints +(as well as the Data Flow Dashboard) by enabling HTTPS and requiring clients to authenticate. +For more details about securing the +REST endpoints and configuring to authenticate against an OAUTH backend (UAA and SSO running on Cloud Foundry), +see the security section from the core [[configuration-local-security]](#configuration-local-security). You can configure the security details in `dataflow-server.yml` or pass them as environment variables through `cf set-env` commands. + +#### 11.15.1. Authentication + +Spring Cloud Data Flow can either integrate with Pivotal Single Sign-On Service +(for example, on PWS) or Cloud Foundry User Account and Authentication (UAA) Server. + +##### Pivotal Single Sign-On Service + +When deploying Spring Cloud Data Flow to Cloud Foundry, you can bind the +application to the Pivotal Single Sign-On Service. By doing so, Spring Cloud +Data Flow takes advantage of the[Java CFEnv](https://github.com/pivotal-cf/java-cfenv), +which provides Cloud Foundry-specific auto-configuration support for OAuth 2.0. 
+ +To do so, bind the Pivotal Single Sign-On Service to your Data Flow Server application and +provide the following properties: + +``` +SPRING_CLOUD_DATAFLOW_SECURITY_CFUSEUAA: false (1) +SECURITY_OAUTH2_CLIENT_CLIENTID: "${security.oauth2.client.clientId}" +SECURITY_OAUTH2_CLIENT_CLIENTSECRET: "${security.oauth2.client.clientSecret}" +SECURITY_OAUTH2_CLIENT_ACCESSTOKENURI: "${security.oauth2.client.accessTokenUri}" +SECURITY_OAUTH2_CLIENT_USERAUTHORIZATIONURI: "${security.oauth2.client.userAuthorizationUri}" +SECURITY_OAUTH2_RESOURCE_USERINFOURI: "${security.oauth2.resource.userInfoUri}" +``` + +|**1**|It is important that the property `spring.cloud.dataflow.security.cf-use-uaa` is set to `false`| +|-----|-----------------------------------------------------------------------------------------------| + +Authorization is similarly supported for non-Cloud Foundry security scenarios. +See the security section from the core Data Flow [[configuration-local-security]](#configuration-local-security). + +As the provisioning of roles can vary widely across environments, we by +default assign all Spring Cloud Data Flow roles to users. + +You can customize this behavior by providing your own [`AuthoritiesExtractor`](https://docs.spring.io/spring-boot/docs/current/api/org/springframework/boot/autoconfigure/security/oauth2/resource/AuthoritiesExtractor.html). 
+
+The following example shows one possible approach to set the custom `AuthoritiesExtractor` on the `UserInfoTokenServices`:
+
+```
+public class MyUserInfoTokenServicesPostProcessor
+  implements BeanPostProcessor {
+
+  @Override
+  public Object postProcessBeforeInitialization(Object bean, String beanName) {
+    if (bean instanceof UserInfoTokenServices) {
+      final UserInfoTokenServices userInfoTokenServices = (UserInfoTokenServices) bean;
+      userInfoTokenServices.setAuthoritiesExtractor(ctx.getBean(AuthoritiesExtractor.class));
+    }
+    return bean;
+  }
+
+  @Override
+  public Object postProcessAfterInitialization(Object bean, String beanName) {
+    return bean;
+  }
+}
+```
+
+Then you can declare it in your configuration class as follows:
+
+```
+@Bean
+public BeanPostProcessor myUserInfoTokenServicesPostProcessor() {
+  BeanPostProcessor postProcessor = new MyUserInfoTokenServicesPostProcessor();
+  return postProcessor;
+}
+```
+
+##### Cloud Foundry UAA
+
+The availability of Cloud Foundry User Account and Authentication (UAA) depends on the Cloud Foundry environment.
+In order to provide UAA integration, you have to provide the necessary
+OAuth2 configuration properties (for example, by setting the `SPRING_APPLICATION_JSON` property).
+
+The following JSON example shows how to create a security configuration:
+
+```
+{
+  "security.oauth2.client.client-id": "scdf",
+  "security.oauth2.client.client-secret": "scdf-secret",
+  "security.oauth2.client.access-token-uri": "https://login.cf.myhost.com/oauth/token",
+  "security.oauth2.client.user-authorization-uri": "https://login.cf.myhost.com/oauth/authorize",
+  "security.oauth2.resource.user-info-uri": "https://login.cf.myhost.com/userinfo"
+}
+```
+
+By default, the `spring.cloud.dataflow.security.cf-use-uaa` property is set to `true`. 
This property activates a special [`AuthoritiesExtractor`](https://docs.spring.io/spring-boot/docs/current/api/org/springframework/boot/autoconfigure/security/oauth2/resource/AuthoritiesExtractor.html) called `CloudFoundryDataflowAuthoritiesExtractor`.
+
+If you do not use CloudFoundry UAA, you should set `spring.cloud.dataflow.security.cf-use-uaa` to `false`.
+
+Under the covers, this `AuthoritiesExtractor` calls out to the [Cloud Foundry
+Apps API](https://apidocs.cloudfoundry.org/253/apps/retrieving_permissions_on_a_app.html) and ensures that users are in fact Space Developers.
+
+If the authenticated user is verified as a Space Developer, all roles are assigned.
+
+### 11.16. Configuration Reference
+
+You must provide several pieces of configuration. These are Spring Boot `@ConfigurationProperties`, so you can set
+them as environment variables or by any other means that Spring Boot supports. The following listing is in environment
+variable format, as that is an easy way to get started configuring Boot applications in Cloud Foundry.
+Note that in the future, you will be able to deploy tasks to multiple platforms, but for 2.0.0.M1 you can deploy only to a single platform and the name must be `default`.
+
+```
+# Default values appear after the equal signs.
+# Example values, typical for Pivotal Web Services, are included as comments. 
+ +# URL of the CF API (used when using cf login -a for example) - for example, https://api.run.pivotal.io +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_URL= + +# The name of the organization that owns the space above - for example, youruser-org +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_ORG= + +# The name of the space into which modules will be deployed - for example, development +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SPACE= + +# The root domain to use when mapping routes - for example, cfapps.io +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_DOMAIN= + +# The user name and password of the user to use to create applications +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_USERNAME= +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_PASSWORD + +# The identity provider to be used when accessing the Cloud Foundry API (optional). +# The passed string has to be a URL-Encoded JSON Object, containing the field origin with value as origin_key of an identity provider - for example, {"origin":"uaa"} +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_LOGIN_HINT= + +# Whether to allow self-signed certificates during SSL validation (you should NOT do so in production) +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SKIP_SSL_VALIDATION + +# A comma-separated set of service instance names to bind to every deployed task application. 
+# Among other things, this should include an RDBMS service that is used +# for Spring Cloud Task execution reporting, such as my_postgres +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_SERVICES +spring.cloud.deployer.cloudfoundry.task.services= + +# Timeout, in seconds, to use when doing blocking API calls to Cloud Foundry +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_API_TIMEOUT= + +# Timeout, in milliseconds, to use when querying the Cloud Foundry API to compute app status +SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_STATUS_TIMEOUT +``` + +Note that you can set `spring.cloud.deployer.cloudfoundry.services`,`spring.cloud.deployer.cloudfoundry.buildpacks`, or the Spring Cloud Deployer-standard`spring.cloud.deployer.memory` and `spring.cloud.deployer.disk`as part of an individual deployment request by using the `deployer.` shortcut, as the following example shows: + +``` +stream create --name ticktock --definition "time | log" +stream deploy --name ticktock --properties "deployer.time.memory=2g" +``` + +The commands in the preceding example deploy the time source with 2048MB of memory, while the log sink uses the default 1024MB. + +When you deploy a stream, you can also pass `JAVA_OPTS` as a deployment property, as the following example shows: + +``` +stream deploy --name ticktock --properties "deployer.time.cloudfoundry.javaOpts=-Duser.timezone=America/New_York" +``` + +### 11.17. Debugging + +If you want to get better insights into what is happening when your streams and tasks are being deployed, you may want +to turn on the following features: + +* Reactor “stacktraces”, showing which operators were involved before an error occurred. This feature is helpful, as the deployer + relies on project reactor and regular stacktraces may not always allow understanding the flow before an error happened. 
+ Note that this comes with a performance penalty, so it is disabled by default.
+
+```
+spring.cloud.dataflow.server.cloudfoundry.debugReactor = true
+```
+
+* Deployer and Cloud Foundry client library request and response logs. This feature allows seeing a detailed conversation between
+ the Data Flow server and the Cloud Foundry Cloud Controller.
+
+```
+logging.level.cloudfoundry-client = DEBUG
+```
+
+### 11.18. Spring Cloud Config Server
+
+You can use Spring Cloud Config Server to centralize configuration properties for Spring Boot applications. Likewise,
+both Spring Cloud Data Flow and the applications orchestrated by Spring Cloud Data Flow can be integrated with
+a configuration server to use the same capabilities.
+
+#### 11.18.1. Stream, Task, and Spring Cloud Config Server
+
+Similar to Spring Cloud Data Flow server, you can configure both the stream and task applications to resolve the centralized properties from the configuration server.
+Setting the `spring.cloud.config.uri` property for the deployed applications is a common way to bind to the configuration server.
+See the [Spring Cloud Config Client](https://cloud.spring.io/spring-cloud-config/spring-cloud-config.html#_spring_cloud_config_client) reference guide for more information.
+Since this property is likely to be used across all applications deployed by the Data Flow server, the Data Flow server’s `spring.cloud.dataflow.applicationProperties.stream` property for stream applications and `spring.cloud.dataflow.applicationProperties.task` property for task applications can be used to pass the `uri` of the Config Server to each deployed stream or task application. See the section on [Common Application Properties](#spring-cloud-dataflow-global-properties) for more information. 
+ +Note that, if you use applications from the [App Starters project](https://cloud.spring.io/spring-cloud-stream-app-starters/), these applications already embed the `spring-cloud-services-starter-config-client` dependency. +If you build your application from scratch and want to add the client side support for config server, you can add a dependency reference to the config server client library. The following snippet shows a Maven example: + +``` +... + + io.pivotal.spring.cloud + spring-cloud-services-starter-config-client + CONFIG_CLIENT_VERSION + +... +``` + +where `CONFIG_CLIENT_VERSION` can be the latest release of the [Spring Cloud Config Server](https://github.com/pivotal-cf/spring-cloud-services-connector/releases)client for Pivotal Cloud Foundry. + +| |You may see a `WARN` logging message if the application that uses this library cannot connect to the configuration
server when the application starts and whenever the `/health` endpoint is accessed.
If you know that you are not using config server functionality, you can disable the client library by setting the`SPRING_CLOUD_CONFIG_ENABLED` environment variable to `false`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 11.18.2. Sample Manifest Template + +The following SCDF and Skipper `manifest.yml` templates includes the required environment variables for the Skipper and Spring Cloud Data Flow server and deployed applications and tasks to successfully run on Cloud Foundry and automatically resolve centralized properties from `my-config-server` at runtime: + +``` +--- +applications: +- name: data-flow-server + host: data-flow-server + memory: 2G + disk_quota: 2G + instances: 1 + path: {PATH TO SERVER UBER-JAR} + env: + SPRING_APPLICATION_NAME: data-flow-server + MAVEN_REMOTE_REPOSITORIES_REPO1_URL: https://repo.spring.io/libs-snapshot + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_URL: https://api.sys.huron.cf-app.com + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_ORG: sabby20 + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SPACE: sabby20 + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_DOMAIN: apps.huron.cf-app.com + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_USERNAME: admin + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_PASSWORD: *** + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SKIP_SSL_VALIDATION: true + 
SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_SERVICES: mysql + SPRING_CLOUD_SKIPPER_CLIENT_SERVER_URI: https:///api +services: +- mysql +- my-config-server + +--- +applications: +- name: skipper-server + host: skipper-server + memory: 1G + disk_quota: 1G + instances: 1 + timeout: 180 + buildpack: java_buildpack + path: + env: + SPRING_APPLICATION_NAME: skipper-server + SPRING_CLOUD_SKIPPER_SERVER_ENABLE_LOCAL_PLATFORM: false + SPRING_CLOUD_SKIPPER_SERVER_STRATEGIES_HEALTHCHECK_TIMEOUTINMILLIS: 300000 + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_URL: https://api.local.pcfdev.io + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_ORG: pcfdev-org + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SPACE: pcfdev-space + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_DOMAIN: cfapps.io + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_USERNAME: admin + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_PASSWORD: admin + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SKIP_SSL_VALIDATION: false + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_DELETE_ROUTES: false + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_SERVICES: rabbit, my-config-server +services: +- mysql + my-config-server +``` + +where `my-config-server` is the name of the Spring Cloud Config Service instance running on Cloud Foundry. + +By binding the service to Spring Cloud Data Flow server, Spring Cloud Task and via Skipper to all the Spring Cloud Stream applications respectively, we can now resolve centralized properties backed by this service. + +#### 11.18.3. 
Self-signed SSL Certificate and Spring Cloud Config Server + +Often, in a development environment, we may not have a valid certificate to enable SSL communication between clients and the backend services. +However, the configuration server for Pivotal Cloud Foundry uses HTTPS for all client-to-service communication, so we need to add a self-signed SSL certificate in environments with no valid certificates. + +By using the same `manifest.yml` templates listed in the previous section for the server, we can provide the self-signed SSL certificate by setting `TRUST_CERTS: `. + +However, the deployed applications also require `TRUST_CERTS` as a flat environment variable (as opposed to being wrapped inside `SPRING_APPLICATION_JSON`), so we must instruct the server with yet another set of tokens (`SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_USE_SPRING_APPLICATION_JSON: false`) for tasks. +With this setup, the applications receive their application properties as regular environment variables. + +The following listing shows the updated `manifest.yml` with the required changes. Both the Data Flow server and deployed applications +get their configuration from the `my-config-server` Cloud Config server (deployed as a Cloud Foundry service). 
+ +``` +--- +applications: +- name: test-server + host: test-server + memory: 1G + disk_quota: 1G + instances: 1 + path: spring-cloud-dataflow-server-VERSION.jar + env: + SPRING_APPLICATION_NAME: test-server + MAVEN_REMOTE_REPOSITORIES_REPO1_URL: https://repo.spring.io/libs-snapshot + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_URL: https://api.sys.huron.cf-app.com + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_ORG: sabby20 + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SPACE: sabby20 + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_DOMAIN: apps.huron.cf-app.com + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_USERNAME: admin + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_PASSWORD: *** + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SKIP_SSL_VALIDATION: true + SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_SERVICES: mysql, config-server + SPRING_CLOUD_SKIPPER_CLIENT_SERVER_URI: https:///api + TRUST_CERTS: #this is for the server + SPRING_CLOUD_DATAFLOW_APPLICATION_PROPERTIES_TASK_TRUST_CERTS: #this propagates to all tasks +services: +- mysql +- my-config-server #this is for the server +``` + +Also add the `my-config-server` service to the Skipper’s manifest environment + +``` +--- +applications: +- name: skipper-server + host: skipper-server + memory: 1G + disk_quota: 1G + instances: 1 + timeout: 180 + buildpack: java_buildpack + path: + env: + SPRING_APPLICATION_NAME: skipper-server + SPRING_CLOUD_SKIPPER_SERVER_ENABLE_LOCAL_PLATFORM: false + SPRING_CLOUD_SKIPPER_SERVER_STRATEGIES_HEALTHCHECK_TIMEOUTINMILLIS: 300000 + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_URL: + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_ORG: + 
SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SPACE: + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_DOMAIN: + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_USERNAME: + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_PASSWORD: + SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_SERVICES: rabbit, my-config-server #this is so all stream applications bind to my-config-server +services: +- mysql + my-config-server +``` + +### 11.19. Configure Scheduling + +This section discusses how to configure Spring Cloud Data Flow to connect to the [PCF-Scheduler](https://www.cloudfoundry.org/the-foundry/scheduler/) as its agent to execute tasks. + +| |Before following these instructions, be sure to have an instance of the PCF-Scheduler service running in your Cloud Foundry space.
To create a PCF-Scheduler in your space (assuming it is in your Market Place) execute the following from the CF CLI: `cf create-service scheduler-for-pcf standard `.
The name of the service is later used to bind it to the running application in *PCF*.|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+For scheduling, you must add (or update) the following environment variables in your environment:
+
+* Enable scheduling for Spring Cloud Data Flow by setting `spring.cloud.dataflow.features.schedules-enabled` to `true`.
+
+* Bind the task deployer to your instance of PCF-Scheduler by adding the PCF-Scheduler service name to the `SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_SERVICES` environment variable.
+
+* Establish the URL to the PCF-Scheduler by setting the `SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_SCHEDULER_SCHEDULER_URL` environment variable. 
+
+| |After creating the preceding configurations, you must create any task definitions that need to be scheduled.|
+|---|------------------------------------------------------------------------------------------------------------|
+
+The following sample manifest shows both environment properties configured (assuming you have a PCF-Scheduler service available with the name `myscheduler`):
+
+```
+---
+applications:
+- name: data-flow-server
+  host: data-flow-server
+  memory: 2G
+  disk_quota: 2G
+  instances: 1
+  path: {PATH TO SERVER UBER-JAR}
+  env:
+    SPRING_APPLICATION_NAME: data-flow-server
+    SPRING_CLOUD_SKIPPER_SERVER_ENABLE_LOCAL_PLATFORM: false
+    SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_URL:
+    SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_ORG:
+    SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_SPACE:
+    SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_DOMAIN:
+    SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_USERNAME:
+    SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_CONNECTION_PASSWORD:
+    SPRING_CLOUD_SKIPPER_SERVER_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_DEPLOYMENT_SERVICES: rabbit, myscheduler
+    SPRING_CLOUD_DATAFLOW_FEATURES_SCHEDULES_ENABLED: true
+    SPRING_CLOUD_SKIPPER_CLIENT_SERVER_URI: https:///api
+    SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_SCHEDULER_SCHEDULER_URL: https://scheduler.local.pcfdev.io
+services:
+- mysql
+```
+
+Where the `SPRING_CLOUD_DATAFLOW_TASK_PLATFORM_CLOUDFOUNDRY_ACCOUNTS[default]_SCHEDULER_SCHEDULER_URL` has the following format: `scheduler.` (for
+example, `scheduler.local.pcfdev.io`). Check the actual address from your *PCF* environment. 
+ +| |Detailed examples for launching and scheduling tasks across multiple platforms, are available in this section [Multiple Platform Support for Tasks](https://dataflow.spring.io/docs/recipes/multi-platform-deployment/) on [dataflow.spring.io](http://dataflow.spring.io).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 12. Configuration - Kubernetes + +This section describes how to configure Spring Cloud Data Flow features, such as deployer properties, tasks, and which relational database to use. + +### 12.1. Feature Toggles + +Data Flow server offers specific set of features that can be enabled or disabled when launching. These features include all the lifecycle operations, REST endpoints (server and client implementations including Shell and the UI) for: + +* Streams + +* Tasks + +* Schedules + +You can enable or disable these features by setting the following boolean environment variables when launching the Data Flow server: + +* `SPRING_CLOUD_DATAFLOW_FEATURES_STREAMS_ENABLED` + +* `SPRING_CLOUD_DATAFLOW_FEATURES_TASKS_ENABLED` + +* `SPRING_CLOUD_DATAFLOW_FEATURES_SCHEDULES_ENABLED` + +By default, all the features are enabled. + +The `/features` REST endpoint provides information on the features that have been enabled and disabled. + +### 12.2. Deployer Properties + +You can use the following configuration properties the [Kubernetes deployer](https://github.com/spring-cloud/spring-cloud-deployer-kubernetes) to customize how Streams and Tasks are deployed. +When deploying with the Data Flow shell, you can use the syntax `deployer..kubernetes.`. 
+These properties are also used when configuring the [Kubernetes task platforms](#configuration-kubernetes-tasks) in the Data Flow server and Kubernetes platforms in Skipper for deploying Streams. + +| Deployer Property Name | Description | Default Value | +|------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------| +| namespace | Namespace to use |environment variable `KUBERNETES_NAMESPACE`, otherwise `default`| +| deployment.nodeSelector | The node selectors to apply to the deployment in `key:value` format. Multiple node selectors are comma separated. | \ | +| imagePullSecret | Secrets for a access a private registry to pull images. | \ | +| imagePullPolicy | The Image Pull Policy to apply when pulling images. Valid options are `Always`, `IfNotPresent`, and `Never`. | IfNotPresent | +| livenessProbeDelay | Delay in seconds when the Kubernetes liveness check of the app container should start checking its health status. | 10 | +| livenessProbePeriod | Period in seconds for performing the Kubernetes liveness check of the app container. | 60 | +| livenessProbeTimeout | Timeout in seconds for the Kubernetes liveness check of the app container. If the health check takes longer than this value to return it is assumed as 'unavailable'. | 2 | +| livenessProbePath | Path that app container has to respond to for liveness check. | \ | +| livenessProbePort | Port that app container has to respond on for liveness check. 
| \ | +| readinessProbeDelay | Delay in seconds when the readiness check of the app container should start checking if the module is fully up and running. | 10 | +| readinessProbePeriod | Period in seconds to perform the readiness check of the app container. | 10 | +| readinessProbeTimeout | Timeout in seconds that the app container has to respond to its health status during the readiness check. | 2 | +| readinessProbePath | Path that app container has to respond to for readiness check. | \ | +| readinessProbePort | Port that app container has to respond on for readiness check. | \ | +| probeCredentialsSecret | The secret name containing the credentials to use when accessing secured probe endpoints. | \ | +| limits.memory | The memory limit, maximum needed value to allocate a pod, Default unit is mebibytes, 'M' and 'G" suffixes supported | \ | +| limits.cpu | The CPU limit, maximum needed value to allocate a pod | \ | +| requests.memory | The memory request, guaranteed needed value to allocate a pod. | \ | +| requests.cpu | The CPU request, guaranteed needed value to allocate a pod. | \ | +|statefulSet.volumeClaimTemplate.storageClassName| Name of the storage class for a stateful set | \ | +| statefulSet.volumeClaimTemplate.storage | The storage amount. Default unit is mebibytes, 'M' and 'G" suffixes supported | \ | +| environmentVariables | List of environment variables to set for any deployed app container | \ | +| entryPointStyle | Entry point style used for the Docker image. Used to determine how to pass in properties. Can be `exec`, `shell`, and `boot` | `exec` | +| createLoadBalancer | Create a "LoadBalancer" for the service created for each app. This facilitates assignment of external IP to app. | false | +| serviceAnnotations | Service annotations to set for the service created for each application. String of the format `annotation1:value1,annotation2:value2` | \ | +| podAnnotations | Pod annotations to set for the pod created for each deployment. 
String of the format `annotation1:value1,annotation2:value2` | \ | +| jobAnnotations | Job annotations to set for the pod or job created for a job. String of the format `annotation1:value1,annotation2:value2` | \ | +| minutesToWaitForLoadBalancer | Time to wait for load balancer to be available before attempting delete of service (in minutes). | 5 | +| maxTerminatedErrorRestarts | Maximum allowed restarts for app that fails due to an error or excessive resource use. | 2 | +| maxCrashLoopBackOffRestarts | Maximum allowed restarts for app that is in a CrashLoopBackOff. Values are `Always`, `IfNotPresent`, `Never` | `IfNotPresent` | +| volumeMounts | volume mounts expressed in YAML format. e.g. ``[{name: 'testhostpath', mountPath: '/test/hostPath'}, {name: 'testpvc', mountPath: '/test/pvc'}, {name: 'testnfs', mountPath: '/test/nfs'}]`` | \ | +| volumes | The volumes that a Kubernetes instance supports specifed in YAML format. e.g. ``[{name: testhostpath, hostPath: { path: '/test/override/hostPath' }},{name: 'testpvc', persistentVolumeClaim: { claimName: 'testClaim', readOnly: 'true' }}, {name: 'testnfs', nfs: { server: '10.0.0.1:111', path: '/test/nfs' }}]`` | \ | +| hostNetwork | The hostNetwork setting for the deployments, see [kubernetes.io/docs/api-reference/v1/definitions/#\_v1\_podspec](https://kubernetes.io/docs/api-reference/v1/definitions/#_v1_podspec) | false | +| createDeployment | Create a "Deployment" with a "Replica Set" instead of a "Replication Controller". | true | +| createJob | Create a "Job" instead of just a "Pod" when launching tasks. | false | +| containerCommand | Overrides the default entry point command with the provided command and arguments. | \ | +| containerPorts | Adds additional ports to expose on the container. | \ | +| createNodePort | The explicit port to use when `NodePort` is the `Service` type. | \ | +| deploymentServiceAccountName | Service account name used in app deployments. 
Note: The service account name used for app deployments is derived from the Data Flow servers deployment. | \ | +| deploymentLabels | Additional labels to add to the deployment in `key:value` format. Multiple labels are comma separated. | \ | +| bootMajorVersion | The Spring Boot major version to use. Currently only used to configure Spring Boot version specific probe paths automatically. Valid options are `1` or `2`. | 2 | +| tolerations.key | The key to use for the toleration. | \ | +| tolerations.effect | The toleration effect. See [kubernetes.io/docs/concepts/configuration/taint-and-toleration](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration) for valid options. | \ | +| tolerations.operator | The toleration operator. See [kubernetes.io/docs/concepts/configuration/taint-and-toleration/](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) for valid options. | \ | +| tolerations.tolerationSeconds | The number of seconds defining how long the pod will stay bound to the node after a taint is added. | \ | +| tolerations.value | The toleration value to apply, used in conjunction with `operator` to select to appropriate `effect`. | \ | +| secretRefs | The name of the secret(s) to load the entire data contents into individual environment variables. Multiple secrets may be comma separated. | \ | +| secretKeyRefs.envVarName | The environment variable name to hold the secret data | \ | +| secretKeyRefs.secretName | The secret name to access | \ | +| secretKeyRefs.dataKey | The key name to obtain secret data from | \ | +| configMapRefs | The name of the ConfigMap(s) to load the entire data contents into individual environment variables. Multiple ConfigMaps be comma separated. 
| \ | +| configMapKeyRefs.envVarName | The environment variable name to hold the ConfigMap data | \ | +| configMapKeyRefs.configMapName | The ConfigMap name to access | \ | +| configMapKeyRefs.dataKey | The key name to obtain ConfigMap data from | \ | +| maximumConcurrentTasks | The maximum concurrent tasks allowed for this platform instance. | 20 | +| podSecurityContext.runAsUser | The numeric user ID to run pod container processes under | \ | +| podSecurityContext.fsGroup | The numeric group ID to run pod container processes under | \ | +| affinity.nodeAffinity | The node affinity expressed in YAML format. e.g. ``{ requiredDuringSchedulingIgnoredDuringExecution: { nodeSelectorTerms: [ { matchExpressions: [ { key: 'kubernetes.io/e2e-az-name', operator: 'In', values: [ 'e2e-az1', 'e2e-az2']}]}]}, preferredDuringSchedulingIgnoredDuringExecution: [ { weight: 1, preference: { matchExpressions: [ { key: 'another-node-label-key', operator: 'In', values: [ 'another-node-label-value' ]}]}}]}`` | \ | +| affinity.podAffinity | The pod affinity expressed in YAML format. e.g. ``{ requiredDuringSchedulingIgnoredDuringExecution: { labelSelector: [ { matchExpressions: [ { key: 'app', operator: 'In', values: [ 'store']}]}], topologyKey: 'kubernetes.io/hostnam'}, preferredDuringSchedulingIgnoredDuringExecution: [ { weight: 1, podAffinityTerm: { labelSelector: { matchExpressions: [ { key: 'security', operator: 'In', values: [ 'S2' ]}]}, topologyKey: 'failure-domain.beta.kubernetes.io/zone'}}]}`` | \ | +| affinity.podAntiAffinity |The pod anti-affinity expressed in YAML format. e.g. 
``{ requiredDuringSchedulingIgnoredDuringExecution: { labelSelector: { matchExpressions: [ { key: 'app', operator: 'In', values: [ 'store']}]}], topologyKey: 'kubernetes.io/hostname'}, preferredDuringSchedulingIgnoredDuringExecution: [ { weight: 1, podAffinityTerm: { labelSelector: { matchExpressions: [ { key: 'security', operator: 'In', values: [ 'S2' ]}]}, topologyKey: 'failure-domain.beta.kubernetes.io/zone'}}]}``| \ | +| statefulSetInitContainerImageName | A custom image name to use for the StatefulSet Init Container | \ | +| initContainer | An Init Container expressed in YAML format to be applied to a pod. e.g. ``{containerName: 'test', imageName: 'busybox:latest', commands: ['sh', '-c', 'echo hello']}`` | \ | +| additionalContainers | Additional containers expressed in YAML format to be applied to a pod. e.g. ``[{name: 'c1', image: 'busybox:latest', command: ['sh', '-c', 'echo hello1'], volumeMounts: [{name: 'test-volume', mountPath: '/tmp', readOnly: true}]}, {name: 'c2', image: 'busybox:1.26.1', command: ['sh', '-c', 'echo hello2']}]`` | \ | + +### 12.3. Tasks + +The Data Flow server is responsible for deploying Tasks. +Tasks that are launched by Data Flow write their state to the same database that is used by the Data Flow server. +For Tasks which are Spring Batch Jobs, the job and step execution data is also stored in this database. +As with Skipper, Tasks can be launched to multiple platforms. +When Data Flow is running on Kubernetes, a Task platfom must be defined. +To configure new platform accounts that target Kubernetes, provide an entry under the `spring.cloud.dataflow.task.platform.kubernetes` section in your `application.yaml` file for via another Spring Boot supported mechanism. +In the following example, two Kubernetes platform accounts named `dev` and `qa` are created. +The keys such as `memory` and `disk` are [Cloud Foundry Deployer Properties](#configuration-cloudfoundry-deployer). 
+ +``` +spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + dev: + namespace: devNamespace + imagePullPolicy: Always + entryPointStyle: exec + limits: + cpu: 4 + qa: + namespace: qaNamespace + imagePullPolicy: IfNotPresent + entryPointStyle: boot + limits: + memory: 2048m +``` + +| |By defining one platform as `default` allows you to skip using `platformName` where its use would otherwise be required.| +|---|------------------------------------------------------------------------------------------------------------------------| + +When launching a task, pass the value of the platform account name using the task launch option `--platformName` If you do not pass a value for `platformName`, the value `default` will be used. + +| |When deploying a task to multiple platforms, the configuration of the task needs to connect to the same database as the Data Flow Server.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------| + +You can configure the Data Flow server that is on Kubernetes to deploy tasks to Cloud Foundry and Kubernetes. See the section on [Cloud Foundry Task Platform Configuration](#configuration-cloudfoundry-tasks) for more information. + +Detailed examples for launching and scheduling tasks across multiple platforms, are available in this section [Multiple Platform Support for Tasks](https://dataflow.spring.io/docs/recipes/multi-platform-deployment/) on [dataflow.spring.io](http://dataflow.spring.io). + +### 12.4. General Configuration + +The Spring Cloud Data Flow server for Kubernetes uses the [`spring-cloud-kubernetes`](https://github.com/fabric8io/spring-cloud-kubernetes) module to process secrets that are mounted under `/etc/secrets`. ConfigMaps must be mounted as `application.yaml` in the `/config` directory that is processed by Spring Boot. 
To avoid access to the Kubernetes API server the `SPRING_CLOUD_KUBERNETES_CONFIG_ENABLE_API` and `SPRING_CLOUD_KUBERNETES_SECRETS_ENABLE_API` are set to `false`. + +#### 12.4.1. Using ConfigMap and Secrets + +You can pass configuration properties to the Data Flow Server by using Kubernetes [ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/) and [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). + +The following example shows one possible configuration, which enables MySQL and sets a memory limit: + +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: scdf-server + labels: + app: scdf-server +data: + application.yaml: |- + spring: + cloud: + dataflow: + task: + platform: + kubernetes: + accounts: + default: + limits: + memory: 1024Mi + datasource: + url: jdbc:mysql://${MYSQL_SERVICE_HOST}:${MYSQL_SERVICE_PORT}/mysql + username: root + password: ${mysql-root-password} + driverClassName: org.mariadb.jdbc.Driver + testOnBorrow: true + validationQuery: "SELECT 1" +``` + +The preceding example assumes that MySQL is deployed with `mysql` as the service name. Kubernetes publishes the host and port values of these services as environment variables that we can use when configuring the apps we deploy. + +We prefer to provide the MySQL connection password in a Secrets file, as the following example shows: + +``` +apiVersion: v1 +kind: Secret +metadata: + name: mysql + labels: + app: mysql +data: + mysql-root-password: eW91cnBhc3N3b3Jk +``` + +The password is a base64-encoded value. + +### 12.5. Database Configuration + +Spring Cloud Data Flow provides schemas for H2, HSQLDB, MySQL, Oracle, PostgreSQL, DB2, and SQL Server. The appropriate schema is automatically created when the server starts, provided the right database driver and appropriate credentials are in the classpath. + +The JDBC drivers for MySQL (via MariaDB driver), HSQLDB, PostgreSQL, and embedded H2 are available out of the box. 
+If you use any other database, you need to put the corresponding JDBC driver jar on the classpath of the server. + +For instance, if you use MySQL in addition to a password in the secrets file, you could provide the following properties in the ConfigMap: + +``` +data: + application.yaml: |- + spring: + datasource: + url: jdbc:mysql://${MYSQL_SERVICE_HOST}:${MYSQL_SERVICE_PORT}/mysql + username: root + password: ${mysql-root-password} + driverClassName: org.mariadb.jdbc.Driver + url: jdbc:mysql://${MYSQL_SERVICE_HOST}:${MYSQL_SERVICE_PORT}/test + driverClassName: org.mariadb.jdbc.Driver +``` + +For PostgreSQL, you could use the following configuration: + +``` +data: + application.yaml: |- + spring: + datasource: + url: jdbc:postgresql://${PGSQL_SERVICE_HOST}:${PGSQL_SERVICE_PORT}/database + username: root + password: ${postgres-password} + driverClassName: org.postgresql.Driver +``` + +For HSQLDB, you could use the following configuration: + +``` +data: + application.yaml: |- + spring: + datasource: + url: jdbc:hsqldb:hsql://${HSQLDB_SERVICE_HOST}:${HSQLDB_SERVICE_PORT}/database + username: sa + driverClassName: org.hsqldb.jdbc.JDBCDriver +``` + +The following YAML snippet from a Deployment is an example of mounting a ConfigMap as `application.yaml` under `/config` where Spring Boot will process it plus a Secret mounted under `/etc/secrets` where it will get picked up by the spring-cloud-kubernetes library due to the environment variable `SPRING_CLOUD_KUBERNETES_SECRETS_PATHS` being set to `/etc/secrets`. + +``` +... + containers: + - name: scdf-server + image: springcloud/spring-cloud-dataflow-server:2.5.0.BUILD-SNAPSHOT + imagePullPolicy: Always + volumeMounts: + - name: config + mountPath: /config + readOnly: true + - name: database + mountPath: /etc/secrets/database + readOnly: true + ports: +... 
+ volumes: + - name: config + configMap: + name: scdf-server + items: + - key: application.yaml + path: application.yaml + - name: database + secret: + secretName: mysql +``` + +You can find migration scripts for specific database types in the [spring-cloud-task](https://github.com/spring-cloud/spring-cloud-task/tree/master/spring-cloud-task-core/src/main/resources/org/springframework/cloud/task/migration) repo. + +### 12.6. Monitoring and Management + +We recommend using the `kubectl` command for troubleshooting streams and tasks. + +You can list all artifacts and resources used by using the following command: + +``` +kubectl get all,cm,secrets,pvc +``` + +You can list all resources used by a specific application or service by using a label to select resources. The following command lists all resources used by the `mysql` service: + +``` +kubectl get all -l app=mysql +``` + +You can get the logs for a specific pod by issuing the following command: + +``` +kubectl logs pod +``` + +If the pod is continuously getting restarted, you can add `-p` as an option to see the previous log, as follows: + +``` +kubectl logs -p +``` + +You can also tail or follow a log by adding an `-f` option, as follows: + +``` +kubectl logs -f +``` + +A useful command to help in troubleshooting issues, such as a container that has a fatal error when starting up, is to use the `describe` command, as the following example shows: + +``` +kubectl describe pod ticktock-log-0-qnk72 +``` + +#### 12.6.1. Inspecting Server Logs + +You can access the server logs by using the following command: + +``` +kubectl get pod -l app=scdf=server +kubectl logs +``` + +#### 12.6.2. Streams + +Stream applications are deployed with the stream name followed by the name of the application. For processors and sinks, an instance index is also appended. 
+ +To see all the pods that are deployed by the Spring Cloud Data Flow server, you can specify the `role=spring-app` label, as follows: + +``` +kubectl get pod -l role=spring-app +``` + +To see details for a specific application deployment you can use the following command: + +``` +kubectl describe pod +``` + +To view the application logs, you can use the following command: + +``` +kubectl logs +``` + +If you would like to tail a log you can use the following command: + +``` +kubectl logs -f +``` + +#### 12.6.3. Tasks + +Tasks are launched as bare pods without a replication controller. The pods remain after the tasks complete, which gives you an opportunity to review the logs. + +To see all pods for a specific task, use the following command: + +``` +kubectl get pod -l task-name= +``` + +To review the task logs, use the following command: + +``` +kubectl logs +``` + +You have two options to delete completed pods. You can delete them manually once they are no longer needed or you can use the Data Flow shell `task execution cleanup` command to remove the completed pod for a task execution. + +To delete the task pod manually, use the following command: + +``` +kubectl delete pod +``` + +To use the `task execution cleanup` command, you must first determine the `ID` for the task execution. 
To do so, use the `task execution list` command, as the following example (with output) shows: + +``` +dataflow:>task execution list +╔═════════╤══╤════════════════════════════╤════════════════════════════╤═════════╗ +║Task Name│ID│ Start Time │ End Time │Exit Code║ +╠═════════╪══╪════════════════════════════╪════════════════════════════╪═════════╣ +║task1 │1 │Fri May 05 18:12:05 EDT 2017│Fri May 05 18:12:05 EDT 2017│0 ║ +╚═════════╧══╧════════════════════════════╧════════════════════════════╧═════════╝ +``` + +Once you have the ID, you can issue the command to cleanup the execution artifacts (the completed pod), as the following example shows: + +``` +dataflow:>task execution cleanup --id 1 +Request to clean up resources for task execution 1 has been submitted +``` + +##### Database Credentials for Tasks + +By default Spring Cloud Data Flow passes database credentials as properties to the pod at task launch time. +If using the `exec` or `shell` entry point styles the DB credentials will be viewable if the user does a `kubectl describe` on the task’s pod. +To configure Spring Cloud Data Flow to use Kubernetes Secrets: Set `spring.cloud.dataflow.task.use.kubernetes.secrets.for.db.credentials` property to `true`. If using the yaml files provided by Spring Cloud Data Flow update the `src/kubernetes/server/server-deployment.yaml to add the following environment variable: + +``` +- name: SPRING_CLOUD_DATAFLOW_TASK_USE_KUBERNETES_SECRETS_FOR_DB_CREDENTIALS + value: 'true' +``` + +If upgrading from a previous version of SCDF be sure to verify that `spring.datasource.username` and `spring.datasource.password` environment variables are present in the `secretKeyRefs` in the server-config.yaml. If not, add it as shown in the example below: + +``` +... 
+ task: + platform: + kubernetes: + accounts: + default: + secretKeyRefs: + - envVarName: "spring.datasource.password" + secretName: mysql + dataKey: mysql-root-password + - envVarName: "spring.datasource.username" + secretName: mysql + dataKey: mysql-root-username +... +``` + +Also verify that the associated secret(dataKey) is also available in secrets. SCDF provides an example of this for MySql here: `src/kubernetes/mysql/mysql-svc.yaml`. + +| |Passing of DB credentials via properties by default is to preserve to backwards compatibility. This will be feature will be removed in future release.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 12.7. Scheduling + +This section covers customization of how scheduled tasks are configured. Scheduling of tasks is enabled by default in the Spring Cloud Data Flow Kubernetes Server. Properties are used to influence settings for scheduled tasks and can be configured on a global or per-schedule basis. + +| |Unless noted, properties set on a per-schedule basis always take precedence over properties set as the server configuration. This arrangement allows for the ability to override global server level properties for a specific schedule.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [`KubernetesSchedulerProperties`](https://github.com/spring-cloud/spring-cloud-scheduler-kubernetes/blob/master/src/main/java/org/springframework/cloud/scheduler/spi/kubernetes/KubernetesSchedulerProperties.java) for more on the supported options. + +#### 12.7.1. Entry Point Style + +An Entry Point Style affects how application properties are passed to the task container to be deployed. 
Currently, three styles are supported: + +* `exec`: (default) Passes all application properties as command line arguments. + +* `shell`: Passes all application properties as environment variables. + +* `boot`: Creates an environment variable called `SPRING_APPLICATION_JSON` that contains a JSON representation of all application properties. + +You can configure the entry point style as follows: + +``` +deployer.kubernetes.entryPointStyle= +``` + +Replace `` with your desired Entry Point Style. + +You can also configure the Entry Point Style at the server level in the container `env` section of a deployment YAML, as the following example shows: + +``` +env: +- name: SPRING_CLOUD_SCHEDULER_KUBERNETES_ENTRY_POINT_STYLE + value: entryPointStyle +``` + +Replace `entryPointStyle` with the desired Entry Point Style. + +You should choose an Entry Point Style of either `exec` or `shell`, to correspond to how the `ENTRYPOINT` syntax is defined in the container’s `Dockerfile`. For more information and uses cases on `exec` vs `shell`, see the [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) section of the Docker documentation. + +Using the `boot` Entry Point Style corresponds to using the `exec` style `ENTRYPOINT`. Command line arguments from the deployment request are passed to the container, with the addition of application properties mapped into the `SPRING_APPLICATION_JSON` environment variable rather than command line arguments. + +#### 12.7.2. Environment Variables + +To influence the environment settings for a given application, you can take advantage of the `spring.cloud.deployer.kubernetes.environmentVariables` property. +For example, a common requirement in production settings is to influence the JVM memory arguments. 
+You can achieve this by using the `JAVA_TOOL_OPTIONS` environment variable, as the following example shows: + +``` +deployer.kubernetes.environmentVariables=JAVA_TOOL_OPTIONS=-Xmx1024m +``` + +| |When deploying stream applications or launching task applications where some of the properties may contain sensitive information, use the `shell` or `boot` as the `entryPointStyle`. This is because the `exec` (default) converts all properties to command line arguments and thus may not be secure in some environments.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Additionally you can configure environment variables at the server level in the container `env` section of a deployment YAML, as the following example shows: + +| |When specifying environment variables in the server configuration and on a per-schedule basis, environment variables will be merged. This allows for the ability to set common environment variables in the server configuration and more specific at the specific schedule level.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +env: +- name: SPRING_CLOUD_SCHEDULER_KUBERNETES_ENVIRONMENT_VARIABLES + value: myVar=myVal +``` + +Replace `myVar=myVal` with your desired environment variables. + +#### 12.7.3. Image Pull Policy + +An image pull policy defines when a Docker image should be pulled to the local registry. Currently, three policies are supported: + +* `IfNotPresent`: (default) Do not pull an image if it already exists. 
+ +* `Always`: Always pull the image regardless of whether it already exists. + +* `Never`: Never pull an image. Use only an image that already exists. + +The following example shows how you can individually configure containers: + +``` +deployer.kubernetes.imagePullPolicy=Always +``` + +Replace `Always` with your desired image pull policy. + +You can configure an image pull policy at the server level in the container `env` section of a deployment YAML, as the following example shows: + +``` +env: +- name: SPRING_CLOUD_SCHEDULER_KUBERNETES_IMAGE_PULL_POLICY + value: Always +``` + +Replace `Always` with your desired image pull policy. + +#### 12.7.4. Private Docker Registry + +Docker images that are private and require authentication can be pulled by configuring a Secret. First, you must create a Secret in the cluster. Follow the [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) guide to create the Secret. + +Once you have created the secret, use the `imagePullSecret` property to set the secret to use, as the following example shows: + +``` +deployer.kubernetes.imagePullSecret=mysecret +``` + +Replace `mysecret` with the name of the secret you created earlier. + +You can also configure the image pull secret at the server level in the container `env` section of a deployment YAML, as the following example shows: + +``` +env: +- name: SPRING_CLOUD_SCHEDULER_KUBERNETES_IMAGE_PULL_SECRET + value: mysecret +``` + +Replace `mysecret` with the name of the secret you created earlier. + +#### 12.7.5. Namespace + +By default the namespace used for scheduled tasks is `default`. This value can be set at the server level configuration in the container `env` section of a deployment YAML, as the following example shows: + +``` +env: +- name: SPRING_CLOUD_SCHEDULER_KUBERNETES_NAMESPACE + value: mynamespace +``` + +#### 12.7.6. 
Service Account + +You can configure a custom service account for scheduled tasks through properties. An existing service account can be used or a new one created. One way to create a service account is by using `kubectl`, as the following example shows: + +``` +$ kubectl create serviceaccount myserviceaccountname +serviceaccount "myserviceaccountname" created +``` + +Then you can configure the service account to use on a per-schedule basis as follows: + +``` +deployer.kubernetes.taskServiceAccountName=myserviceaccountname +``` + +Replace `myserviceaccountname` with your service account name. + +You can also configure the service account name at the server level in the container `env` section of a deployment YAML, as the following example shows: + +``` +env: +- name: SPRING_CLOUD_SCHEDULER_KUBERNETES_TASK_SERVICE_ACCOUNT_NAME + value: myserviceaccountname +``` + +Replace `myserviceaccountname` with the service account name to be applied to all deployments. + +For more information on scheduling tasks see [Scheduling Tasks](#spring-cloud-dataflow-schedule-launch-tasks). + +### 12.8. Debug Support + +Debugging the Spring Cloud Data Flow Kubernetes Server and included components (such as the [Spring Cloud Kubernetes Deployer](https://github.com/spring-cloud/spring-cloud-deployer-kubernetes)) is supported through the [Java Debug Wire Protocol (JDWP)](https://docs.oracle.com/javase/8/docs/technotes/guides/jpda/jdwp-spec.html). This section outlines an approach to manually enable debugging and another approach that uses configuration files provided with Spring Cloud Data Flow Server Kubernetes to “patch” a running deployment. + +| |JDWP itself does not use any authentication. 
This section assumes debugging is being done on a local development environment (such as Minikube), so guidance on securing the debug port is not provided.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 12.8.1. Enabling Debugging Manually + +To manually enable JDWP, first edit `src/kubernetes/server/server-deployment.yaml` and add an additional `containerPort` entry under `spec.template.spec.containers.ports` with a value of `5005`. Additionally, add the [`JAVA_TOOL_OPTIONS`](https://docs.oracle.com/javase/8/docs/platform/jvmti/jvmti.html#tooloptions) environment variable under `spec.template.spec.containers.env` as the following example shows: + +``` +spec: + ... + template: + ... + spec: + containers: + - name: scdf-server + ... + ports: + ... + - containerPort: 5005 + env: + - name: JAVA_TOOL_OPTIONS + value: '-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005' +``` + +| |The preceding example uses port 5005, but it can be any number that does not conflict with another port. The chosen port number must also be the same for the added `containerPort` value and the `address` parameter of the `JAVA_TOOL_OPTIONS` `-agentlib` flag, as shown in the preceding example.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can now start the Spring Cloud Data Flow Kubernetes Server. Once the server is up, you can verify the configuration changes on the `scdf-server` deployment, as the following example (with output) shows: + +``` +kubectl describe deployment/scdf-server +... +... +Pod Template: + ... + Containers: + scdf-server: + ... 
+ Ports: 80/TCP, 5005/TCP + ... + Environment: + JAVA_TOOL_OPTIONS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 + ... +``` + +With the server started and JDWP enabled, you need to configure access to the port. In this example, we use the [`port-forward`](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) subcommand of `kubectl`. The following example (with output) shows how to expose a local port to your debug target by using `port-forward`: + +``` +$ kubectl get pod -l app=scdf-server +NAME READY STATUS RESTARTS AGE +scdf-server-5b7cfd86f7-d8mj4 1/1 Running 0 10m +$ kubectl port-forward scdf-server-5b7cfd86f7-d8mj4 5005:5005 +Forwarding from 127.0.0.1:5005 -> 5005 +Forwarding from [::1]:5005 -> 5005 +``` + +You can now attach a debugger by pointing it to `127.0.0.1` as the host and `5005` as the port. The `port-forward` subcommand runs until stopped (by pressing `CTRL+c`, for example). + +You can remove debugging support by reverting the changes to `src/kubernetes/server/server-deployment.yaml`. The reverted changes are picked up on the next deployment of the Spring Cloud Data Flow Kubernetes Server. Manually adding debug support to the configuration is useful when debugging should be enabled by default each time the server is deployed. + +#### 12.8.2. Enabling Debugging with Patching + +Rather than manually changing the `server-deployment.yaml`, Kubernetes objects can be “patched” in place. For convenience, patch files that provide the same configuration as the manual approach are included. To enable debugging by patching, use the following command: + +``` +kubectl patch deployment scdf-server -p "$(cat src/kubernetes/server/server-deployment-debug.yaml)" +``` + +Running the preceding command automatically adds the `containerPort` attribute and the `JAVA_TOOL_OPTIONS` environment variable. 
The following example (with output) shows how to verify changes to the `scdf-server` deployment: + +``` +$ kubectl describe deployment/scdf-server +... +... +Pod Template: + ... + Containers: + scdf-server: + ... + Ports: 5005/TCP, 80/TCP + ... + Environment: + JAVA_TOOL_OPTIONS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 + ... +``` + +To enable access to the debug port, rather than using the `port-forward` subcommand of `kubectl`, you can patch the `scdf-server` Kubernetes service object. You must first ensure that the `scdf-server` Kubernetes service object has the proper configuration. The following example (with output) shows how to do so: + +``` +kubectl describe service/scdf-server +Port: 80/TCP +TargetPort: 80/TCP +NodePort: 30784/TCP +``` + +If the output contains `<unset>` (that is, the port has no name), you must patch the service to add a name for this port. The following example shows how to do so: + +``` +$ kubectl patch service scdf-server -p "$(cat src/kubernetes/server/server-svc.yaml)" +``` + +| |A port name should only be missing if the target cluster had been created prior to debug functionality being added. Since multiple ports are being added to the `scdf-server` Kubernetes Service Object, each needs to have its own name.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Now you can add the debug port, as the following example shows: + +``` +kubectl patch service scdf-server -p "$(cat src/kubernetes/server/server-svc-debug.yaml)" +``` + +The following example (with output) shows how to verify the mapping: + +``` +$ kubectl describe service scdf-server +Name: scdf-server +... +... +Port: scdf-server-jdwp 5005/TCP +TargetPort: 5005/TCP +NodePort: scdf-server-jdwp 31339/TCP +... +... +Port: scdf-server 80/TCP +TargetPort: 80/TCP +NodePort: scdf-server 30883/TCP +... +... 
+``` + +The output shows that container port 5005 has been mapped to the NodePort of 31339. The following example (with output) shows how to get the IP address of the Minikube node: + +``` +$ minikube ip +192.168.99.100 +``` + +With this information, you can create a debug connection by using a host of 192.168.99.100 and a port of 31339. + +The following example shows how to disable JDWP: + +``` +$ kubectl rollout undo deployment/scdf-server +$ kubectl patch service scdf-server --type json -p='[{"op": "remove", "path": "/spec/ports/0"}]' +``` + +The Kubernetes deployment object is rolled back to its state before being patched. The Kubernetes service object is then patched with a `remove` operation to remove port 5005 from the `containerPorts` list. + +| |`kubectl rollout undo` forces the pod to restart. Patching the Kubernetes Service Object does not re-create the service, and the port mapping to the `scdf-server` deployment remains the same.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [Rolling Back a Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-back-a-deployment) for more information on deployment rollbacks, including managing history and [Updating API Objects in Place Using kubectl Patch](https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/). + +# Shell + +This section covers the options for starting the shell and more advanced functionality relating to how the shell handles whitespace, quotes, and interpretation of SpEL expressions. +The introductory chapters to the[Stream DSL](#spring-cloud-dataflow-stream-intro) and [Composed Task DSL](#spring-cloud-dataflow-composed-tasks) are good places to start for the most common usage of shell commands. + +## 13. 
Shell Options + +The shell is built upon the [Spring Shell](https://projects.spring.io/spring-shell/) project. +Some command-line options come from Spring Shell, and some are specific to Data Flow. +The shell takes the following command line options: + +``` +unix:>java -jar spring-cloud-dataflow-shell-2.9.2.jar --help +Data Flow Options: + --dataflow.uri= Address of the Data Flow Server [default: http://localhost:9393]. + --dataflow.username= Username of the Data Flow Server [no default]. + --dataflow.password= Password of the Data Flow Server [no default]. + --dataflow.credentials-provider-command= Executes an external command which must return an + OAuth Bearer Token (Access Token prefixed with 'Bearer '), + e.g. 'Bearer 12345'), [no default]. + --dataflow.skip-ssl-validation= Accept any SSL certificate (even self-signed) [default: no]. + --dataflow.proxy.uri= Address of an optional proxy server to use [no default]. + --dataflow.proxy.username= Username of the proxy server (if required by proxy server) [no default]. + --dataflow.proxy.password= Password of the proxy server (if required by proxy server) [no default]. + --spring.shell.historySize= Default size of the shell log file [default: 3000]. + --spring.shell.commandFile= Data Flow Shell executes commands read from the file(s) and then exits. + --help This message. +``` + +You can use the `spring.shell.commandFile` option to point to an existing file that contains +all the shell commands to deploy one or many related streams and tasks. +Running multiple files is also supported. They should be passed as a comma-delimited string: + +`--spring.shell.commandFile=file1.txt,file2.txt` + +This option is useful when creating some scripts to help automate deployment. + +Also, the following shell command helps to modularize a complex script into multiple independent files: + +`dataflow:>script --file ` + +## 14. Listing Available Commands + +Typing `help` at the command prompt gives a listing of all available commands. 
+Most of the commands are for Data Flow functionality, but a few are general purpose. +The following listing shows the output of the `help` command: + +``` +! - Allows execution of operating system (OS) commands +clear - Clears the console +cls - Clears the console +date - Displays the local date and time +exit - Exits the shell +http get - Make GET request to http endpoint +http post - POST data to http endpoint +quit - Exits the shell +system properties - Shows the shell's properties +version - Displays shell version +``` + +Adding the name of the command to `help` shows additional information on how to invoke the command: + +``` +dataflow:>help stream create +Keyword: stream create +Description: Create a new stream definition + Keyword: ** default ** + Keyword: name + Help: the name to give to the stream + Mandatory: true + Default if specified: '__NULL__' + Default if unspecified: '__NULL__' + + Keyword: definition + Help: a stream definition, using the DSL (e.g. "http --port=9000 | hdfs") + Mandatory: true + Default if specified: '__NULL__' + Default if unspecified: '__NULL__' + + Keyword: deploy + Help: whether to deploy the stream immediately + Mandatory: false + Default if specified: 'true' + Default if unspecified: 'false' +``` + +## 15. Tab Completion + +You can complete the shell command options in the shell by pressing the `TAB` key after the leading `--`. For example, pressing `TAB` after `stream create --` results in the following pair of suggestions: + +``` +dataflow:>stream create -- +stream create --definition stream create --name +``` + +If you type `--de` and then press tab, `--definition` expands. + +Tab completion is also available inside the stream or composed task DSL expression for application or task properties. You can also use `TAB` to get hints in a stream DSL expression for the available sources, processors, or sinks that you can use. + +## 16. 
Whitespace and Quoting Rules + +You need to quote parameter values only if they contain spaces or the `|` character. The following example passes a SpEL expression (which is applied to any data it encounters) to a transform processor: + +``` +transform --expression='new StringBuilder(payload).reverse()' +``` + +If the parameter value needs to embed a single quote, use two single quotes, as follows: + +``` +// Query is: Select * from /Customers where name='Smith' +scan --query='Select * from /Customers where name=''Smith''' +``` + +### 16.1. Quotes and Escaping + +There is a Spring Shell-based client that talks to the Data Flow Server and is responsible for **parsing** the DSL. +In turn, applications may have application properties that rely on embedded languages, such as the **Spring Expression Language**. + +The Shell, Data Flow DSL parser, and SpEL have rules about how they handle quotes and how syntax escaping works. +When combined together, confusion may arise. +This section explains the rules that apply and provides examples of the most complicated situations you may encounter when all three components are involved. + +| |It is not always that complicated

If you do not use the Data Flow Shell (for example, if you use the REST API directly) or if application properties are not SpEL expressions, the escaping rules are simpler.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 16.1.1. Shell Rules + +Arguably, the most complex component when it comes to quotes is the Shell. The rules can be laid out quite simply, though: + +* A shell command is made of keys (`--something`) and corresponding values. There is a special, keyless mapping, though, which is described later. + +* A value cannot normally contain spaces, as space is the default delimiter for commands. + +* Spaces can be added though, by surrounding the value with quotes (either single (`'`) or double (`"`) quotes). + +* Values passed inside deployment properties (for example, `deployment --properties " …​"`) should not be quoted again. + +* If surrounded with quotes, a value can embed a literal quote of the same kind by prefixing it with a backslash (`\`). + +* Other escapes are available, such as `\t`, `\n`, `\r`, `\f` and unicode escapes of the form `\uxxxx`. + +* The keyless mapping is handled in a special way such that it does not need quoting to contain spaces. + +For example, the shell supports the `!` command to execute native shell commands. The `!` accepts a single keyless argument. This is why the following example works: + +``` +dataflow:>! rm something +``` + +The argument here is the whole `rm something` string, which is passed as is to the underlying shell. 
+ +As another example, the following commands are strictly equivalent, and the argument value is `something` (without the quotes): + +``` +dataflow:>stream destroy something +dataflow:>stream destroy --name something +dataflow:>stream destroy "something" +dataflow:>stream destroy --name "something" +``` + +#### 16.1.2. Property Files Rules + +The rules are relaxed when loading the properties from files. + +* The special characters used in property files (both Java and YAML) need to be escaped. For example `\` should be replaced by `\\`, `\t` by `\\t` and so forth. + +* For Java property files (`--propertiesFile .properties`), the property values should not be surrounded by quotes. It is not needed even if they contain spaces. + + ``` + filter.expression=payload > 5 + ``` + +* For YAML property files (`--propertiesFile .yaml`), though, the values need to be surrounded by double quotes. + + ``` + app: + filter: + filter: + expression: "payload > 5" + ``` + +#### 16.1.3. DSL Parsing Rules + +At the parser level (that is, inside the body of a stream or task definition), the rules are as follows: + +* Option values are normally parsed until the first space character. + +* They can be made of literal strings, though, surrounded by single or double quotes. + +* To embed such a quote, use two consecutive quotes of the desired kind. + +As such, the values of the `--expression` option to the filter application are semantically equivalent in the following examples: + +``` +filter --expression=payload>5 +filter --expression="payload>5" +filter --expression='payload>5' +filter --expression='payload > 5' +``` + +Arguably, the last one is more readable. It is made possible thanks to the surrounding quotes. The actual expression is `payload > 5`. + +Now, imagine that we want to test against string messages. 
If we want to compare the payload to the SpEL literal string, `"something"`, we could use the following: + +``` +filter --expression=payload=='something' (1) +filter --expression='payload == ''something''' (2) +filter --expression='payload == "something"' (3) +``` + +|**1**| This works because there are no spaces. It is not very legible, though. | +|-----|-----------------------------------------------------------------------------------------------------------------------| +|**2**| This uses single quotes to protect the whole argument. Hence, the actual single quotes need to be doubled. | +|**3**|SpEL recognizes String literals with either single or double quotes, so this last method is arguably the most readable.| + +Note that the preceding examples are to be considered outside of the shell (for example, when calling the REST API directly). +When entered inside the shell, chances are that the whole stream definition is itself inside double quotes, which would need to be escaped. The whole example then becomes the following: + +``` +dataflow:>stream create something --definition "http | filter --expression=payload='something' | log" + +dataflow:>stream create something --definition "http | filter --expression='payload == ''something''' | log" + +dataflow:>stream create something --definition "http | filter --expression='payload == \"something\"' | log" +``` + +#### 16.1.4. SpEL Syntax and SpEL Literals + +The last piece of the puzzle is about SpEL expressions. +Many applications accept options that are to be interpreted as SpEL expressions, and, as seen earlier, String literals are handled in a special way there, too. The rules are as follows: + +* Literals can be enclosed in either single or double quotes. + +* Quotes need to be doubled to embed a literal quote. Single quotes inside double quotes need no special treatment, and the reverse is also true. 
 + +As a last example, assume you want to use the [transform processor](https://docs.spring.io/spring-cloud-stream-app-starters/docs/current/reference/html/spring-cloud-stream-modules-processors.html#spring-clound-stream-modules-transform-processor). +This processor accepts an `expression` option which is a SpEL expression. It is to be evaluated against the incoming message, with a default of `payload` (which forwards the message payload untouched). + +It is important to understand that the following statements are equivalent: + +``` +transform --expression=payload +transform --expression='payload' +``` + +However, they are different from the following (and variations upon them): + +``` +transform --expression="'payload'" +transform --expression='''payload''' +``` + +The first series evaluates to the message payload, while the latter examples evaluate to the literal string, `payload`. + +#### 16.1.5. Putting It All Together + +As a last, complete example, consider how you could force the transformation of all messages to the string literal, `hello world`, by creating a stream in the context of the Data Flow shell: + +``` +dataflow:>stream create something --definition "http | transform --expression='''hello world''' | log" (1) + +dataflow:>stream create something --definition "http | transform --expression='\"hello world\"' | log" (2) + +dataflow:>stream create something --definition "http | transform --expression=\"'hello world'\" | log" (2) +``` + +|**1**| In the first line, single quotes surround the string (at the Data Flow parser level), but they need to be doubled because they are inside a string literal (started by the first single quote after the equals sign). 
| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|The second and third lines use single and double quotes, respectively, to encompass the whole string at the Data Flow parser level. Consequently, the other kind of quote can be used inside the string. The whole thing is inside the `--definition` argument to the shell, though, which uses double quotes. Consequently, double quotes are escaped (at the shell level).| + +# Streams + +This section goes into more detail about how you can create Streams, which are collections of[Spring Cloud Stream](https://cloud.spring.io/spring-cloud-stream/) applications. It covers topics such as +creating and deploying Streams. + +If you are just starting out with Spring Cloud Data Flow, you should probably read the[Getting Started](#getting-started) guide before diving into +this section. + +## 17. Introduction + +A Stream is a collection of long-lived [Spring Cloud Stream](https://cloud.spring.io/spring-cloud-stream/) applications that communicate with each other over messaging middleware. +A text-based DSL defines the configuration and data flow between the applications. While many applications are provided for you to implement common use-cases, you typically create a custom Spring Cloud Stream application to implement custom business logic. + +The general lifecycle of a Stream is: + +1. Register applications. + +2. Create a Stream Definition. + +3. Deploy the Stream. + +4. Undeploy or destroy the Stream. + +5. Upgrade or roll back applications in the Stream. 
+ +For deploying Streams, the Data Flow Server has to be configured to delegate the deployment to a new server in the Spring Cloud ecosystem named [Skipper](https://cloud.spring.io/spring-cloud-skipper/). + +Furthermore, you can configure Skipper to deploy applications to one or more Cloud Foundry orgs and spaces, one or more namespaces on a Kubernetes cluster, or to the local machine. +When deploying a stream in Data Flow, you can specify which platform to use at deployment time. +Skipper also provides Data Flow with the ability to perform updates to deployed streams. +There are many ways the applications in a stream can be updated, but one of the most common examples is to upgrade a processor application with new custom business logic while leaving the existing source and sink applications alone. + +### 17.1. Stream Pipeline DSL + +A stream is defined by using a Unix-inspired [Pipeline syntax](https://en.wikipedia.org/wiki/Pipeline_(Unix)). +The syntax uses vertical bars, known as “pipes”, to connect multiple commands. +The command `ls -l | grep key | less` in Unix takes the output of the `ls -l` process and pipes it to the input of the `grep key` process. +The output of `grep` is, in turn, sent to the input of the `less` process. +Each `|` symbol connects the standard output of the command on the left to the standard input of the command on the right. +Data flows through the pipeline from left to right. + +In Data Flow, the Unix command is replaced by a [Spring Cloud Stream](https://cloud.spring.io/spring-cloud-stream/) application and each pipe symbol represents connecting the input and output of applications over messaging middleware, such as RabbitMQ or Apache Kafka. + +Each Spring Cloud Stream application is registered under a simple name. +The registration process specifies where the application can be obtained (for example, in a Maven Repository or a Docker registry). 
You can find out more about how to register Spring Cloud Stream applications in this [section](#spring-cloud-dataflow-register-stream-apps). +In Data Flow, we classify the Spring Cloud Stream applications as Sources, Processors, or Sinks. + +As a simple example, consider the collection of data from an HTTP Source and writing to a File Sink. +Using the DSL, the stream description is: + +`http | file` + +A stream that involves some processing would be expressed as: + +`http | filter | transform | file` + +Stream definitions can be created by using the shell’s `stream create` command, as shown in the following example: + +`dataflow:> stream create --name httpIngest --definition "http | file"` + +The Stream DSL is passed in to the `--definition` command option. + +The deployment of stream definitions is done through the Shell’s `stream deploy` command, as follows: + +`dataflow:> stream deploy --name ticktock` + +The [Getting Started](#getting-started) section shows you how to start the server and how to start and use the Spring Cloud Data Flow shell. + +Note that the shell calls the Data Flow Server’s REST API. For more information on making HTTP requests directly to the server, see the [REST API Guide](#api-guide). + +| |When naming a stream definition, keep in mind that each application in the stream will be created on the platform with the name in the format of `-`. Thus, the total length of the generated application name can’t exceed 58 characters.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 17.2. Stream Application DSL + +You can use the Stream Application DSL to define custom binding properties for each of the Spring Cloud Stream applications. 
+See the [Stream Application DSL](https://dataflow.spring.io/docs/feature-guides/streams/stream-application-dsl/) section of the microsite for more information. + +Consider the following Java interface, which defines an input method and two output methods: + +``` +public interface Barista { + + @Input + SubscribableChannel orders(); + + @Output + MessageChannel hotDrinks(); + + @Output + MessageChannel coldDrinks(); +} +``` + +Further consider the following Java interface, which is typical for creating a Kafka Streams application: + +``` +interface KStreamKTableBinding { + + @Input + KStream inputStream(); + + @Input + KTable inputTable(); +} +``` + +In these cases with multiple input and output bindings, Data Flow cannot make any assumptions about the flow of data from one application to another. +Therefore, you need to set the binding properties to “wire up” the application. +The **Stream Application DSL** uses a “double pipe”, instead of the “pipe symbol”, to indicate that Data Flow should not configure the binding properties of the application. Think of `||` as meaning “in parallel”. +The following example shows such a “parallel” definition: + +``` +dataflow:> stream create --definition "orderGeneratorApp || baristaApp || hotDrinkDeliveryApp || coldDrinkDeliveryApp" --name myCafeStream +``` + +| |Breaking Change! Versions of SCDF Local, Cloud Foundry 1.7.0 to 1.7.2 and SCDF Kubernetes 1.7.0 to 1.7.1 used the `comma` character as the separator between applications. This caused breaking changes in the traditional Stream DSL. 
While not ideal, changing the separator character was felt to be the best solution with the least impact on existing users.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +This stream has four applications.`baristaApp` has two output destinations, `hotDrinks` and `coldDrinks`, intended to be consumed by the `hotDrinkDeliveryApp` and `coldDrinkDeliveryApp`, respectively. +When deploying this stream, you need to set the binding properties so that the `baristaApp` sends hot drink messages to the `hotDrinkDeliveryApp` destination and cold drink messages to the `coldDrinkDeliveryApp` destination. +The following listing does so: + +``` +app.baristaApp.spring.cloud.stream.bindings.hotDrinks.destination=hotDrinksDest +app.baristaApp.spring.cloud.stream.bindings.coldDrinks.destination=coldDrinksDest +app.hotDrinkDeliveryApp.spring.cloud.stream.bindings.input.destination=hotDrinksDest +app.coldDrinkDeliveryApp.spring.cloud.stream.bindings.input.destination=coldDrinksDest +``` + +If you want to use consumer groups, you need to set the Spring Cloud Stream application properties, `spring.cloud.stream.bindings..producer.requiredGroups` and `spring.cloud.stream.bindings..group`, on the producer and consumer applications respectively. + +Another common use case for the Stream Application DSL is to deploy a HTTP gateway application that sends a synchronous request or reply message to a Kafka or RabbitMQ application. +In this case, both the HTTP gateway application and the Kafka or RabbitMQ application can be a Spring Integration application that does not make use of the Spring Cloud Stream library. 
+ +It is also possible to deploy only a single application using the Stream application DSL. + +### 17.3. Application Properties + +Each application takes properties to customize its behavior. As an example, the `http` source module exposes a `port` setting that lets the data ingestion port be changed from the default value: + +``` +dataflow:> stream create --definition "http --port=8090 | log" --name myhttpstream +``` + +This `port` property is actually the same as the standard Spring Boot `server.port` property. +Data Flow adds the ability to use the shorthand form `port` instead of `server.port`. +You can also specify the longhand version: + +``` +dataflow:> stream create --definition "http --server.port=8000 | log" --name myhttpstream +``` + +This shorthand behavior is discussed more in the section on [Stream Application Properties](#spring-cloud-dataflow-application-properties). +If you have [registered application property metadata](https://dataflow.spring.io/docs/applications/application-metadata/#using-application-metadata), you can use tab completion in the shell after typing `--` to get a list of candidate property names. + +The shell provides tab completion for application properties. The `app info --name --type ` shell command provides additional documentation for all the supported properties. + +| |Supported Stream `` possibilities are: `source`, `processor`, and `sink`.| +|---|----------------------------------------------------------------------------------| + +## 18. Stream Lifecycle + +The lifecycle of a stream goes through the following stages: + +1. [Register a Stream Application](#spring-cloud-dataflow-register-stream-apps) + +2. [Creating a Stream](#spring-cloud-dataflow-create-stream) + +3. [Deploying a Stream](#spring-cloud-dataflow-deploy-stream) + +4. [Destroying a Stream](#spring-cloud-dataflow-destroy-stream) or [Undeploying a Stream](#spring-cloud-dataflow-undeploy-stream) + +5. 
 [Upgrade](#spring-cloud-dataflow-streams-upgrading) or [roll back](#spring-cloud-dataflow-streams-rollback) applications in the Stream. + +[Skipper](https://cloud.spring.io/spring-cloud-skipper/) is a server that lets you discover Spring Boot applications and manage their lifecycle on multiple Cloud Platforms. + +Applications in Skipper are bundled as packages that contain the application’s resource location, application properties, and deployment properties. +You can think of Skipper packages as being analogous to packages found in tools such as `apt-get` or `brew`. + +When Data Flow deploys a Stream, it generates and uploads a package to Skipper that represents the applications in the Stream. +Subsequent commands to upgrade or roll back the applications within the Stream are passed through to Skipper. +In addition, the Stream definition is reverse-engineered from the package, and the status of the Stream is also delegated to Skipper. + +### 18.1. Register a Stream Application + +You can register a versioned stream application by using the `app register` command. You must provide a unique name, an application type, and a URI that can be resolved to the application artifact. +For the type, specify `source`, `processor`, or `sink`. The version is resolved from the URI. 
Here are a few examples: + +``` +dataflow:>app register --name mysource --type source --uri maven://com.example:mysource:0.0.1 +dataflow:>app register --name mysource --type source --uri maven://com.example:mysource:0.0.2 +dataflow:>app register --name mysource --type source --uri maven://com.example:mysource:0.0.3 + +dataflow:>app list --id source:mysource +╔═══╤══════════════════╤═════════╤════╤════╗ +║app│ source │processor│sink│task║ +╠═══╪══════════════════╪═════════╪════╪════╣ +║ │> mysource-0.0.1 <│ │ │ ║ +║ │mysource-0.0.2 │ │ │ ║ +║ │mysource-0.0.3 │ │ │ ║ +╚═══╧══════════════════╧═════════╧════╧════╝ + +dataflow:>app register --name myprocessor --type processor --uri file:///Users/example/myprocessor-1.2.3.jar + +dataflow:>app register --name mysink --type sink --uri https://example.com/mysink-2.0.1.jar +``` + +The application URI should conform to one the following schema formats: + +* Maven schema: + + ``` + maven://:[:[:]]: + ``` + +* HTTP schema: + + ``` + http:///-.jar + ``` + +* File schema: + + ``` + file:////-.jar + ``` + +* Docker schema: + + ``` + docker:/: + ``` + +| |The URI `` part is compulsory for versioned stream applications.
Skipper uses the multi-versioned stream applications to allow upgrading or rolling back those applications at runtime by using the deployment properties.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you would like to register the snapshot versions of the `http` and `log`applications built with the RabbitMQ binder, you could do the following: + +``` +dataflow:>app register --name http --type source --uri maven://org.springframework.cloud.stream.app:http-source-rabbit:1.2.1.BUILD-SNAPSHOT +dataflow:>app register --name log --type sink --uri maven://org.springframework.cloud.stream.app:log-sink-rabbit:1.2.1.BUILD-SNAPSHOT +``` + +If you would like to register multiple applications at one time, you can store them in a properties file, +where the keys are formatted as `.` and the values are the URIs. + +For example, to register the snapshot versions of the `http` and `log`applications built with the RabbitMQ binder, you could have the following in a properties file (for example, `stream-apps.properties`): + +``` +source.http=maven://org.springframework.cloud.stream.app:http-source-rabbit:1.2.1.BUILD-SNAPSHOT +sink.log=maven://org.springframework.cloud.stream.app:log-sink-rabbit:1.2.1.BUILD-SNAPSHOT +``` + +Then, to import the applications in bulk, use the `app import` command and provide the location of the properties file with the `--uri` switch, as follows: + +``` +dataflow:>app import --uri file:////stream-apps.properties +``` + +Registering an application by using `--type app` is the same as registering a `source`, `processor` or `sink`. 
+Applications of the type `app` can be used only in the Stream Application DSL (which uses double pipes `||` instead of single pipes `|` in the DSL) and instructs Data Flow not to configure the Spring Cloud Stream binding properties of the application. +The application that is registered using `--type app` does not have to be a Spring Cloud Stream application. It can be any Spring Boot application. +See the [Stream Application DSL introduction](#spring-cloud-dataflow-stream-app-dsl) for more about using this application type. + +You can register multiple versions of the same applications (for example, the same name and type), but you can set only one as the default. +The default version is used for deploying Streams. + +The first time an application is registered, it is marked as default. The default application version can be altered with the `app default` command: + +``` +dataflow:>app default --id source:mysource --version 0.0.2 +dataflow:>app list --id source:mysource +╔═══╤══════════════════╤═════════╤════╤════╗ +║app│ source │processor│sink│task║ +╠═══╪══════════════════╪═════════╪════╪════╣ +║ │mysource-0.0.1 │ │ │ ║ +║ │> mysource-0.0.2 <│ │ │ ║ +║ │mysource-0.0.3 │ │ │ ║ +╚═══╧══════════════════╧═════════╧════╧════╝ +``` + +The `app list --id ` command lists all versions for a given stream application. + +The `app unregister` command has an optional `--version` parameter to specify the application version to unregister: + +``` +dataflow:>app unregister --name mysource --type source --version 0.0.1 +dataflow:>app list --id source:mysource +╔═══╤══════════════════╤═════════╤════╤════╗ +║app│ source │processor│sink│task║ +╠═══╪══════════════════╪═════════╪════╪════╣ +║ │> mysource-0.0.2 <│ │ │ ║ +║ │mysource-0.0.3 │ │ │ ║ +╚═══╧══════════════════╧═════════╧════╧════╝ +``` + +If `--version` is not specified, the default version is unregistered. + +| |All applications in a stream should have a default version set for the stream to be deployed.
Otherwise, they are treated as unregistered applications during the deployment.
Use the `app default` command to set the defaults.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +app default --id source:mysource --version 0.0.3 +dataflow:>app list --id source:mysource +╔═══╤══════════════════╤═════════╤════╤════╗ +║app│ source │processor│sink│task║ +╠═══╪══════════════════╪═════════╪════╪════╣ +║ │mysource-0.0.2 │ │ │ ║ +║ │> mysource-0.0.3 <│ │ │ ║ +╚═══╧══════════════════╧═════════╧════╧════╝ +``` + +The `stream deploy` necessitates default application versions being set. +The `stream update` and `stream rollback` commands, though, can use all (default and non-default) registered application versions. + +The following command creates a stream that uses the default mysource version (0.0.3): + +``` +dataflow:>stream create foo --definition "mysource | log" +``` + +Then we can update the version to 0.0.2: + +``` +dataflow:>stream update foo --properties version.mysource=0.0.2 +``` + +| |Only pre-registered applications can be used to `deploy`, `update`, or `rollback` a Stream.| +|---|-------------------------------------------------------------------------------------------| + +An attempt to update the `mysource` to version `0.0.1` (not registered) fails. + +#### 18.1.1. Register Supported Applications and Tasks + +For convenience, we have the static files with application-URIs (for both Maven and Docker) available +for all the out-of-the-box stream and task or batch app-starters. You can point to this file and import +all the application-URIs in bulk. Otherwise, as explained previously, you can register them individually or have your own +custom property file with only the required application-URIs in it. We recommend, however, having a “focused” +list of desired application-URIs in a custom property file. 
+ +##### Spring Cloud Stream App Starters + +The following table includes the `dataflow.spring.io` links to the available Stream Application Starters based on Spring Cloud Stream 2.1.x +and Spring Boot 2.1.x: + +| Artifact Type | Stable Release | SNAPSHOT Release | +|---------------------|----------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| RabbitMQ + Maven | [dataflow.spring.io/rabbitmq-maven-latest](https://dataflow.spring.io/rabbitmq-maven-latest) | [dataflow.spring.io/Einstein-BUILD-SNAPSHOT-stream-applications-rabbit-maven](https://dataflow.spring.io/Einstein-BUILD-SNAPSHOT-stream-applications-rabbit-maven) | +| RabbitMQ + Docker |[dataflow.spring.io/rabbitmq-docker-latest](https://dataflow.spring.io/rabbitmq-docker-latest)|[dataflow.spring.io/Einstein-BUILD-SNAPSHOT-stream-applications-rabbit-docker](https://dataflow.spring.io/Einstein-BUILD-SNAPSHOT-stream-applications-rabbit-docker)| +|Apache Kafka + Maven | [dataflow.spring.io/kafka-maven-latest](https://dataflow.spring.io/kafka-maven-latest) | [dataflow.spring.io/Einstein-BUILD-SNAPSHOT-stream-applications-kafka-maven](https://dataflow.spring.io/Einstein-BUILD-SNAPSHOT-stream-applications-kafka-maven) | +|Apache Kafka + Docker| [dataflow.spring.io/kafka-docker-latest](https://dataflow.spring.io/kafka-docker-latest) | [dataflow.spring.io/Einstein-BUILD-SNAPSHOT-stream-applications-kafka-docker](https://dataflow.spring.io/Einstein-BUILD-SNAPSHOT-stream-applications-kafka-docker) | + +| |By default, App Starter actuator endpoints are secured. You can disable security by deploying streams with the`app.*.spring.autoconfigure.exclude=org.springframework.boot.autoconfigure.security.servlet.SecurityAutoConfiguration` property.
On Kubernetes, see the [Liveness and readiness probes](#getting-started-kubernetes-probes) section for how to configure
security for actuator endpoints.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with the Spring Cloud Stream 2.1 GA release, we now have robust interoperability with the Spring Cloud Function
programming model. Building on that, with the Einstein release-train, it is now possible to pick a few Stream App
Starters and compose them into a single application by using the functional-style programming model. Check out the ["Composed Function Support in
Spring Cloud Data Flow"](https://spring.io/blog/2019/01/09/composed-function-support-in-spring-cloud-data-flow) blog to learn more about the developer and orchestration-experience with an example.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Spring Cloud Task App Starters + +The following table includes the available Task Application Starters based on Spring Cloud Task 2.1.x and Spring Boot 2.1.x: + +|Artifact Type| Stable Release | SNAPSHOT Release | +|-------------|--------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| +| Maven | [dataflow.spring.io/task-maven-latest](https://dataflow.spring.io/task-maven-latest) | [dataflow.spring.io/Elston-BUILD-SNAPSHOT-task-applications-maven](https://dataflow.spring.io/Elston-BUILD-SNAPSHOT-task-applications-maven) | +| Docker |[dataflow.spring.io/task-docker-latest](https://dataflow.spring.io/task-docker-latest)|[dataflow.spring.io/Elston-BUILD-SNAPSHOT-task-applications-docker](https://dataflow.spring.io/Elston-BUILD-SNAPSHOT-task-applications-docker)| + +You can find more information about the available task starters in the [Task App Starters Project Page](https://cloud.spring.io/spring-cloud-task-app-starters/) and +related reference documentation. 
For more information about the available stream starters, look at the [Stream App Starters Project Page](https://cloud.spring.io/spring-cloud-stream-app-starters/)and related reference documentation. + +As an example, if you would like to register all out-of-the-box stream applications built with the Kafka binder in bulk, you can use the following command: + +``` +$ dataflow:>app import --uri https://dataflow.spring.io/kafka-maven-latest +``` + +Alternatively, you can register all the stream applications with the Rabbit binder, as follows: + +``` +$ dataflow:>app import --uri https://dataflow.spring.io/rabbitmq-maven-latest +``` + +You can also pass the `--local` option (which is `true` by default) to indicate whether the +properties file location should be resolved within the shell process itself. If the location should +be resolved from the Data Flow Server process, specify `--local false`. + +| |When you use either `app register` or `app import`, if an application is already registered with
the provided name, type, and version, it is, by default, not overridden. If you would like to override the
pre-existing application `uri` or `metadata-uri` coordinates, include the `--force` option.

Note, however, that, once downloaded, applications may be cached locally on the Data Flow server, based on the resource
location. If the resource location does not change (even though the actual resource *bytes* may be different), it
is not re-downloaded. When using `maven://` resources, on the other hand, using a constant location may still circumvent
caching (if using `-SNAPSHOT` versions).

Moreover, if a stream is already deployed and uses some version of a registered app, then (forcibly) re-registering a
different application has no effect until the stream is deployed again.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |In some cases, the resource is resolved on the server side. In others, the
URI is passed to a runtime container instance, where it is resolved. See
the specific documentation of each Data Flow Server for more detail.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 18.1.2. Creating Custom Applications + +While Data Flow includes source, processor, sink applications, you can extend these applications or write a custom [Spring Cloud Stream](https://github.com/spring-cloud/spring-cloud-stream) application. + +The process of creating Spring Cloud Stream applications with [Spring Initializr](https://start.spring.io/) is detailed in the Spring Cloud Stream [documentation](https://docs.spring.io/spring-cloud-stream/docs/current/reference/htmlsingle/index.html#_getting_started). +You can include multiple binders to an application. +If you do so, see the instructions in [[passing\_producer\_consumer\_properties]](#passing_producer_consumer_properties) for how to configure them. + +To support allowing properties, Spring Cloud Stream applications running in Spring Cloud Data Flow can include the Spring Boot `configuration-processor` as an optional dependency, as shown in the following example: + +``` + + + + org.springframework.boot + spring-boot-configuration-processor + true + + +``` + +NOTE:Make sure that the `spring-boot-maven-plugin` is included in the POM. +The plugin is necessary for creating the executable jar that is registered with Spring Cloud Data Flow. +Spring Initialzr includes the plugin in the generated POM. + +Once you have created a custom application, you can register it, as described in [Register a Stream Application](#spring-cloud-dataflow-register-stream-apps). + +### 18.2. Creating a Stream + +The Spring Cloud Data Flow Server exposes a full RESTful API for managing the lifecycle of stream definitions, but the easiest way to use is it is through the Spring Cloud Data Flow shell. 
The [Getting Started](#getting-started) section describes how to start the shell. + +New streams are created with the help of stream definitions. The definitions are built from a simple DSL. For example, consider what happens if we run the following shell command: + +``` +dataflow:> stream create --definition "time | log" --name ticktock +``` + +This defines a stream named `ticktock` that is based off of the DSL expression `time | log`. The DSL uses the “pipe” symbol (`|`), to connect a source to a sink. + +The `stream info` command shows useful information about the stream, as shown (with its output) in the following example: + +``` +dataflow:>stream info ticktock +╔═══════════╤═════════════════╤══════════╗ +║Stream Name│Stream Definition│ Status ║ +╠═══════════╪═════════════════╪══════════╣ +║ticktock │time | log │undeployed║ +╚═══════════╧═════════════════╧══════════╝ +``` + +#### 18.2.1. Stream Application Properties + +Application properties are the properties associated with each application in the stream. When the application is deployed, the application properties are applied to the application through +command-line arguments or environment variables, depending on the underlying deployment implementation. + +The following stream can have application properties defined at the time of stream creation: + +``` +dataflow:> stream create --definition "time | log" --name ticktock +``` + +The `app info --name --type ` shell command displays the exposed application properties for the application. +For more about exposed properties, see [Application Metadata](https://dataflow.spring.io/docs/applications/application-metadata). 
+ +The following listing shows the exposed properties for the `time` application: + +``` +dataflow:> app info --name time --type source +╔══════════════════════════════╤══════════════════════════════╤══════════════════════════════╤══════════════════════════════╗ +║ Option Name │ Description │ Default │ Type ║ +╠══════════════════════════════╪══════════════════════════════╪══════════════════════════════╪══════════════════════════════╣ +║trigger.time-unit │The TimeUnit to apply to delay│ │java.util.concurrent.TimeUnit ║ +║ │values. │ │ ║ +║trigger.fixed-delay │Fixed delay for periodic │1 │java.lang.Integer ║ +║ │triggers. │ │ ║ +║trigger.cron │Cron expression value for the │ │java.lang.String ║ +║ │Cron Trigger. │ │ ║ +║trigger.initial-delay │Initial delay for periodic │0 │java.lang.Integer ║ +║ │triggers. │ │ ║ +║trigger.max-messages │Maximum messages per poll, -1 │1 │java.lang.Long ║ +║ │means infinity. │ │ ║ +║trigger.date-format │Format for the date value. │ │java.lang.String ║ +╚══════════════════════════════╧══════════════════════════════╧══════════════════════════════╧══════════════════════════════╝ +``` + +The following listing shows the exposed properties for the `log` application: + +``` +dataflow:> app info --name log --type sink +╔══════════════════════════════╤══════════════════════════════╤══════════════════════════════╤══════════════════════════════╗ +║ Option Name │ Description │ Default │ Type ║ +╠══════════════════════════════╪══════════════════════════════╪══════════════════════════════╪══════════════════════════════╣ +║log.name │The name of the logger to use.│ │java.lang.String ║ +║log.level │The level at which to log │ │org.springframework.integratio║ +║ │messages. │ │n.handler.LoggingHandler$Level║ +║log.expression │A SpEL expression (against the│payload │java.lang.String ║ +║ │incoming message) to evaluate │ │ ║ +║ │as the logged message. 
│ │ ║ +╚══════════════════════════════╧══════════════════════════════╧══════════════════════════════╧══════════════════════════════╝ +``` + +You can specify the application properties for the `time` and `log` apps at the time of `stream` creation, as follows: + +``` +dataflow:> stream create --definition "time --fixed-delay=5 | log --level=WARN" --name ticktock +``` + +Note that, in the preceding example, the `fixed-delay` and `level` properties defined for the `time` and `log` applications are the “short-form” property names provided by the shell completion. +These “short-form” property names are applicable only for the exposed properties. In all other cases, you should use only fully qualified property names. + +#### 18.2.2. Common Application Properties + +In addition to configuration through DSL, Spring Cloud Data Flow provides a mechanism for setting common properties to all +the streaming applications that are launched by it. +This can be done by adding properties prefixed with `spring.cloud.dataflow.applicationProperties.stream` when starting +the server. +When doing so, the server passes all the properties, without the prefix, to the instances it launches. + +For example, all the launched applications can be configured to use a specific Kafka broker by launching the +Data Flow server with the following options: + +``` +--spring.cloud.dataflow.applicationProperties.stream.spring.cloud.stream.kafka.binder.brokers=192.168.1.100:9092 +--spring.cloud.dataflow.applicationProperties.stream.spring.cloud.stream.kafka.binder.zkNodes=192.168.1.100:2181 +``` + +Doing so causes the `spring.cloud.stream.kafka.binder.brokers` and `spring.cloud.stream.kafka.binder.zkNodes` properties +to be passed to all the launched applications. + +| |Properties configured with this mechanism have lower precedence than stream deployment properties.
They are overridden if a property with the same key is specified at stream deployment time (for example,`app.http.spring.cloud.stream.kafka.binder.brokers` overrides the common property).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 18.3. Deploying a Stream + +This section describes how to deploy a Stream when the Spring Cloud Data Flow server is responsible for deploying the stream. It covers the deployment and upgrade of Streams by using the Skipper service. The description of how to set deployment properties applies to both approaches of Stream deployment. + +Consider the `ticktock` stream definition: + +``` +dataflow:> stream create --definition "time | log" --name ticktock +``` + +To deploy the stream, use the following shell command: + +``` +dataflow:> stream deploy --name ticktock +``` + +The Data Flow Server delegates to Skipper the resolution and deployment of the `time` and `log` applications. 
+ +The `stream info` command shows useful information about the stream, including the deployment properties: + +``` +dataflow:>stream info --name ticktock +╔═══════════╤═════════════════╤═════════╗ +║Stream Name│Stream Definition│ Status ║ +╠═══════════╪═════════════════╪═════════╣ +║ticktock │time | log │deploying║ +╚═══════════╧═════════════════╧═════════╝ + +Stream Deployment properties: { + "log" : { + "resource" : "maven://org.springframework.cloud.stream.app:log-sink-rabbit", + "spring.cloud.deployer.group" : "ticktock", + "version" : "2.0.1.RELEASE" + }, + "time" : { + "resource" : "maven://org.springframework.cloud.stream.app:time-source-rabbit", + "spring.cloud.deployer.group" : "ticktock", + "version" : "2.0.1.RELEASE" + } +} +``` + +There is an important optional command argument (called `--platformName`) to the `stream deploy` command. +Skipper can be configured to deploy to multiple platforms. +Skipper is pre-configured with a platform named `default`, which deploys applications to the local machine where Skipper is running. +The default value of the `--platformName` command line argument is `default`. +If you commonly deploy to one platform, when installing Skipper, you can override the configuration of the `default` platform. +Otherwise, specify the `platformName` to be one of the values returned by the `stream platform-list` command. + +In the preceding example, the time source sends the current time as a message each second, and the log sink outputs it by using the logging framework. +You can tail the `stdout` log (which has an `` suffix). 
The log files are located within the directory displayed in the Data Flow Server’s log output, as shown in the following listing: + +``` +$ tail -f /var/folders/wn/8jxm_tbd1vj28c8vj37n900m0000gn/T/spring-cloud-dataflow-912434582726479179/ticktock-1464788481708/ticktock.log/stdout_0.log +2016-06-01 09:45:11.250 INFO 79194 --- [ kafka-binder-] log.sink : 06/01/16 09:45:11 +2016-06-01 09:45:12.250 INFO 79194 --- [ kafka-binder-] log.sink : 06/01/16 09:45:12 +2016-06-01 09:45:13.251 INFO 79194 --- [ kafka-binder-] log.sink : 06/01/16 09:45:13 +``` + +You can also create and deploy the stream in one step by passing the `--deploy` flag when creating the stream, as follows: + +``` +dataflow:> stream create --definition "time | log" --name ticktock --deploy +``` + +However, it is not common in real-world use cases to create and deploy the stream in one step. +The reason is that when you use the `stream deploy` command, you can pass in properties that define how to map the applications onto the platform (for example, what is the memory size of the container to use, the number of each application to run, and whether to enable data partitioning features). +Properties can also override application properties that were set when creating the stream. +The next sections cover this feature in detail. + +#### 18.3.1. Deployment Properties + +When deploying a stream, you can specify properties that can control how applications are deployed and configured. See the [Deployment Properties](https://dataflow.spring.io/docs/feature-guides/streams/deployment-properties/) section of the microsite for more information. + +### 18.4. Destroying a Stream + +You can delete a stream by issuing the `stream destroy` command from the shell, as follows: + +``` +dataflow:> stream destroy --name ticktock +``` + +If the stream was deployed, it is undeployed before the stream definition is deleted. + +### 18.5. 
Undeploying a Stream + +Often, you want to stop a stream but retain the name and definition for future use. In that case, you can `undeploy` the stream by name: + +``` +dataflow:> stream undeploy --name ticktock +dataflow:> stream deploy --name ticktock +``` + +You can issue the `deploy` command at a later time to restart it: + +``` +dataflow:> stream deploy --name ticktock +``` + +### 18.6. Validating a Stream + +Sometimes, an application contained within a stream definition contains an invalid URI in its registration. +This can caused by an invalid URI being entered at application registration time or by the application being removed from the repository from which it was to be drawn. +To verify that all the applications contained in a stream are resolve-able, a user can use the `validate` command: + +``` +dataflow:>stream validate ticktock +╔═══════════╤═════════════════╗ +║Stream Name│Stream Definition║ +╠═══════════╪═════════════════╣ +║ticktock │time | log ║ +╚═══════════╧═════════════════╝ + +ticktock is a valid stream. +╔═══════════╤═════════════════╗ +║ App Name │Validation Status║ +╠═══════════╪═════════════════╣ +║source:time│valid ║ +║sink:log │valid ║ +╚═══════════╧═════════════════╝ +``` + +In the preceding example, the user validated their ticktock stream. Both the `source:time` and `sink:log` are valid. +Now we can see what happens if we have a stream definition with a registered application with an invalid URI: + +``` +dataflow:>stream validate bad-ticktock +╔════════════╤═════════════════╗ +║Stream Name │Stream Definition║ +╠════════════╪═════════════════╣ +║bad-ticktock│bad-time | log ║ +╚════════════╧═════════════════╝ + +bad-ticktock is an invalid stream. 
+╔═══════════════╤═════════════════╗ +║ App Name │Validation Status║ +╠═══════════════╪═════════════════╣ +║source:bad-time│invalid ║ +║sink:log │valid ║ +╚═══════════════╧═════════════════╝ +``` + +In this case, Spring Cloud Data Flow states that the stream is invalid because `source:bad-time` has an invalid URI. + +### 18.7. Updating a Stream + +To update the stream, use the `stream update` command, which takes either `--properties` or `--propertiesFile` as a command argument. +Skipper has an important new top-level prefix: `version`. +The following commands deploy `http | log` stream (and the version of `log` which registered at the time of deployment was `1.1.0.RELEASE`): + +``` +dataflow:> stream create --name httptest --definition "http --server.port=9000 | log" +dataflow:> stream deploy --name httptest +dataflow:>stream info httptest +╔══════════════════════════════╤══════════════════════════════╤════════════════════════════╗ +║ Name │ DSL │ Status ║ +╠══════════════════════════════╪══════════════════════════════╪════════════════════════════╣ +║httptest │http --server.port=9000 | log │deploying ║ +╚══════════════════════════════╧══════════════════════════════╧════════════════════════════╝ + +Stream Deployment properties: { + "log" : { + "spring.cloud.deployer.indexed" : "true", + "spring.cloud.deployer.group" : "httptest", + "maven://org.springframework.cloud.stream.app:log-sink-rabbit" : "1.1.0.RELEASE" + }, + "http" : { + "spring.cloud.deployer.group" : "httptest", + "maven://org.springframework.cloud.stream.app:http-source-rabbit" : "1.1.0.RELEASE" + } +} +``` + +Then the following command updates the stream to use the `1.2.0.RELEASE` version of the log application. 
+Before updating the stream with the specific version of the application, we need to make sure that the application is registered with that version: + +``` +dataflow:>app register --name log --type sink --uri maven://org.springframework.cloud.stream.app:log-sink-rabbit:1.2.0.RELEASE +Successfully registered application 'sink:log' +``` + +Then we can update the application: + +``` +dataflow:>stream update --name httptest --properties version.log=1.2.0.RELEASE +``` + +| |You can use only pre-registered application versions to `deploy`, `update`, or `rollback` a stream.| +|---|---------------------------------------------------------------------------------------------------| + +To verify the deployment properties and the updated version, we can use `stream info`, as shown (with its output) in the following example: + +``` +dataflow:>stream info httptest +╔══════════════════════════════╤══════════════════════════════╤════════════════════════════╗ +║ Name │ DSL │ Status ║ +╠══════════════════════════════╪══════════════════════════════╪════════════════════════════╣ +║httptest │http --server.port=9000 | log │deploying ║ +╚══════════════════════════════╧══════════════════════════════╧════════════════════════════╝ + +Stream Deployment properties: { + "log" : { + "spring.cloud.deployer.indexed" : "true", + "spring.cloud.deployer.count" : "1", + "spring.cloud.deployer.group" : "httptest", + "maven://org.springframework.cloud.stream.app:log-sink-rabbit" : "1.2.0.RELEASE" + }, + "http" : { + "spring.cloud.deployer.group" : "httptest", + "maven://org.springframework.cloud.stream.app:http-source-rabbit" : "1.1.0.RELEASE" + } +} +``` + +### 18.8. Forcing an Update of a Stream + +When upgrading a stream, you can use the `--force` option to deploy new instances of currently deployed applications even if no application or deployment properties have changed. 
+This behavior is needed for when configuration information is obtained by the application itself at startup time — for example, from Spring Cloud Config Server. +You can specify the applications for which to force an upgrade by using the `--app-names` option. +If you do not specify any application names, all the applications are forced to upgrade. +You can specify the `--force` and `--app-names` options together with the `--properties` or `--propertiesFile` options. + +### 18.9. Stream Versions + +Skipper keeps a history of the streams that were deployed. +After updating a Stream, there is a second version of the stream. +You can query for the history of the versions by using the `stream history --name ` command: + +``` +dataflow:>stream history --name httptest +╔═══════╤════════════════════════════╤════════╤════════════╤═══════════════╤════════════════╗ +║Version│ Last updated │ Status │Package Name│Package Version│ Description ║ +╠═══════╪════════════════════════════╪════════╪════════════╪═══════════════╪════════════════╣ +║2 │Mon Nov 27 22:41:16 EST 2017│DEPLOYED│httptest │1.0.0 │Upgrade complete║ +║1 │Mon Nov 27 22:40:41 EST 2017│DELETED │httptest │1.0.0 │Delete complete ║ +╚═══════╧════════════════════════════╧════════╧════════════╧═══════════════╧════════════════╝ +``` + +### 18.10. Stream Manifests + +Skipper keeps a “manifest” of the all of the applications, their application properties, and their deployment properties after all values have been substituted. +This represents the final state of what was deployed to the platform. +You can view the manifest for any of the versions of a Stream by using the following command: + +``` +stream manifest --name --releaseVersion +``` + +If the `--releaseVersion` is not specified, the manifest for the last version is returned. 
+ +The following example shows the use of the manifest: + +``` +dataflow:>stream manifest --name httptest +``` + +Using the command results in the following output: + +``` +# Source: log.yml +apiVersion: skipper.spring.io/v1 +kind: SpringCloudDeployerApplication +metadata: + name: log +spec: + resource: maven://org.springframework.cloud.stream.app:log-sink-rabbit + version: 1.2.0.RELEASE + applicationProperties: + spring.cloud.dataflow.stream.app.label: log + spring.cloud.stream.bindings.input.group: httptest + spring.cloud.dataflow.stream.name: httptest + spring.cloud.dataflow.stream.app.type: sink + spring.cloud.stream.bindings.input.destination: httptest.http + deploymentProperties: + spring.cloud.deployer.indexed: true + spring.cloud.deployer.group: httptest + spring.cloud.deployer.count: 1 + +--- +# Source: http.yml +apiVersion: skipper.spring.io/v1 +kind: SpringCloudDeployerApplication +metadata: + name: http +spec: + resource: maven://org.springframework.cloud.stream.app:http-source-rabbit + version: 1.2.0.RELEASE + applicationProperties: + spring.cloud.dataflow.stream.app.label: http + spring.cloud.stream.bindings.output.producer.requiredGroups: httptest + server.port: 9000 + spring.cloud.stream.bindings.output.destination: httptest.http + spring.cloud.dataflow.stream.name: httptest + spring.cloud.dataflow.stream.app.type: source + deploymentProperties: + spring.cloud.deployer.group: httptest +``` + +The majority of the deployment and application properties were set by Data Flow to enable the applications to talk to each other and to send application metrics with identifying labels. + +### 18.11. Rollback a Stream + +You can roll back to a previous version of the stream by using the `stream rollback` command: + +``` +dataflow:>stream rollback --name httptest +``` + +The optional `--releaseVersion` command argument adds the version of the stream. +If not specified, the rollback operation goes to the previous stream version. + +### 18.12. 
Application Count + +The application count is a dynamic property of the system used to specify the number of instances of applications. See the [Application Count](https://dataflow.spring.io/docs/feature-guides/streams/application-count/) section of the microsite for more information. + +### 18.13. Skipper’s Upgrade Strategy + +Skipper has a simple “red/black” upgrade strategy. It deploys the new version of the applications, using as many instances as the currently running version, and checks the `/health` endpoint of the application. +If the health of the new application is good, the previous application is undeployed. +If the health of the new application is bad, all new applications are undeployed, and the upgrade is considered to be not successful. + +The upgrade strategy is not a rolling upgrade, so, if five instances of the application are running, then, in a sunny-day scenario, five of the new applications are also running before the older version is undeployed. + +## 19. Stream DSL + +This section covers additional features of the Stream DSL not covered in the [Stream DSL introduction](#spring-cloud-dataflow-stream-intro-dsl). + +### 19.1. Tap a Stream + +Taps can be created at various producer endpoints in a stream. See the [Tapping a Stream](https://dataflow.spring.io/docs/feature-guides/streams/taps/) section of the microsite for more information. + +### 19.2. Using Labels in a Stream + +When a stream is made up of multiple applications with the same name, they must be qualified with labels. +See the [Labeling Applications](https://dataflow.spring.io/docs/feature-guides/streams/labels/) section of the microsite for more information. + +### 19.3. Named Destinations + +Instead of referencing a source or sink application, you can use a named destination. +See the [Named Destinations](https://dataflow.spring.io/docs/feature-guides/streams/named-destinations/) section of the microsite for more information. + +### 19.4. 
Fan-in and Fan-out + +By using named destinations, you can support fan-in and fan-out use cases. +See the [Fan-in and Fan-out](https://dataflow.spring.io/docs/feature-guides/streams/fanin-fanout/) section of the microsite for more information. + +## 20. Stream Java DSL + +Instead of using the shell to create and deploy streams, you can use the Java-based DSL provided by the `spring-cloud-dataflow-rest-client` module. +See the [Java DSL](https://dataflow.spring.io/docs/feature-guides/streams/java-dsl/) section of the microsite for more information. + +## 21. Stream Applications with Multiple Binder Configurations + +In some cases, a stream can have its applications bound to multiple spring cloud stream binders when they are required to connect to different messaging +middleware configurations. In those cases, you should make sure the applications are configured appropriately with their binder +configurations. For example, a multi-binder transformer that supports both Kafka and Rabbit binders is the processor in the following stream: + +``` +http | multibindertransform --expression=payload.toUpperCase() | log +``` + +| |In the preceding example, you would write your own `multibindertransform` application.| +|---|--------------------------------------------------------------------------------------| + +In this stream, each application connects to messaging middleware in the following way: + +1. The HTTP source sends events to RabbitMQ (`rabbit1`). + +2. The Multi-Binder Transform processor receives events from RabbitMQ (`rabbit1`) and sends the processed events into Kafka (`kafka1`). + +3. The log sink receives events from Kafka (`kafka1`). + +Here, `rabbit1` and `kafka1` are the binder names given in the Spring Cloud Stream application properties. 
+Based on this setup, the applications have the following binders in their classpaths with the appropriate configuration: + +* HTTP: Rabbit binder + +* Transform: Both Kafka and Rabbit binders + +* Log: Kafka binder + +The `spring-cloud-stream` `binder` configuration properties can be set within the applications themselves. +If not, they can be passed through `deployment` properties when the stream is deployed: + +``` +dataflow:>stream create --definition "http | multibindertransform --expression=payload.toUpperCase() | log" --name mystream + +dataflow:>stream deploy mystream --properties "app.http.spring.cloud.stream.bindings.output.binder=rabbit1,app.multibindertransform.spring.cloud.stream.bindings.input.binder=rabbit1, +app.multibindertransform.spring.cloud.stream.bindings.output.binder=kafka1,app.log.spring.cloud.stream.bindings.input.binder=kafka1" +``` + +You can override any of the binder configuration properties by specifying them through deployment properties. + +## 22. Function Composition + +Function composition lets you attach a functional logic dynamically to an existing event streaming application. See the [Function Composition](https://dataflow.spring.io/docs/feature-guides/streams/function-composition/) section of the microsite for more details. + +## 23. Functional Applications + +With Spring Cloud Stream 3.x adding [functional support](https://cloud.spring.io/spring-cloud-static/spring-cloud-stream/current/reference/html/spring-cloud-stream.html#spring-cloud-stream-overview-producing-consuming-messages), you can build `Source`, `Sink` and `Processor` applications merely by implementing the Java Util’s `Supplier`, `Consumer`, and `Function` interfaces respectively. +See the [Functional Application Recipe](https://dataflow.spring.io/docs/recipes/functional-apps/) of the SCDF site for more about this feature. + +## 24. 
Examples + +This chapter includes the following examples: + +* [Simple Stream Processing](#spring-cloud-dataflow-simple-stream) + +* [Stateful Stream Processing](#spring-cloud-dataflow-stream-partitions) + +* [Other Source and Sink Application Types](#spring-cloud-dataflow-stream-app-types) + +You can find links to more samples in the “[Samples](#dataflow-samples)” chapter. + +### 24.1. Simple Stream Processing + +As an example of a simple processing step, we can transform the payload of the HTTP-posted data to upper case by using the following stream definition: + +``` +http | transform --expression=payload.toUpperCase() | log +``` + +To create this stream, enter the following command in the shell: + +``` +dataflow:> stream create --definition "http --server.port=9000 | transform --expression=payload.toUpperCase() | log" --name mystream --deploy +``` + +The following example uses a shell command to post some data: + +``` +dataflow:> http post --target http://localhost:9000 --data "hello" +``` + +The preceding example results in an upper-case `HELLO` in the log, as follows: + +``` +2016-06-01 09:54:37.749 INFO 80083 --- [ kafka-binder-] log.sink : HELLO +``` + +### 24.2. 
Stateful Stream Processing + +To demonstrate the data partitioning functionality, the following listing deploys a stream with Kafka as the binder: + +``` +dataflow:>stream create --name words --definition "http --server.port=9900 | splitter --expression=payload.split(' ') | log" +Created new stream 'words' + +dataflow:>stream deploy words --properties "app.splitter.producer.partitionKeyExpression=payload,deployer.log.count=2" +Deployed stream 'words' + +dataflow:>http post --target http://localhost:9900 --data "How much wood would a woodchuck chuck if a woodchuck could chuck wood" +> POST (text/plain;Charset=UTF-8) http://localhost:9900 How much wood would a woodchuck chuck if a woodchuck could chuck wood +> 202 ACCEPTED + +dataflow:>runtime apps +╔════════════════════╤═══════════╤═══════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╗ +║App Id / Instance Id│Unit Status│ No. of Instances / Attributes ║ +╠════════════════════╪═══════════╪═══════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╣ +║words.log-v1 │ deployed │ 2 ║ +╟┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈╢ +║ │ │ guid = 24166 ║ +║ │ │ pid = 33097 ║ +║ │ │ port = 24166 ║ +║words.log-v1-0 │ deployed │ stderr = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803461063/words.log-v1/stderr_0.log ║ +║ │ │ stdout = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803461063/words.log-v1/stdout_0.log ║ +║ │ │ url = https://192.168.0.102:24166 ║ +║ │ │working.dir = 
/var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803461063/words.log-v1 ║ +╟┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈╢ +║ │ │ guid = 41269 ║ +║ │ │ pid = 33098 ║ +║ │ │ port = 41269 ║ +║words.log-v1-1 │ deployed │ stderr = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803461063/words.log-v1/stderr_1.log ║ +║ │ │ stdout = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803461063/words.log-v1/stdout_1.log ║ +║ │ │ url = https://192.168.0.102:41269 ║ +║ │ │working.dir = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803461063/words.log-v1 ║ +╟────────────────────┼───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╢ +║words.http-v1 │ deployed │ 1 ║ +╟┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈╢ +║ │ │ guid = 9900 ║ +║ │ │ pid = 33094 ║ +║ │ │ port = 9900 ║ +║words.http-v1-0 │ deployed │ stderr = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803461054/words.http-v1/stderr_0.log ║ +║ │ │ stdout = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803461054/words.http-v1/stdout_0.log ║ +║ │ │ url = https://192.168.0.102:9900 ║ +║ │ │working.dir = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803461054/words.http-v1 ║ 
+╟────────────────────┼───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╢ +║words.splitter-v1 │ deployed │ 1 ║ +╟┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈╢ +║ │ │ guid = 33963 ║ +║ │ │ pid = 33093 ║ +║ │ │ port = 33963 ║ +║words.splitter-v1-0 │ deployed │ stderr = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803437542/words.splitter-v1/stderr_0.log║ +║ │ │ stdout = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803437542/words.splitter-v1/stdout_0.log║ +║ │ │ url = https://192.168.0.102:33963 ║ +║ │ │working.dir = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/words-1542803437542/words.splitter-v1 ║ +╚════════════════════╧═══════════╧═══════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ +``` + +When you review the `words.log-v1-0` logs, you should see the following: + +``` +2016-06-05 18:35:47.047 INFO 58638 --- [ kafka-binder-] log.sink : How +2016-06-05 18:35:47.066 INFO 58638 --- [ kafka-binder-] log.sink : chuck +2016-06-05 18:35:47.066 INFO 58638 --- [ kafka-binder-] log.sink : chuck +``` + +When you review the `words.log-v1-1` logs, you should see the following: + +``` +2016-06-05 18:35:47.047 INFO 58639 --- [ kafka-binder-] log.sink : much +2016-06-05 18:35:47.066 INFO 58639 --- [ kafka-binder-] log.sink : wood +2016-06-05 18:35:47.066 INFO 58639 --- [ kafka-binder-] log.sink : would +2016-06-05 18:35:47.066 INFO 58639 --- [ kafka-binder-] log.sink : a +2016-06-05 18:35:47.066 INFO 58639 --- [ kafka-binder-] log.sink : woodchuck +2016-06-05 
18:35:47.067 INFO 58639 --- [ kafka-binder-] log.sink : if +2016-06-05 18:35:47.067 INFO 58639 --- [ kafka-binder-] log.sink : a +2016-06-05 18:35:47.067 INFO 58639 --- [ kafka-binder-] log.sink : woodchuck +2016-06-05 18:35:47.067 INFO 58639 --- [ kafka-binder-] log.sink : could +2016-06-05 18:35:47.067 INFO 58639 --- [ kafka-binder-] log.sink : wood +``` + +This example has shown that payload splits that contain the same word are routed to the same application instance. + +### 24.3. Other Source and Sink Application Types + +This example shows something a bit more complicated: swapping out the `time` source for something else. Another supported source type is `http`, which accepts data for ingestion over HTTP POST requests. Note that the `http` source accepts data on a different port from the Data Flow Server (default 8080). By default, the port is randomly assigned. + +To create a stream that uses an `http` source but still uses the same `log` sink, we would change the original command in the [Simple Stream Processing](#spring-cloud-dataflow-simple-stream) example to the following: + +``` +dataflow:> stream create --definition "http | log" --name myhttpstream --deploy +``` + +Note that, this time, we do not see any other output until we actually post some data (by using a shell command). To see the randomly assigned port on which the `http` source is listening, run the following command: + +``` +dataflow:>runtime apps + +╔══════════════════════╤═══════════╤═════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╗ +║ App Id / Instance Id │Unit Status│ No. 
of Instances / Attributes ║ +╠══════════════════════╪═══════════╪═════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╣ +║myhttpstream.log-v1 │ deploying │ 1 ║ +╟┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈╢ +║ │ │ guid = 39628 ║ +║ │ │ pid = 34403 ║ +║ │ │ port = 39628 ║ +║myhttpstream.log-v1-0 │ deploying │ stderr = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/myhttpstream-1542803867070/myhttpstream.log-v1/stderr_0.log ║ +║ │ │ stdout = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/myhttpstream-1542803867070/myhttpstream.log-v1/stdout_0.log ║ +║ │ │ url = https://192.168.0.102:39628 ║ +║ │ │working.dir = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/myhttpstream-1542803867070/myhttpstream.log-v1 ║ +╟──────────────────────┼───────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╢ +║myhttpstream.http-v1 │ deploying │ 1 ║ +╟┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┼┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈┈╢ +║ │ │ guid = 52143 ║ +║ │ │ pid = 34401 ║ +║ │ │ port = 52143 ║ +║myhttpstream.http-v1-0│ deploying │ stderr = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/myhttpstream-1542803866800/myhttpstream.http-v1/stderr_0.log║ +║ │ │ stdout = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/myhttpstream-1542803866800/myhttpstream.http-v1/stdout_0.log║ +║ 
│ │ url = https://192.168.0.102:52143 ║ +║ │ │working.dir = /var/folders/js/7b_pn0t575l790x7j61slyxc0000gn/T/spring-cloud-deployer-6467595568759190742/myhttpstream-1542803866800/myhttpstream.http-v1 ║ +╚══════════════════════╧═══════════╧═════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ +``` + +You should see that the corresponding `http` source has a `url` property that contains the host and port information on which it is listening. You are now ready to post to that url, as shown in the following example: + +``` +dataflow:> http post --target http://localhost:1234 --data "hello" +dataflow:> http post --target http://localhost:1234 --data "goodbye" +``` + +The stream then funnels the data from the `http` source to the output log implemented by the `log` sink, yielding output similar to the following: + +``` +2016-06-01 09:50:22.121 INFO 79654 --- [ kafka-binder-] log.sink : hello +2016-06-01 09:50:26.810 INFO 79654 --- [ kafka-binder-] log.sink : goodbye +``` + +We could also change the sink implementation. You could pipe the output to a file (`file`), to hadoop (`hdfs`), or to any of the other sink applications that are available. You can also define your own applications. + +# Stream Developer Guide + +See the [Stream Developer Guides](https://dataflow.spring.io/docs/stream-developer-guides/) on the microsite for more about how to create, test, and run Spring Cloud Stream applications on your local machine. + +# Stream Monitoring + +See the [Stream Monitoring Guide](https://dataflow.spring.io/docs/feature-guides/streams/monitoring/) on the microsite for more about how to monitor the applications that were deployed as part of a Stream. + +# Tasks + +This section goes into more detail about how you can orchestrate [Spring Cloud Task](https://cloud.spring.io/spring-cloud-task/) applications on Spring Cloud Data Flow. 
+ +If you are just starting out with Spring Cloud Data Flow, you should probably read the Getting Started guide for “[Local](#getting-started-local)” , “[Cloud Foundry](#getting-started-cloudfoundry)”, or “[Kubernetes](#getting-started-kubernetes)” before diving into this section. + +## 25. Introduction + +A task application is short-lived, meaning that it stops running on purpose and can be run on demand or scheduled for later. +One use case might be to scrape a web page and write to the database. + +The [Spring Cloud Task](https://cloud.spring.io/spring-cloud-task/) framework is based on Spring Boot and adds the ability for Boot applications to record the lifecycle events of a short-lived application, such as when it starts, when it ends, and the exit status. +The [`TaskExecution`](https://docs.spring.io/spring-cloud-task/docs/2.0.0.RELEASE/reference/htmlsingle/#features-task-execution-details) documentation shows which information is stored in the database. +The entry point for code execution in a Spring Cloud Task application is most often an implementation of Boot’s `CommandLineRunner` interface, as shown in this [example](https://docs.spring.io/spring-cloud-task/docs/2.0.0.RELEASE/reference/htmlsingle/#getting-started-writing-the-code). + +The Spring Batch project is probably what comes to mind for Spring developers writing short-lived applications. +Spring Batch provides a much richer set of functionality than Spring Cloud Task and is recommended when processing large volumes of data. +One use case might be to read many CSV files, transform each row of data, and write each transformed row to a database. +Spring Batch provides its own database schema with a much more rich [set of information](https://docs.spring.io/spring-batch/4.1.x/reference/html/schema-appendix.html#metaDataSchema) about the execution of a Spring Batch job. 
+Spring Cloud Task is integrated with Spring Batch so that, if a Spring Cloud Task application defines a Spring Batch `Job`, a link between the Spring Cloud Task and Spring Cloud Batch execution tables is created. + +When running Data Flow on your local machine, Tasks are launched in a separate JVM. +When running on Cloud Foundry, tasks are launched by using [Cloud Foundry’s Task](https://docs.cloudfoundry.org/devguide/using-tasks.html) functionality. When running on Kubernetes, tasks are launched by using either a `Pod` or a `Job` resource. + +## 26. The Lifecycle of a Task + +Before you dive deeper into the details of creating Tasks, you should understand the typical lifecycle for tasks in the context of Spring Cloud Data Flow: + +1. [Creating a Task Application](#spring-cloud-dataflow-create-task-apps) + +2. [Registering a Task Application](#spring-cloud-dataflow-register-task-apps) + +3. [Creating a Task Definition](#spring-cloud-dataflow-create-task-definition) + +4. [Launching a Task](#spring-cloud-dataflow-task-launch) + +5. [Reviewing Task Executions](#spring-cloud-dataflow-task-review-executions) + +6. [Destroying a Task Definition](#spring-cloud-dataflow-task-definition-destroying) + +7. [Continuous Deployment](#spring-cloud-dataflow-task-cd) + +### 26.1. Creating a Task Application + +While Spring Cloud Task does provide a number of out-of-the-box applications (at [spring-cloud-task-app-starters](https://github.com/spring-cloud-task-app-starters)), most task applications require custom development. +To create a custom task application: + +1. Use the [Spring Initializer](https://start.spring.io) to create a new project, making sure to select the following starters: + + 1. `Cloud Task`: This dependency is the `spring-cloud-starter-task`. + + 2. `JDBC`: This dependency is the `spring-jdbc` starter. + + 3. Select your database dependency: Enter the database dependency that Data Flow is currently using. For example: `H2`. + +2. 
Within your new project, create a new class to serve as your main class, as follows: + + ``` + @EnableTask + @SpringBootApplication + public class MyTask { + + public static void main(String[] args) { + SpringApplication.run(MyTask.class, args); + } + } + ``` + +3. With this class, you need one or more `CommandLineRunner` or `ApplicationRunner` implementations within your application. You can either implement your own or use the ones provided by Spring Boot (there is one for running batch jobs, for example). + +4. Packaging your application with Spring Boot into an über jar is done through the standard [Spring Boot conventions](https://docs.spring.io/spring-boot/docs/2.1.1.RELEASE/reference/html/getting-started-first-application.html#getting-started-first-application-executable-jar). + The packaged application can be registered and deployed as noted below. + +#### 26.1.1. Task Database Configuration + +| |When launching a task application, be sure that the database driver that is being used by Spring Cloud Data Flow is also a dependency on the task application.
For example, if your Spring Cloud Data Flow is set to use Postgresql, be sure that the task application also has Postgresql as a dependency.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When you run tasks externally (that is, from the command line) and you want Spring Cloud Data Flow to show the TaskExecutions in its UI, be sure that common datasource settings are shared among them both.
By default, Spring Cloud Task uses a local H2 instance, and the execution is recorded to the database used by Spring Cloud Data Flow.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 26.2. Registering a Task Application
+
+You can register a Task application with the App Registry by using the Spring Cloud Data Flow Shell `app register` command.
+You must provide a unique name and a URI that can be resolved to the application artifact. For the type, specify `task`.
+The following listing shows three examples:
+
+```
+dataflow:>app register --name task1 --type task --uri maven://com.example:mytask:1.0.2
+
+dataflow:>app register --name task2 --type task --uri file:///Users/example/mytask-1.0.2.jar
+
+dataflow:>app register --name task3 --type task --uri https://example.com/mytask-1.0.2.jar
+```
+
+When providing a URI with the `maven` scheme, the format should conform to the following:
+
+```
+maven://<groupId>:<artifactId>[:<extension>[:<classifier>]]:<version>
+```
+
+If you would like to register multiple applications at one time, you can store them in a properties file where the keys are formatted as `<type>.<name>` and the values are the URIs. 
+For example, the following listing would be a valid properties file: + +``` +task.cat=file:///tmp/cat-1.2.1.BUILD-SNAPSHOT.jar +task.hat=file:///tmp/hat-1.2.1.BUILD-SNAPSHOT.jar +``` + +Then you can use the `app import` command and provide the location of the properties file by using the `--uri` option, as follows: + +``` +app import --uri file:///tmp/task-apps.properties +``` + +For example, if you would like to register all the task applications that ship with Data Flow in a single operation, you can do so with the following command: + +``` +dataflow:>app import --uri https://dataflow.spring.io/task-maven-latest +``` + +You can also pass the `--local` option (which is `TRUE` by default) to indicate whether the properties file location should be resolved within the shell process itself. +If the location should be resolved from the Data Flow Server process, specify `--local false`. + +When using either `app register` or `app import`, if a task application is already registered with +the provided name and version, it is not overridden by default. If you would like to override the +pre-existing task application with a different `uri` or `uri-metadata` location, include the `--force` option. + +| |In some cases, the resource is resolved on the server side.
In other cases, the URI is passed to a runtime container instance, where it is resolved.
Consult the specific documentation of each Data Flow Server for more detail.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 26.3. Creating a Task Definition
+
+You can create a task definition from a task application by providing a definition name as well as
+properties that apply to the task execution. You can create a task definition through
+the RESTful API or the shell. To create a task definition by using the shell, use the `task create` command to create the task definition, as shown in the following example:
+
+```
+dataflow:>task create mytask --definition "timestamp --format=\"yyyy\""
+Created new task 'mytask'
+```
+
+You can obtain a listing of the current task definitions through the RESTful API or the shell.
+To get the task definition list by using the shell, use the `task list` command.
+
+#### 26.3.1. Maximum Task Definition Name Length
+
+The maximum character length of a task definition name is dependent on the platform.
+
+| |Consult the platform documents for specifics on resource naming.
The Local platform stores the task definition name in a database column with a maximum size of 255.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +|Kubernetes Bare Pods|Kubernetes Jobs|Cloud Foundry|Local| +|--------------------|---------------|-------------|-----| +| 63 | 52 | 63 | 255 | + +#### 26.3.2. Automating the Creation of Task Definitions + +As of version 2.3.0, you can configure the Data Flow server to automatically create task definitions by setting `spring.cloud.dataflow.task.autocreate-task-definitions` to `true`. +This is not the default behavior but is provided as a convenience. +When this property is enabled, a task launch request can specify the registered task application name as the task name. +If the task application is registered, the server creates a basic task definition that specifies only the application name, as required. This eliminates a manual step similar to: + +``` +dataflow:>task create mytask --definition "mytask" +``` + +You can still specify command-line arguments and deployment properties for each task launch request. + +### 26.4. Launching a Task + +An ad hoc task can be launched through the RESTful API or the shell. +To launch an ad hoc task through the shell, use the `task launch` command, as shown in the following example: + +``` +dataflow:>task launch mytask +Launched task 'mytask' +``` + +When a task is launched, you can set any properties that need to be passed as command-line arguments to the task application when you launch the task, as follows: + +``` +dataflow:>task launch mytask --arguments "--server.port=8080 --custom=value" +``` + +| |The arguments need to be passed as space-delimited values.| +|---|----------------------------------------------------------| + +You can pass in additional properties meant for a `TaskLauncher` itself by using the `--properties` option. 
+The format of this option is a comma-separated string of properties prefixed with `app.<task name>.<property name>`.
+Properties are passed to `TaskLauncher` as application properties.
+It is up to an implementation to choose how those are passed into an actual task application.
+If the property is prefixed with `deployer` instead of `app`, it is passed to `TaskLauncher` as a deployment property, and its meaning may be `TaskLauncher` implementation specific.
+
+```
+dataflow:>task launch mytask --properties "deployer.timestamp.custom1=value1,app.timestamp.custom2=value2"
+```
+
+#### 26.4.1. Application properties
+
+Each application takes properties to customize its behavior. For example, the `timestamp` task `format` setting establishes an output format that is different from the default value.
+
+```
+dataflow:> task create --definition "timestamp --format=\"yyyy\"" --name printTimeStamp
+```
+
+This `timestamp` property is actually the same as the `timestamp.format` property specified by the timestamp application.
+Data Flow adds the ability to use the shorthand form `format` instead of `timestamp.format`.
+You can also specify the longhand version as well, as shown in the following example:
+
+```
+dataflow:> task create --definition "timestamp --timestamp.format=\"yyyy\"" --name printTimeStamp
+```
+
+This shorthand behavior is discussed more in the section on [Stream Application Properties](#spring-cloud-dataflow-application-properties).
+If you have [registered application property metadata](https://dataflow.spring.io/docs/applications/application-metadata/#using-application-metadata), you can use tab completion in the shell after typing `--` to get a list of candidate property names.
+
+The shell provides tab completion for application properties. The `app info --name <appName> --type <appType>` shell command provides additional documentation for all the supported properties. The supported task `<appType>` is `task`. 
+ +| |When restarting Spring Batch Jobs on Kubernetes, you must use the entry point of `shell` or `boot`.| +|---|---------------------------------------------------------------------------------------------------| + +##### Application Properties With Sensitive Information on Kubernetes + +When launching task applications where some of the properties may contain sensitive information, use the `shell` or `boot` as the `entryPointStyle`. This is because the `exec` (default) converts all properties to command-line arguments and, as a result, may not be secure in some environments. + +#### 26.4.2. Common application properties + +In addition to configuration through DSL, Spring Cloud Data Flow provides a mechanism for setting properties that are common to all the task applications that are launched by it. +You can do so by adding properties prefixed with `spring.cloud.dataflow.applicationProperties.task` when starting the server. +The server then passes all the properties, without the prefix, to the instances it launches. + +For example, you can configure all the launched applications to use the `prop1` and `prop2` properties by launching the Data Flow server with the following options: + +``` +--spring.cloud.dataflow.applicationProperties.task.prop1=value1 +--spring.cloud.dataflow.applicationProperties.task.prop2=value2 +``` + +This causes the `prop1=value1` and `prop2=value2` properties to be passed to all the launched applications. + +| |Properties configured by using this mechanism have lower precedence than task deployment properties.
They are overridden if a property with the same key is specified at task launch time (for example, `app.trigger.prop2` overrides the common property).|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 26.5. Limit the number of concurrent task launches
+
+Spring Cloud Data Flow lets a user limit the maximum number of concurrently running tasks for each configured platform to prevent the saturation of IaaS or hardware resources.
+By default, the limit is set to `20` for all supported platforms. If the number of concurrently running tasks on a platform instance is greater than or equal to the limit, the next task launch request fails, and an error message is returned through the RESTful API, the Shell, or the UI.
+You can configure this limit for a platform instance by setting the corresponding deployer property, `spring.cloud.dataflow.task.platform.<platform-type>.accounts[<account-name>].maximumConcurrentTasks`, where `<account-name>` is the name of a configured platform account (`default` if no accounts are explicitly configured).
+The `<platform-type>` refers to one of the currently supported deployers: `local` or `kubernetes`. For `cloudfoundry`, the property is `spring.cloud.dataflow.task.platform.<platform-type>.accounts[<account-name>].deployment.maximumConcurrentTasks`. (The difference is that `deployment` has been added to the path).
+
+The `TaskLauncher` implementation for each supported platform determines the number of currently running tasks by querying the underlying platform’s runtime state, if possible. The method for identifying a `task` varies by platform.
+For example, launching a task on the local host uses the `LocalTaskLauncher`. `LocalTaskLauncher` runs a process for each launch request and keeps track of these processes in memory. 
In this case, we do not query the underlying OS, as it is impractical to identify tasks this way. +For Cloud Foundry, tasks are a core concept supported by its deployment model. The state of all tasks is available directly through the API. +This means that every running task container in the account’s organization and space is included in the running execution count, whether or not it was launched by using Spring Cloud Data Flow or by invoking the `CloudFoundryTaskLauncher` directly. +For Kubernetes, launching a task through the `KubernetesTaskLauncher`, if successful, results in a running pod, which we expect to eventually complete or fail. +In this environment, there is generally no easy way to identify pods that correspond to a task. +For this reason, we count only pods that were launched by the `KubernetesTaskLauncher`. +Since the task launcher provides a `task-name` label in the pod’s metadata, we filter all running pods by the presence of this label. + +### 26.6. Reviewing Task Executions + +Once the task is launched, the state of the task is stored in a relational database. The state +includes: + +* Task Name + +* Start Time + +* End Time + +* Exit Code + +* Exit Message + +* Last Updated Time + +* Parameters + +You can check the status of your task executions through the RESTful API or the shell. +To display the latest task executions through the shell, use the `task execution list` command. + +To get a list of task executions for just one task definition, add `--name` and +the task definition name — for example, `task execution list --name foo`. To retrieve full +details for a task execution, use the `task execution status` command with the ID of the task execution, +for example `task execution status --id 549`. + +### 26.7. Destroying a Task Definition + +Destroying a task definition removes the definition from the definition repository. +This can be done through the RESTful API or the shell. 
+To destroy a task through the shell, use the `task destroy` command, as shown in the following example: + +``` +dataflow:>task destroy mytask +Destroyed task 'mytask' +``` + +The `task destroy` command also has an option to `cleanup` the task executions of the task being destroyed, as shown in the following example: + +``` +dataflow:>task destroy mytask --cleanup +Destroyed task 'mytask' +``` + +By default, the `cleanup` option is set to `false` (that is, by default, the task executions are not cleaned up when the task is destroyed). + +To destroy all tasks through the shell, use the `task all destroy` command as shown in the following example: + +``` +dataflow:>task all destroy +Really destroy all tasks? [y, n]: y +All tasks destroyed +``` + +If need be, you can use the force switch: + +``` +dataflow:>task all destroy --force +All tasks destroyed +``` + +The task execution information for previously launched tasks for the definition remains in the task repository. + +| |This does not stop any currently running tasks for this definition. Instead, it removes the task definition from the database.| +|---|------------------------------------------------------------------------------------------------------------------------------| + +| |`task destroy ` deletes only the definition and not the task deployed on Cloud Foundry.
The only way to delete the task is through the CLI in two steps:

\+
. Obtain a list of the apps by using the `cf apps` command.
. Identify the task application to be deleted and run the `cf delete ` command.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 26.8. Validating a Task + +Sometimes, an application contained within a task definition has an invalid URI in its registration. +This can be caused by an invalid URI being entered at application-registration time or the by the application being removed from the repository from which it was to be drawn. +To verify that all the applications contained in a task are resolve-able, use the `validate` command, as follows: + +``` +dataflow:>task validate time-stamp +╔══════════╤═══════════════╗ +║Task Name │Task Definition║ +╠══════════╪═══════════════╣ +║time-stamp│timestamp ║ +╚══════════╧═══════════════╝ + +time-stamp is a valid task. +╔═══════════════╤═════════════════╗ +║ App Name │Validation Status║ +╠═══════════════╪═════════════════╣ +║task:timestamp │valid ║ +╚═══════════════╧═════════════════╝ +``` + +In the preceding example, the user validated their time-stamp task. The `task:timestamp` application is valid. +Now we can see what happens if we have a stream definition with a registered application that has an invalid URI: + +``` +dataflow:>task validate bad-timestamp +╔═════════════╤═══════════════╗ +║ Task Name │Task Definition║ +╠═════════════╪═══════════════╣ +║bad-timestamp│badtimestamp ║ +╚═════════════╧═══════════════╝ + +bad-timestamp is an invalid task. 
+╔══════════════════╤═════════════════╗ +║ App Name │Validation Status║ +╠══════════════════╪═════════════════╣ +║task:badtimestamp │invalid ║ +╚══════════════════╧═════════════════╝ +``` + +In this case, Spring Cloud Data Flow states that the task is invalid because `task:badtimestamp` has an invalid URI. + +### 26.9. Stopping a Task Execution + +In some cases, a task that is running on a platform may not stop because of a problem on the platform or the application business logic itself. +For such cases, Spring Cloud Data Flow offers the ability to send a request to the platform to end the task. +To do this, submit a `task execution stop` for a given set of task executions, as follows: + +``` +task execution stop --ids 5 + +Request to stop the task execution with id(s): 5 has been submitted +``` + +With the preceding command, the trigger to stop the execution of `id=5` is submitted to the underlying deployer implementation. As a result, the operation stops that task. When we view the result for the task execution, we see that the task execution completed with a 0 exit code: + +``` +dataflow:>task execution list +╔══════════╤══╤════════════════════════════╤════════════════════════════╤═════════╗ +║Task Name │ID│ Start Time │ End Time │Exit Code║ +╠══════════╪══╪════════════════════════════╪════════════════════════════╪═════════╣ +║batch-demo│5 │Mon Jul 15 13:58:41 EDT 2019│Mon Jul 15 13:58:55 EDT 2019│0 ║ +║timestamp │1 │Mon Jul 15 09:26:41 EDT 2019│Mon Jul 15 09:26:41 EDT 2019│0 ║ +╚══════════╧══╧════════════════════════════╧════════════════════════════╧═════════╝ +``` + +If you submit a stop for a task execution that has child task executions associated with it, such as a composed task, a stop request is sent for each of the child task executions. + +| |When stopping a task execution that has a running Spring Batch job, the job is left with a batch status of `STARTED`.
Each of the supported platforms sends a SIG-INT to the task application when a stop is requested. That allows Spring Cloud Task to capture the state of the app. However, Spring Batch does not handle a SIG-INT and, as a result, the job stops but remains in the STARTED status.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When launching Remote Partitioned Spring Batch Task applications, Spring Cloud Data Flow supports stopping a worker partition task directly for both Cloud Foundry and Kubernetes platforms. Stopping worker partition task is not supported for the local platform.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 26.9.1. Stopping a Task Execution that was Started Outside of Spring Cloud Data Flow + +You may wish to stop a task that has been launched outside of Spring Cloud Data Flow. An example of this is the worker applications launched by a remote batch partitioned application. +In such cases, the remote batch partitioned application stores the `external-execution-id` for each of the worker applications. However, no platform information is stored. 
+So when Spring Cloud Data Flow has to stop a remote batch partitioned application and its worker applications, you need to specify the platform name, as follows: + +``` +dataflow:>task execution stop --ids 1 --platform myplatform +Request to stop the task execution with id(s): 1 for platform myplatform has been submitted +``` + +## 27. Subscribing to Task and Batch Events + +You can also tap into various task and batch events when the task is launched. +If the task is enabled to generate task or batch events (with the additional dependencies of `spring-cloud-task-stream` and, in the case of Kafka as the binder, `spring-cloud-stream-binder-kafka`), those events are published during the task lifecycle. +By default, the destination names for those published events on the broker (Rabbit, Kafka, and others) are the event names themselves (for instance: `task-events`, `job-execution-events`, and so on). + +``` +dataflow:>task create myTask --definition "myBatchJob" +dataflow:>stream create task-event-subscriber1 --definition ":task-events > log" --deploy +dataflow:>task launch myTask +``` + +You can control the destination name for those events by specifying explicit names when launching the task, as follows: + +``` +dataflow:>stream create task-event-subscriber2 --definition ":myTaskEvents > log" --deploy +dataflow:>task launch myTask --properties "app.myBatchJob.spring.cloud.stream.bindings.task-events.destination=myTaskEvents" +``` + +The following table lists the default task and batch event and destination names on the broker: + +| **Event** | **Destination** | +|---------------------|-----------------------| +| Task events | `task-events` | +|Job Execution events |`job-execution-events` | +|Step Execution events|`step-execution-events`| +| Item Read events | `item-read-events` | +| Item Process events | `item-process-events` | +| Item Write events | `item-write-events` | +| Skip events | `skip-events` | + +## 28. 
Composed Tasks + +Spring Cloud Data Flow lets you create a directed graph, where each node of the graph is a task application. +This is done by using the DSL for composed tasks. +You can create a composed task through the RESTful API, the Spring Cloud Data Flow Shell, or the Spring Cloud Data Flow UI. + +### 28.1. The Composed Task Runner + +Composed tasks are run through a task application called the Composed Task Runner. The Spring Cloud Data Flow server automatically deploys the Composed Task Runner when launching a composed task. + +#### 28.1.1. Configuring the Composed Task Runner + +The composed task runner application has a `dataflow-server-uri` property that is used for validation and for launching child tasks. +This defaults to `[localhost:9393](http://localhost:9393)`. If you run a distributed Spring Cloud Data Flow server, as you would if you deploy the server on Cloud Foundry or Kubernetes, you need to provide the URI that can be used to access the server. +You can either provide this by setting the `dataflow-server-uri` property for the composed task runner application when launching a composed task or by setting the `spring.cloud.dataflow.server.uri` property for the Spring Cloud Data Flow server when it is started. +For the latter case, the `dataflow-server-uri` composed task runner application property is automatically set when a composed task is launched. + +##### Configuration Options + +The `ComposedTaskRunner` task has the following options: + +* `composed-task-arguments`The command line arguments to be used for each of the tasks. (String, default: \). + +* `increment-instance-enabled`Allows a single `ComposedTaskRunner` instance to be run again without changing the parameters by adding a incremented number job parameter based on `run.id` from the previous execution. (Boolean, default: `true`). + ComposedTaskRunner is built by using [Spring Batch](https://github.com/spring-projects/spring-batch). 
As a result, upon a successful execution, the batch job is considered to be complete. + To launch the same `ComposedTaskRunner` definition multiple times, you must set either `increment-instance-enabled` or `uuid-instance-enabled` property to `true` or change the parameters for the definition for each launch. + When using this option, it must be applied for all task launches for the desired application, including the first launch. + +* `uuid-instance-enabled`Allows a single `ComposedTaskRunner` instance to be run again without changing the parameters by adding a UUID to the `ctr.id` job parameter. (Boolean, default: `false`). + ComposedTaskRunner is built by using [Spring Batch](https://github.com/spring-projects/spring-batch). As a result, upon a successful execution, the batch job is considered to be complete. + To launch the same `ComposedTaskRunner` definition multiple times, you must set either `increment-instance-enabled` or `uuid-instance-enabled` property to `true` or change the parameters for the definition for each launch. + When using this option, it must be applied for all task launches for the desired application, including the first launch. This option when set to true will override the value of `increment-instance-id`. + Set this option to `true` when running multiple instances of the same composed task definition at the same time. + +* `interval-time-between-checks`The amount of time, in milliseconds, that the `ComposedTaskRunner` waits between checks of the database to see if a task has completed. (Integer, default: `10000`).`ComposedTaskRunner` uses the datastore to determine the status of each child tasks. This interval indicates to `ComposedTaskRunner` how often it should check the status its child tasks. + +* `transaction-isolation-level`Establish the transaction isolation level for the Composed Task Runner. 
+ A list of available transaction isolation levels can be found [here](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/transaction/TransactionDefinition.html). + Default is `ISOLATION_REPEATABLE_READ`. + +* `max-wait-time`The maximum amount of time, in milliseconds, that an individual step can run before the execution of the Composed task is failed (Integer, default: 0). + Determines the maximum time each child task is allowed to run before the CTR ends with a failure. The default of `0` indicates no timeout. + +* `split-thread-allow-core-thread-timeout`Specifies whether to allow split core threads to timeout. (Boolean, default: `false`) + Sets the policy governing whether core threads may timeout and terminate if no tasks arrive within the keep-alive time, being replaced if needed when new tasks arrive. + +* `split-thread-core-pool-size`Split’s core pool size. (Integer, default: `1`) + Each child task contained in a split requires a thread in order to execute. So, for example, a definition such as `<AAA || BBB || CCC> && <DDD || EEE>` would require a `split-thread-core-pool-size` of `3`. + This is because the largest split contains three child tasks. A count of `2` would mean that `AAA` and `BBB` would run in parallel, but `CCC` would wait until either `AAA` or `BBB` finish in order to run. + Then `DDD` and `EEE` would run in parallel. + +* `split-thread-keep-alive-seconds`Split’s thread keep alive seconds. (Integer, default: `60`) + If the pool currently has more than `corePoolSize` threads, excess threads are stopped if they have been idle for more than the `keepAliveTime`. + +* `split-thread-max-pool-size`Split’s maximum pool size. (Integer, default: `Integer.MAX_VALUE`). + Establish the maximum number of threads allowed for the thread pool. + +* **split-thread-queue-capacity**Capacity for Split’s `BlockingQueue`. 
(Integer, default: `Integer.MAX_VALUE`) + + * If fewer than `corePoolSize` threads are running, the `Executor` always prefers adding a new thread rather than queuing. + + * If `corePoolSize` or more threads are running, the `Executor` always prefers queuing a request rather than adding a new thread. + + * If a request cannot be queued, a new thread is created unless this would exceed `maximumPoolSize`. In that case, the task is rejected. + +* `split-thread-wait-for-tasks-to-complete-on-shutdown`Whether to wait for scheduled tasks to complete on shutdown, not interrupting running tasks and running all tasks in the queue. (Boolean, default: `false`) + +* `dataflow-server-uri`The URI for the Data Flow server that receives task launch requests. (String, default: `[localhost:9393](http://localhost:9393)`) + +* `dataflow-server-username`The optional username for the Data Flow server that receives task launch requests. + Used to access the Data Flow server by using Basic Authentication. Not used if `dataflow-server-access-token` is set. + +* `dataflow-server-password`The optional password for the Data Flow server that receives task launch requests. + Used to access the Data Flow server by using Basic Authentication. Not used if `dataflow-server-access-token` is set. + +* `dataflow-server-access-token`This property sets an optional OAuth2 Access Token. + Typically, the value is automatically set by using the token from the currently logged-in user, if available. + However, for special use-cases, this value can also be set explicitly. + +A special boolean property, `dataflow-server-use-user-access-token`, exists for when you want to use the access token of the currently logged-in user and propagate it to the Composed Task Runner. This property is used +by Spring Cloud Data Flow and, if set to `true`, auto-populates the `dataflow-server-access-token` property. When using `dataflow-server-use-user-access-token`, it must be passed for each task execution. 
+In some cases, it may be preferred that the user’s `dataflow-server-access-token` must be passed for each composed task launch by default. +In this case, set the Spring Cloud Data Flow `spring.cloud.dataflow.task.useUserAccessToken` property to `true`. + +To set a property for Composed Task Runner you will need to prefix the property with `app.composed-task-runner.`. +For example to set the `dataflow-server-uri` property the property will look like `app.composed-task-runner.dataflow-server-uri`. + +### 28.2. The Lifecycle of a Composed Task + +The lifecycle of a composed task has three parts: + +* [Creating a Composed Task](#spring-cloud-data-flow-composed-task-creating) + +* [Stopping a Composed Task](#spring-cloud-data-flow-composed-task-stopping) + +* [Restarting a Composed Task](#spring-cloud-data-flow-composed-task-restarting) + +#### 28.2.1. Creating a Composed Task + +The DSL for the composed tasks is used when creating a task definition through the task create command, as shown in the following example: + +``` +dataflow:> app register --name timestamp --type task --uri maven://org.springframework.cloud.task.app:timestamp-task: +dataflow:> app register --name mytaskapp --type task --uri file:///home/tasks/mytask.jar +dataflow:> task create my-composed-task --definition "mytaskapp && timestamp" +dataflow:> task launch my-composed-task +``` + +In the preceding example, we assume that the applications to be used by our composed task have not yet been registered. +Consequently, in the first two steps, we register two task applications. +We then create our composed task definition by using the `task create` command. +The composed task DSL in the preceding example, when launched, runs `mytaskapp` and then runs the timestamp application. + +But before we launch the `my-composed-task` definition, we can view what Spring Cloud Data Flow generated for us. 
+This can be done by using the task list command, as shown (including its output) in the following example: + +``` +dataflow:>task list +╔══════════════════════════╤══════════════════════╤═══════════╗ +║ Task Name │ Task Definition │Task Status║ +╠══════════════════════════╪══════════════════════╪═══════════╣ +║my-composed-task │mytaskapp && timestamp│unknown ║ +║my-composed-task-mytaskapp│mytaskapp │unknown ║ +║my-composed-task-timestamp│timestamp │unknown ║ +╚══════════════════════════╧══════════════════════╧═══════════╝ +``` + +In the example, Spring Cloud Data Flow created three task definitions, one for each of the applications that makes up our composed task (`my-composed-task-mytaskapp` and `my-composed-task-timestamp`) as well as the composed task (`my-composed-task`) definition. +We also see that each of the generated names for the child tasks is made up of the name of the composed task and the name of the application, separated by a hyphen `-` (as in *my-composed-task* `-` *mytaskapp*). + +##### Task Application Parameters + +The task applications that make up the composed task definition can also contain parameters, as shown in the following example: + +``` +dataflow:> task create my-composed-task --definition "mytaskapp --displayMessage=hello && timestamp --format=YYYY" +``` + +#### 28.2.2. 
Launching a Composed Task + +Launching a composed task is done in the same way as launching a stand-alone task, as follows: + +``` +task launch my-composed-task +``` + +Once the task is launched, and assuming all the tasks complete successfully, you can see three task executions when you run a `task execution list`, as shown in the following example: + +``` +dataflow:>task execution list +╔══════════════════════════╤═══╤════════════════════════════╤════════════════════════════╤═════════╗ +║ Task Name │ID │ Start Time │ End Time │Exit Code║ +╠══════════════════════════╪═══╪════════════════════════════╪════════════════════════════╪═════════╣ +║my-composed-task-timestamp│713│Wed Apr 12 16:43:07 EDT 2017│Wed Apr 12 16:43:07 EDT 2017│0 ║ +║my-composed-task-mytaskapp│712│Wed Apr 12 16:42:57 EDT 2017│Wed Apr 12 16:42:57 EDT 2017│0 ║ +║my-composed-task │711│Wed Apr 12 16:42:55 EDT 2017│Wed Apr 12 16:43:15 EDT 2017│0 ║ +╚══════════════════════════╧═══╧════════════════════════════╧════════════════════════════╧═════════╝ +``` + +In the preceding example, we see that `my-compose-task` launched and that the other tasks were also launched in sequential order. +Each of them ran successfully with an `Exit Code` as `0`. + +##### Passing Properties to the Child Tasks + +To set the properties for child tasks in a composed task graph at task launch time, +use the following format: `app...`. 
+The following listing shows a composed task definition as an example: + +``` +dataflow:> task create my-composed-task --definition "mytaskapp && mytimestamp" +``` + +To have `mytaskapp` display 'HELLO' and set the `mytimestamp` timestamp format to `YYYY` for the composed task definition, use the following task launch format: + +``` +task launch my-composed-task --properties "app.my-composed-task.mytaskapp.displayMessage=HELLO,app.my-composed-task.mytimestamp.timestamp.format=YYYY" +``` + +Similar to application properties, you can also set the `deployer` properties for child tasks by using the following format: `deployer...`: + +``` +task launch my-composed-task --properties "deployer.my-composed-task.mytaskapp.memory=2048m,app.my-composed-task.mytimestamp.timestamp.format=HH:mm:ss" +Launched task 'a1' +``` + +##### Passing Arguments to the Composed Task Runner + +You can pass command-line arguments for the composed task runner by using the `--arguments` option: + +``` +dataflow:>task create my-composed-task --definition "" +Created new task 'my-composed-task' + +dataflow:>task launch my-composed-task --arguments "--increment-instance-enabled=true --max-wait-time=50000 --split-thread-core-pool-size=4" --properties "app.my-composed-task.bbb.timestamp.format=dd/MM/yyyy HH:mm:ss" +Launched task 'my-composed-task' +``` + +##### Exit Statuses + +The following list shows how the exit status is set for each step (task) contained in the composed task following each step execution: + +* If the `TaskExecution` has an `ExitMessage`, that is used as the `ExitStatus`. + +* If no `ExitMessage` is present and the `ExitCode` is set to zero, the `ExitStatus` for the step is `COMPLETED`. + +* If no `ExitMessage` is present and the `ExitCode` is set to any non-zero number, the `ExitStatus` for the step is `FAILED`. + +#### 28.2.3. Destroying a Composed Task + +The command used to destroy a stand-alone task is the same as the command used to destroy a composed task. 
+The only difference is that destroying a composed task also destroys the child tasks associated with it. +The following example shows the task list before and after using the `destroy` command: + +``` +dataflow:>task list +╔══════════════════════════╤══════════════════════╤═══════════╗ +║ Task Name │ Task Definition │Task Status║ +╠══════════════════════════╪══════════════════════╪═══════════╣ +║my-composed-task │mytaskapp && timestamp│COMPLETED ║ +║my-composed-task-mytaskapp│mytaskapp │COMPLETED ║ +║my-composed-task-timestamp│timestamp │COMPLETED ║ +╚══════════════════════════╧══════════════════════╧═══════════╝ +... +dataflow:>task destroy my-composed-task +dataflow:>task list +╔═════════╤═══════════════╤═══════════╗ +║Task Name│Task Definition│Task Status║ +╚═════════╧═══════════════╧═══════════╝ +``` + +#### 28.2.4. Stopping a Composed Task + +In cases where a composed task execution needs to be stopped, you can do so through the: + +* RESTful API + +* Spring Cloud Data Flow Dashboard + +To stop a composed task through the dashboard, select the **Jobs** tab and click the \*Stop() button next to the job execution that you want to stop. + +The composed task run is stopped when the currently running child task completes. +The step associated with the child task that was running at the time that the composed task was stopped is marked as `STOPPED` as well as the composed task job execution. + +#### 28.2.5. Restarting a Composed Task + +In cases where a composed task fails during execution and the status of the composed task is `FAILED`, the task can be restarted. +You can do so through the: + +* RESTful API + +* The shell + +* Spring Cloud Data Flow Dashboard + +To restart a composed task through the shell, launch the task with the same parameters. +To restart a composed task through the dashboard, select the **Jobs** tab and click the **Restart** button next to the job execution that you want to restart. 
+ +| |Restarting a composed task job that has been stopped (through the Spring Cloud Data Flow Dashboard or RESTful API) relaunches the `STOPPED` child task and then launches the remaining (unlaunched) child tasks in the specified order.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 29. Composed Tasks DSL + +Composed tasks can be run in three ways: + +* [Conditional Execution](#spring-cloud-data-flow-conditional-execution) + +* [Transitional Execution](#spring-cloud-data-flow-transitional-execution) + +* [Split Execution](#spring-cloud-data-flow-split-execution) + +### 29.1. Conditional Execution + +Conditional execution is expressed by using a double ampersand symbol (`&&`). +This lets each task in the sequence be launched only if the previous task +successfully completed, as shown in the following example: + +``` +task create my-composed-task --definition "task1 && task2" +``` + +When the composed task called `my-composed-task` is launched, it launches the task called `task1` and, if `task1` completes successfully, the task called `task2` is launched. +If `task1` fails, `task2` does not launch. + +You can also use the Spring Cloud Data Flow Dashboard to create your conditional execution, by using the designer to drag and drop applications that are required and connecting them together to create your directed graph, as shown in the following image: + +![Composed Task Conditional Execution](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-ctr-conditional-execution.png) + +Figure 2. Conditional Execution + +The preceding diagram is a screen capture of the directed graph as it being created by using the Spring Cloud Data Flow Dashboard. 
+You can see that four components in the diagram comprise a conditional execution: + +* **Start** icon: All directed graphs start from this symbol. There is only one. + +* **Task** icon: Represents each task in the directed graph. + +* **End** icon: Represents the end of a directed graph. + +* Solid line arrow: Represents the conditional execution flow between: + + * Two applications. + + * The start control node and an application. + + * An application and the end control node. + +* **End** icon: All directed graphs end at this symbol. + +| |You can view a diagram of your directed graph by clicking the **Detail** button next to the composed task definition on the Definitions tab.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------| + +### 29.2. Transitional Execution + +The DSL supports fine-grained control over the transitions taken during the execution of the directed graph. +Transitions are specified by providing a condition for equality that is based on the exit status of the previous task. +A task transition is represented by the following symbol `->`. + +#### 29.2.1. Basic Transition + +A basic transition would look like the following: + +``` +task create my-transition-composed-task --definition "foo 'FAILED' -> bar 'COMPLETED' -> baz" +``` + +In the preceding example, `foo` would launch, and, if it had an exit status of `FAILED`, the `bar` task would launch. +If the exit status of `foo` was `COMPLETED`, `baz` would launch. +All other statuses returned by `foo` have no effect, and the task would end normally. + +Using the Spring Cloud Data Flow Dashboard to create the same “basic transition” would resemble the following image: + +![Composed Task Basic Transition](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-ctr-transition-basic.png) + +Figure 3. 
Basic Transition + +The preceding diagram is a screen capture of the directed graph as it is being created in the Spring Cloud Data Flow Dashboard. +Notice that there are two different types of connectors: + +* Dashed line: Represents transitions from the application to one of the possible destination applications. + +* Solid line: Connects applications in a conditional execution or a connection between the application and a control node (start or end). + +To create a transitional connector: + +1. When creating a transition, link the application to each possible destination by using the connector. + +2. Once complete, go to each connection and select it by clicking it. + +3. A bolt icon appears. + +4. Click that icon. + +5. Enter the exit status required for that connector. + +6. The solid line for that connector turns to a dashed line. + +#### 29.2.2. Transition With a Wildcard + +Wildcards are supported for transitions by the DSL, as shown in the following example: + +``` +task create my-transition-composed-task --definition "foo 'FAILED' -> bar '*' -> baz" +``` + +In the preceding example, `foo` would launch, and, if it had an exit status of `FAILED`, the `bar` task would launch. +For any exit status of `foo` other than `FAILED`, `baz` would launch. + +Using the Spring Cloud Data Flow Dashboard to create the same “transition with wildcard” would resemble the following image: + +![Composed Task Basic Transition with Wildcard](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-ctr-transition-basic-wildcard.png) + +Figure 4. Basic Transition With Wildcard + +#### 29.2.3. 
Transition With a Following Conditional Execution
+
+A transition can be followed by a conditional execution, so long as the wildcard
+is not used, as shown in the following example:
+
+```
+task create my-transition-conditional-execution-task --definition "foo 'FAILED' -> bar 'UNKNOWN' -> baz && qux && quux"
+```
+
+In the preceding example, `foo` would launch, and, if it had an exit status of `FAILED`, the `bar` task would launch.
+If `foo` had an exit status of `UNKNOWN`, `baz` would launch.
+For any exit status of `foo` other than `FAILED` or `UNKNOWN`, `qux` would launch and, upon successful completion, `quux` would launch.
+
+Using the Spring Cloud Data Flow Dashboard to create the same “transition with conditional execution” would resemble the following image:
+
+![Composed Task Transition with Conditional Execution](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-ctr-transition-conditional-execution.png)
+
+Figure 5. Transition With Conditional Execution
+
+| |In this diagram, the dashed line (transition) connects the `foo` application to the target applications, but a solid line connects the conditional executions between `foo`, `qux`, and `quux`.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 29.3. Split Execution
+
+Splits let multiple tasks within a composed task be run in parallel.
+It is denoted by using angle brackets (`<>`) to group tasks and flows that are to be run in parallel.
+These tasks and flows are separated by the double pipe `||` symbol, as shown in the following example:
+
+```
+task create my-split-task --definition "<foo || bar || baz>"
+```
+
+The preceding example launches tasks `foo`, `bar` and `baz` in parallel. 
+
+Using the Spring Cloud Data Flow Dashboard to create the same “split execution” would resemble the following image:
+
+![Composed Task Split](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-ctr-split.png)
+
+Figure 6. Split
+
+With the task DSL, you can also run multiple split groups in succession, as shown in the following example:
+
+```
+task create my-split-task --definition "<foo || bar || baz> && <qux || quux>"
+```
+
+In the preceding example, the `foo`, `bar`, and `baz` tasks are launched in parallel.
+Once they all complete, then the `qux` and `quux` tasks are launched in parallel.
+Once they complete, the composed task ends.
+However, if `foo`, `bar`, or `baz` fails, the split containing `qux` and `quux` does not launch.
+
+Using the Spring Cloud Data Flow Dashboard to create the same “split with multiple groups” would resemble the following image:
+
+![Composed Task Split](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-ctr-multiple-splits.png)
+
+Figure 7. Split as a part of a conditional execution
+
+Notice that there is a `SYNC` control node that is inserted by the designer when
+connecting two consecutive splits.
+
+| |Tasks that are used in a split should not set their `ExitMessage`. Setting the `ExitMessage` is only to be used
with [transitions](#spring-cloud-data-flow-transitional-execution).|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 29.3.1. Split Containing Conditional Execution
+
+A split can also have a conditional execution within the angle brackets, as shown in the following example:
+
+```
+task create my-split-task --definition "<foo && bar || baz>"
+```
+
+In the preceding example, we see that `foo` and `baz` are launched in parallel.
+However, `bar` does not launch until `foo` completes successfully.
+
+Using the Spring Cloud Data Flow Dashboard to create the same “split containing conditional execution” resembles the following image:
+
+![Composed Task Split With Conditional Execution](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-ctr-split-contains-conditional.png)
+
+Figure 8. Split with conditional execution
+
+#### 29.3.2. Establishing the Proper Thread Count for Splits
+
+Each child task contained in a split requires a thread in order to run. To set this properly, you want to look at your graph and find the split that has the largest number of child tasks. The number of child tasks in that split is the number of threads you need.
+To set the thread count, use the `split-thread-core-pool-size` property (defaults to `1`). So, for example, a definition such as `<AAA || BBB || CCC> && <DDD || EEE>` requires a `split-thread-core-pool-size` of `3`.
+This is because the largest split contains three child tasks. A count of two would mean that `AAA` and `BBB` would run in parallel but `CCC` would wait for either `AAA` or `BBB` to finish in order to run.
+Then `DDD` and `EEE` would run in parallel.
+
+## 30. 
Launching Tasks from a Stream + +You can launch a task from a stream by using the [`task-launcher-dataflow`](https://github.com/spring-cloud-stream-app-starters/tasklauncher-dataflow/blob/master/spring-cloud-starter-stream-sink-task-launcher-dataflow/README.adoc) sink. +The sink connects to a Data Flow server and uses its REST API to launch any defined task. +The sink accepts a [JSON payload](https://github.com/spring-cloud-stream-app-starters/tasklauncher-dataflow/blob/master/spring-cloud-starter-stream-sink-task-launcher-dataflow/README.adoc#payload) representing a `task launch request`, which provides the name of the task to launch and may include command line arguments and deployment properties. + +The [`app-starters-task-launch-request-common`](https://github.com/spring-cloud-stream-app-starters/core/blob/master/common/app-starters-task-launch-request-common/README.adoc) component, in conjunction with Spring Cloud Stream [functional composition](https://docs.spring.io/spring-cloud-stream/docs/current-snapshot/reference/htmlsingle/#_functional_composition), can transform the output of any source or processor to a task launch request. + +Adding a dependency to `app-starters-task-launch-request-common` auto-configures a `java.util.function.Function` implementation, registered through [Spring Cloud Function](https://cloud.spring.io/spring-cloud-function/) as a `taskLaunchRequest`. + +For example, you can start with the [time](https://github.com/spring-cloud-stream-app-starters/time/tree/master/spring-cloud-starter-stream-source-time) source, add the following dependency, build it, and register it as a custom source. 
We call it `time-tlr` in this example:
+
+```
+<dependency>
+    <groupId>org.springframework.cloud.stream.app</groupId>
+    <artifactId>app-starters-task-launch-request-common</artifactId>
+</dependency>
+```
+
+| |[Spring Cloud Stream Initializr](https://start-scs.cfapps.io/) provides a great starting point for creating stream applications.|
+|---|--------------------------------------------------------------------------------------------------------------------------------|
+
+Next, [register](#applications) the `task-launcher-dataflow` sink and create a task (we use the provided timestamp task):
+
+```
+stream create --name task-every-minute --definition "time-tlr --trigger.fixed-delay=60 --spring.cloud.stream.function.definition=taskLaunchRequest --task.launch.request.task-name=timestamp-task | task-launcher-dataflow" --deploy
+```
+
+The preceding stream produces a task launch request every minute. The request provides the name of the task to launch: `{"name":"timestamp-task"}`.
+
+The following stream definition illustrates the use of command line arguments. It produces messages such as `{"args":["foo=bar","time=12/03/18 17:44:12"],"deploymentProps":{},"name":"timestamp-task"}` to provide command-line arguments to the task:
+
+```
+stream create --name task-every-second --definition "time-tlr --spring.cloud.stream.function.definition=taskLaunchRequest --task.launch.request.task-name=timestamp-task --task.launch.request.args=foo=bar --task.launch.request.arg-expressions=time=payload | task-launcher-dataflow" --deploy
+```
+
+Note the use of SpEL expressions to map each message payload to the `time` command-line argument, along with a static argument (`foo=bar`). 
+ +You can then see the list of task executions by using the shell command `task execution list`, as shown (with its output) in the following example: + +``` +dataflow:>task execution list +╔════════════════════╤══╤════════════════════════════╤════════════════════════════╤═════════╗ +║ Task Name │ID│ Start Time │ End Time │Exit Code║ +╠════════════════════╪══╪════════════════════════════╪════════════════════════════╪═════════╣ +║timestamp-task_26176│4 │Tue May 02 12:13:49 EDT 2017│Tue May 02 12:13:49 EDT 2017│0 ║ +║timestamp-task_32996│3 │Tue May 02 12:12:49 EDT 2017│Tue May 02 12:12:49 EDT 2017│0 ║ +║timestamp-task_58971│2 │Tue May 02 12:11:50 EDT 2017│Tue May 02 12:11:50 EDT 2017│0 ║ +║timestamp-task_13467│1 │Tue May 02 12:10:50 EDT 2017│Tue May 02 12:10:50 EDT 2017│0 ║ +╚════════════════════╧══╧════════════════════════════╧════════════════════════════╧═════════╝ +``` + +In this example, we have shown how to use the `time` source to launch a task at a fixed rate. +This pattern may be applied to any source to launch a task in response to any event. + +### 30.1. Launching a Composed Task From a Stream + +A composed task can be launched with the `task-launcher-dataflow` sink, as discussed [here](#spring-cloud-dataflow-launch-tasks-from-stream). +Since we use the `ComposedTaskRunner` directly, we need to set up the task definitions for the composed task runner itself, along with the composed tasks, prior to the creation of the composed task launching stream. +Suppose we wanted to create the following composed task definition: `AAA && BBB`. 
+The first step would be to create the task definitions, as shown in the following example:
+
+```
+task create composed-task-runner --definition "composed-task-runner"
+task create AAA --definition "timestamp"
+task create BBB --definition "timestamp"
+```
+
+| |Releases of `ComposedTaskRunner` can be found [here](https://github.com/spring-cloud-task-app-starters/composed-task-runner/releases).|
+|---|-------------------------------------------------------------------------------------------------------------------------------------|
+
+Now that the task definitions we need for composed task definition are ready, we need to create a stream that launches `ComposedTaskRunner`.
+So, in this case, we create a stream with:
+
+* The `time` source customized to emit task launch requests, as shown [earlier](#spring-cloud-dataflow-launch-tasks-from-stream).
+
+* The `task-launcher-dataflow` sink that launches the `ComposedTaskRunner`
+
+The stream should resemble the following:
+
+```
+stream create ctr-stream --definition "time --fixed-delay=30 --task.launch.request.task-name=composed-task-runner --task.launch.request.args=--graph=AAA&&BBB,--increment-instance-enabled=true | task-launcher-dataflow"
+```
+
+For now, we focus on the configuration that is required to launch the `ComposedTaskRunner`:
+
+* `graph`: This is the graph that is to be executed by the `ComposedTaskRunner`.
+  In this case it is `AAA&&BBB`.
+
+* `increment-instance-enabled`: This lets each execution of `ComposedTaskRunner` be unique. `ComposedTaskRunner` is built by using [Spring Batch](https://projects.spring.io/spring-batch/).
+  Thus, we want a new Job Instance for each launch of the `ComposedTaskRunner`.
+  To do this, we set `increment-instance-enabled` to be `true`.
+
+## 31. Sharing Spring Cloud Data Flow’s Datastore with Tasks
+
+As discussed in the [Tasks](#spring-cloud-dataflow-task) documentation, Spring
+Cloud Data Flow lets you view Spring Cloud Task application executions. 
So, in +this section, we discuss what is required for a task application and Spring +Cloud Data Flow to share the task execution information. + +### 31.1. A Common DataStore Dependency + +Spring Cloud Data Flow supports many databases out-of-the-box, +so all you typically need to do is declare the `spring_datasource_*` environment variables +to establish what data store Spring Cloud Data Flow needs. +Regardless of which database you decide to use for Spring Cloud Data Flow, make sure that your task also +includes that database dependency in its `pom.xml` or `gradle.build` file. If the database dependency +that is used by Spring Cloud Data Flow is not present in the Task Application, the task fails +and the task execution is not recorded. + +### 31.2. A Common Data Store + +Spring Cloud Data Flow and your task application must access the same datastore instance. +This is so that the task executions recorded by the task application can be read by Spring Cloud Data Flow to list them in the Shell and Dashboard views. +Also, the task application must have read and write privileges to the task data tables that are used by Spring Cloud Data Flow. + +Given this understanding of the datasource dependency between Task applications and Spring Cloud Data Flow, you can now review how to apply them in various Task orchestration scenarios. + +#### 31.2.1. Simple Task Launch + +When launching a task from Spring Cloud Data Flow, Data Flow adds its datasource +properties (`spring.datasource.url`, `spring.datasource.driverClassName`, `spring.datasource.username`, `spring.datasource.password`) +to the application properties of the task being launched. Thus, a task application +records its task execution information to the Spring Cloud Data Flow repository. + +#### 31.2.2. Composed Task Runner + +Spring Cloud Data Flow lets you create a directed graph where each node +of the graph is a task application. 
This is done through the [composed task runner](https://github.com/spring-cloud-task-app-starters/composed-task-runner/blob/master/spring-cloud-starter-task-composedtaskrunner/README.adoc).
+In this case, the rules that applied to a [simple task launch](#datasource-simple-task-launch) or task launcher sink apply to the composed task runner as well.
+All child applications must also have access to the datastore that is being used by the composed task runner.
+Also, all child applications must have the same database dependency as the composed task runner enumerated in their `pom.xml` or `gradle.build` file.
+
+#### 31.2.3. Launching a Task Externally from Spring Cloud Data Flow
+
+You can launch Spring Cloud Task applications by using another method (scheduler, for example) but still track the task execution in Spring Cloud Data Flow.
+You can do so, provided the task applications observe the rules specified [here](#a-common-datastore-dependency) and [here](#a-common-datastore).
+
+| |If you want to use Spring Cloud Data Flow to view your [Spring Batch](https://projects.spring.io/spring-batch/) jobs, make sure that
your batch application uses the `@EnableTask` annotation and follow the rules enumerated [here](#a-common-datastore-dependency) and [here](#a-common-datastore).
More information is available [here](https://github.com/spring-projects/spring-batch-admin/blob/master/MIGRATION.md).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 32. Scheduling Tasks + +Spring Cloud Data Flow lets you schedule the execution of tasks with a `cron` expression. +You can create a schedule through the RESTful API or the Spring Cloud Data Flow UI. + +### 32.1. The Scheduler + +Spring Cloud Data Flow schedules the execution of its tasks through a scheduling agent that is available on the cloud platform. +When using the Cloud Foundry platform, Spring Cloud Data Flow uses the [PCF Scheduler](https://www.cloudfoundry.org/the-foundry/scheduler/). +When using Kubernetes, a [CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) will be used. + +| |Scheduled tasks do not implement the continuous deployment feature. Any changes to application version or properties for a task definition in Spring Cloud Data Flow will not affect scheduled tasks.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +![Scheduler Architecture Overview](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-scheduling-architecture.png) + +Figure 9. Architectural Overview + +### 32.2. Enabling Scheduling + +By default, Spring Cloud Data Flow leaves the scheduling feature disabled. 
To enable the scheduling feature, set the following feature properties to `true`: + +* `spring.cloud.dataflow.features.schedules-enabled` + +* `spring.cloud.dataflow.features.tasks-enabled` + +### 32.3. The Lifecycle of a Schedule + +The lifecycle of a schedule has three parts: + +* [Scheduling a Task Execution](#spring-cloud-data-flow-schedule-scheduling) + +* [Deleting a Schedule](#spring-cloud-data-flow-schedule-unscheduling) + +* [Listing Schedules](#spring-cloud-data-flow-schedule-list) + +#### 32.3.1. Scheduling a Task Execution + +You can schedule a task execution via the: + +* Spring Cloud Data Flow Shell + +* Spring Cloud Data Flow Dashboard + +* Spring Cloud Data Flow RESTful API + +#### 32.3.2. Scheduling a Task + +To schedule a task using the shell, use the `task schedule create` command to create the schedule, as shown in the following example: + +``` +dataflow:>task schedule create --definitionName mytask --name mytaskschedule --expression '*/1 * * * *' +Created schedule 'mytaskschedule' +``` + +In the earlier example, we created a schedule called `mytaskschedule` for the task definition called `mytask`. This schedule launches `mytask` once a minute. + +| |If using Cloud Foundry, the `cron` expression above would be: `*/1 * ? * *`. This is because Cloud Foundry uses the Quartz `cron` expression format.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Maximum Length for a Schedule Name + +The maximum character length of a schedule name is dependent on the platform. + +|Kubernetes|Cloud Foundry|Local| +|----------|-------------|-----| +| 52 | 63 | N/A | + +#### 32.3.3. 
Deleting a Schedule + +You can delete a schedule by using the: + +* Spring Cloud Data Flow Shell + +* Spring Cloud Data Flow Dashboard + +* Spring Cloud Data Flow RESTful API + +To delete a task schedule by using the shell, use the `task schedule destroy` command, as shown in the following example: + +``` +dataflow:>task schedule destroy --name mytaskschedule +Deleted task schedule 'mytaskschedule' +``` + +#### 32.3.4. Listing Schedules + +You can view the available schedules by using the: + +* Spring Cloud Data Flow Shell + +* Spring Cloud Data Flow Dashboard + +* Spring Cloud Data Flow RESTful API + +To view your schedules from the shell, use the `task schedule list` command, as shown in the following example: + +``` +dataflow:>task schedule list +╔══════════════════════════╤════════════════════╤════════════════════════════════════════════════════╗ +║ Schedule Name │Task Definition Name│ Properties ║ +╠══════════════════════════╪════════════════════╪════════════════════════════════════════════════════╣ +║mytaskschedule │mytask │spring.cloud.scheduler.cron.expression = */1 * * * *║ +╚══════════════════════════╧════════════════════╧════════════════════════════════════════════════════╝ +``` + +| |Instructions to create, delete, and list schedules by using the Spring Cloud Data Flow UI can be found [here](https://dataflow.spring.io/docs/feature-guides/batch/scheduling/).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 33. Continuous Deployment + +As task applications evolve, you want to get your updates to production. This section walks through the capabilities that Spring Cloud Data Flow provides around being able to update task applications. + +When a task application is registered (see [Registering a Task Application](#spring-cloud-dataflow-register-task-apps)), a version is associated with it. 
A task application can have multiple versions associated with it, with one selected as the default. The following image illustrates an application with multiple versions associated with it (see the timestamp entry). + +![Task Application Versions](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-application-versions.png) + +Versions of an application are managed by registering multiple applications with the same name and coordinates, *except* the version. For example, if you were to register an application with the following values, you would get one application registered with two versions (2.1.0.RELEASE and 2.1.1.RELEASE): + +* Application 1 + + * Name: `timestamp` + + * Type: `task` + + * URI: `maven://org.springframework.cloud.task.app:timestamp-task:2.1.0.RELEASE` + +* Application 2 + + * Name: `timestamp` + + * Type: `task` + + * URI: `maven://org.springframework.cloud.task.app:timestamp-task:2.1.1.RELEASE` + +Besides having multiple versions, Spring Cloud Data Flow needs to know which version to run on the next launch. This is indicated by setting a version to be the default version. Whatever version of a task application is configured as the default version is the one to be run on the next launch request. You can see which version is the default in the UI, as this image shows: + +![Task Application Default Version](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-default-version.png) + +### 33.1. Task Launch Lifecycle + +In previous versions of Spring Cloud Data Flow, when the request to launch a task was received, Spring Cloud Data Flow would deploy the application (if needed) and run it. If the application was being run on a platform that did not need to have the application deployed every time (CloudFoundry, for example), the previously deployed application was used. 
This flow has changed in 2.3. The following image shows what happens when a task launch request comes in now:
+
+![Flow For Launching A Task](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-launch-flow.png)
+
+There are three main flows to consider in the preceding diagram. Launching the first time or launching with no changes is one. The other two are launching when there are changes but the application is not currently running and launching when there are changes and the application is running. We look at the flow with no changes first.
+
+#### 33.1.1. Launching a Task With No Changes
+
+1. A launch request comes into Data Flow. Data Flow determines that an upgrade is not required, since nothing has changed (no properties, deployment properties, or versions have changed since the last execution).
+
+1. On platforms that cache a deployed artifact (CloudFoundry, at this writing), Data Flow checks whether the application was previously deployed.
+
+2. If the application needs to be deployed, Data Flow deploys the task application.
+
+3. Data Flow launches the application.
+
+This flow is the default behavior and, if nothing has changed, occurs every time a request comes in. Note that this is the same flow that Data Flow has always used for launching tasks.
+
+#### 33.1.2. Launching a Task With Changes That Is Not Currently Running
+
+The second flow to consider when launching a task is when a task is not running but there is a change in any of the task application version, application properties, or deployment properties. In this case, the following flow is executed:
+
+1. A launch request comes into Data Flow. Data Flow determines that an upgrade is required, since there was a change in the task application version, the application properties, or the deployment properties.
+
+2. Data Flow checks to see whether another instance of the task definition is currently running.
+
+1. 
If there is no other instance of the task definition currently running, the old deployment is deleted. + +2. On platforms that cache a deployed artifact (CloudFoundry, at this writing), Data Flow checks whether the application was previously deployed (this check evaluates to `false` in this flow, since the old deployment was deleted). + +3. Data Flow does the deployment of the task application with the updated values (new application version, new merged properties, and new merged deployment properties). + +4. Data Flow launches the application. + +This flow is what fundamentally enables continuous deployment for Spring Cloud Data Flow. + +#### 33.1.3. Launch a Task With Changes While Another Instance Is Running + +The last main flow is when a launch request comes to Spring Cloud Data Flow to do an upgrade but the task definition is currently running. In this case, the launch is blocked due to the requirement to delete the current application. On some platforms (CloudFoundry, at this writing), deleting the application causes all currently running applications to be shut down. This feature prevents that from happening. The following process describes what happens when a task changes while another instance is running: + +1. A launch request comes into Data Flow. Data Flow determines that an upgrade is required, since there was a change in the task application version, the application properties, or the deployment properties. + +2. Data Flow checks to see whether another instance of the task definition is currently running. + +3. Data Flow prevents the launch from happening, because other instances of the task definition are running. 
+
+| |Any launch that requires an upgrade of a task definition that is running at the time of the request is blocked from running due to the need to delete any currently running tasks.|
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+# Task Developer Guide
+
+See the [Batch Developer](https://dataflow.spring.io/docs/batch-developer-guides/) section of the microsite for more about how to create, test, and run Spring Cloud Task applications on your local machine.
+
+# Task Monitoring
+
+See the [Task Monitoring](https://dataflow.spring.io/docs/feature-guides/batch/monitoring/) Guide of the microsite for more about how to monitor the applications that were deployed as part of a task.
+
+# Dashboard
+
+This section describes how to use the dashboard of Spring Cloud Data Flow.
+
+## 34. Introduction
+
+Spring Cloud Data Flow provides a browser-based GUI called the Dashboard to manage the following information:
+
+* **Apps**: The **Apps** tab lists all available applications and provides the controls to register and unregister them.
+
+* **Runtime**: The **Runtime** tab provides the list of all running applications.
+
+* **Streams**: The **Streams** tab lets you list, design, create, deploy, and destroy Stream Definitions.
+
+* **Tasks**: The **Tasks** tab lets you list, create, launch, schedule, and destroy Task Definitions.
+
+* **Jobs**: The **Jobs** tab lets you perform batch job related functions.
+
+Upon starting Spring Cloud Data Flow, the dashboard is available at:
+
+`http://<host>:<port>/dashboard`
+
+For example, if Spring Cloud Data Flow is running locally, the dashboard is available at `[localhost:9393/dashboard](http://localhost:9393/dashboard)`.
+
+If you have enabled HTTPS, the dashboard is available at `[localhost:9393/dashboard](https://localhost:9393/dashboard)`. 
+If you have enabled security, a login form is available at `[localhost:9393/dashboard/#/login](http://localhost:9393/dashboard/#/login)`.
+
+| |The default Dashboard server port is `9393`.|
+|---|--------------------------------------------|
+
+The following image shows the opening page of the Spring Cloud Data Flow dashboard:
+
+![The Spring Cloud Data Flow Dashboard](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-dashboard-about.png)
+
+Figure 10. The Spring Cloud Data Flow Dashboard
+
+## 35. Apps
+
+The **Applications** tab of the dashboard lists all the available applications and provides the controls to register and unregister them (if applicable).
+You can import a number of applications at once by using the Bulk Import Applications action.
+
+The following image shows a typical list of available applications within the dashboard:
+
+![List of available applications](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-available-apps-list.png)
+
+Figure 11. List of Available Applications
+
+### 35.1. Bulk Import of Applications
+
+Applications can be imported in numerous ways, which are available on the "Applications" page.
+For bulk import, the application definitions are expected to be expressed in a properties style, as follows:
+
+```
+<type>.<name> = <coordinates>
+```
+
+The following examples show typical application definitions:
+
+```
+task.timestamp=maven://org.springframework.cloud.task.app:timestamp-task:1.2.0.RELEASE
+processor.transform=maven://org.springframework.cloud.stream.app:transform-processor-rabbit:1.2.0.RELEASE
+```
+
+In the "Import application coordinates from an HTTP URI location" section, you can specify a URI that points to a properties file stored elsewhere; it should contain properties formatted as shown in the previous example. 
+Alternatively, by using the **Apps as Properties** textbox in the "Import application coordinates from a properties file" section, you can directly list each property string. Finally, if the properties are stored in a local file, the **Import a File** option opens a local file browser to select the file.
+After setting your definitions through one of these routes, click **Import Application(s)**.
+
+The following image shows an example page of one way to bulk import applications:
+
+![Bulk Import Applications](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-bulk-import-applications.png)
+
+Figure 12. Bulk Import Applications
+
+## 36. Runtime
+
+The **Runtime** tab of the Dashboard application shows the list of all running applications.
+For each runtime application, the state of the deployment and the number of deployed instances is shown.
+A list of the used deployment properties is available by clicking on the application ID.
+
+The following image shows an example of the **Runtime** tab in use:
+
+![List of running applications](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-runtime.png)
+
+Figure 13. List of Running Applications
+
+## 37. Streams
+
+The **Streams** tab has two child tabs: **Definitions** and **Create Stream**. The following topics describe how to work with each one:
+
+* [Working with Stream Definitions](#dashboard-stream-definitions)
+
+* [Creating a Stream](#dashboard-flo-streams-designer)
+
+* [Deploying a Stream](#dashboard-stream-deploy)
+
+* [Accessing Stream Logs](#dashboard-stream-logs)
+
+### 37.1. Working with Stream Definitions
+
+The **Streams** section of the Dashboard includes the **Definitions** tab that provides a listing of stream definitions.
+There you have the option to deploy or undeploy those stream definitions. 
+Additionally, you can remove the definition by clicking on **Destroy**. +Each row includes an arrow on the left, which you can click to see a visual representation of the definition. +Hovering over the boxes in the visual representation shows more details about the applications, including any options passed to them. + +In the following screenshot, the `timer` stream has been expanded to show the visual representation: + +![List of Stream Definitions](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-streams-list-definitions.png) + +Figure 14. List of Stream Definitions + +If you click the details button, the view changes to show a visual representation of that stream and any related streams. +In the preceding example, if you click details for the `timer` stream, the view changes to the following view, which clearly shows the relationship between the three streams (two of them are tapping into the `timer` stream): + +![Stream Details Page](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-stream-details.png) + +Figure 15. Stream Details Page + +### 37.2. Creating a Stream + +The **Streams** section of the Dashboard includes the **Create Stream** tab, which makes the [Spring Flo](https://github.com/spring-projects/spring-flo) designer available. The designer is a canvas application that offers an interactive graphical interface for creating data pipelines. 
+ +In this tab, you can: + +* Create, manage, and visualize stream pipelines by using DSL, a graphical canvas, or both + +* Write pipelines by using DSL with content-assist and auto-complete + +* Use auto-adjustment and grid-layout capabilities in the GUI for simpler and interactive organization of pipelines + +You should watch this [screencast](https://www.youtube.com/watch?v=78CgV46OstI) that highlights some of the "Flo for Spring Cloud Data Flow" capabilities. +The Spring Flo [wiki](https://github.com/spring-projects/spring-flo/wiki) includes more detailed content on core Flo capabilities. + +The following image shows the Flo designer in use: + +![Flo for Spring Cloud Data Flow](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-flo-create-stream.png) + +Figure 16. Flo for Spring Cloud Data Flow + +### 37.3. Deploying a Stream + +The stream deploy page includes tabs that provide different ways to set up the deployment properties and deploy the stream. +The following screenshots show the stream deploy page for `foobar` (`time | log`). + +You can define deployment properties by using: + +* Form builder tab: a builder that helps you to define deployment properties (deployer, application properties, and so on) + +* Free text tab: a free text area (for key-value pairs) + +You can switch between both views. + +| |The form builder offers stronger validation of the inputs.| +|---|----------------------------------------------------------| + +![Form builder](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-stream-deploy-builder.png) + +Figure 17. The following image shows the form builder + +![Free text](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-stream-deploy-freetext.png) + +Figure 18. 
The following image shows the same properties in the free text + +### 37.4. Accessing Stream Logs + +Once the stream applications are deployed, their logs can be accessed from the Stream `summary` page, as the following image shows: + +![Stream Logs](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-stream-logs.png) + +### 37.5. Creating Fan-In and Fan-Out Streams + +In the [Fan-in and Fan-out](#spring-cloud-dataflow-stream-dsl-fanin-fanout) chapter, you can learn how to support fan-in and fan-out use cases by using [named destinations](#spring-cloud-dataflow-stream-dsl-named-destinations). +The UI provides dedicated support for named destinations as well: + +![Fan-in and Fan-out example](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-flo-create-stream-fanin-fanout.png) + +Figure 19. Flo for Spring Cloud Data Flow + +In this example, we have data from an *HTTP Source* and a *JDBC Source* that is being sent to the *sharedData* channel, which represents a fan-in use case. +On the other end we have a *Cassandra Sink* and a *File Sink* subscribed to the *sharedData* channel, which represents a fan-out use case. + +### 37.6. Creating a Tap Stream + +Creating taps by using the Dashboard is straightforward. +Suppose you have a stream consisting of an *HTTP Source* and a *File Sink* and you would like to tap into the stream +to also send data to a *JDBC Sink*. +To create the tap stream, connect the output connector of the *HTTP Source* to the *JDBC Sink*. +The connection is displayed as a dotted line, indicating that you created a tap stream. + +![Tap stream example](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-flo-create-tap-stream.png) + +Figure 20. 
Creating a Tap Stream + +The primary stream (*HTTP Source* to *File Sink*) will be automatically named, in case you did not provide a name for the stream, yet. +When creating tap streams, the primary stream must always be explicitly named. +In the preceding image, the primary stream was named *HTTP\_INGEST*. + +By using the Dashboard, you can also switch the primary stream so that it becomes the secondary tap stream. + +![Switch tap stream to primary stream](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-flo-tap-stream-switch-to-primary-stream.png) + +Figure 21. Change Primary Stream to Secondary Tap Stream + +Hover over the existing primary stream, the line between *HTTP Source* and *File Sink*. +Several control icons appear, and, by clicking on the icon labeled *Switch to/from tap*, +you change the primary stream into a tap stream. +Do the same for the tap stream and switch it to a primary stream. + +![End result of switching the tap stream to a primary stream](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-flo-tap-stream-switch-to-primary-stream-result.png) + +Figure 22. End Result of Switching the Primary Stream + +| |When interacting directly with [named destinations](#spring-cloud-dataflow-stream-dsl-named-destinations),
there can be "n" combinations (Inputs/Outputs). This allows you to create complex topologies involving a
wide variety of data sources and destinations.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 37.7. Import and Export Streams + +The **Import/Export** tab of the Dashboard includes a page that provides the option to import and export streams. + +The following image shows the streams export page: + +![Stream Utils Export](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-streams-utils-export.png) + +Figure 23. Stream Utils Export page + +When importing the streams, you have to import from a valid JSON file. You can either manually draft the file or export the file from the streams export page. + +![Stream Utils Import](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-streams-utils-import.png) + +Figure 24. Stream Utils Import page + +After importing the file, you get confirmation of whether the operation completed successfully. + +![Stream Utils Import Result](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-streams-utils-import-result.png) + +Figure 25. Stream Utils Import Result page + +## 38. Tasks + +The **Tasks** tab of the Dashboard currently has three tabs: + +* [Apps](#dashboard-tasks-apps) + +* [Definitions](#dashboard-task-definition) + +* [Executions](#dashboard-tasks-executions) + +* [Scheduling](#dashboard-task-scheduling) + +### 38.1. Apps + +Each application encapsulates a unit of work into a reusable component. +Within the Data Flow runtime environment, applications let you create definitions for streams as well as tasks. 
+Consequently, the **Apps** tab within the **Tasks** tab lets you create task definitions. + +| |You can also use this tab to create Batch Jobs.| +|---|-----------------------------------------------| + +The following image shows a typical list of task applications: + +![List of Task Apps](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-apps-list.png) + +Figure 26. List of Task Apps + +On this screen, you can perform the following actions: + +* View details, such as the task application options. + +* Create a task definition from the respective application. + +#### 38.1.1. View Task Application Details + +On this page, you can view the details of a selected task application, including the list of available options (properties) for that application. + +### 38.2. Definitions + +This page lists the Data Flow task definitions and provides actions to launch or destroy those tasks. + +The following image shows the Definitions page: + +![List of Task Definitions](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-definitions-list.png) + +Figure 27. List of Task Definitions + +#### 38.2.1. Create a Task Definition + +The following image shows a task definition composed of the timestamp application as well as the list of task applications that can be used to create a task definition: + +![List of Task Applications](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-definition-create.png) + +On this page, you can also specify various properties that are used during the deployment of the application. +Once you are satisfied with the task definition, you can click the **CREATE TASK** button. A dialog box then asks for a task definition name and description. 
At a minimum, you must provide a name for the new definition. + +#### 38.2.2. Creating Composed Task Definitions + +The dashboard includes the **Create Composed Task** tab, which provides an interactive graphical interface for creating composed tasks. + +In this tab, you can: + +* Create and visualize composed tasks by using DSL, a graphical canvas, or both. + +* Use auto-adjustment and grid-layout capabilities in the GUI for simpler and interactive organization of the composed task. + +On the **Create Composed Task** screen, you can define one or more task parameters by entering both the parameter key and the parameter value. + +| |Task parameters are not typed.| +|---|------------------------------| + +The following image shows the composed task designer: + +![Composed Task Designer](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-ctr-flo-tab.png) + +Figure 28. Composed Task Designer + +#### 38.2.3. Launching Tasks + +Once the task definition has been created, you can launch the tasks through the dashboard. +To do so, click the **Tasks** tab and select the task you want to launch by pressing `Launch`. +The following image shows the Task Launch page: + +![Task Launch](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-launch.png) + +Figure 29. Task Launch Page + +#### 38.2.4. Import/Export Tasks + +The **Import/Export** page provides the option to import and export tasks. This is done by clicking the **Import/Export** option on the left side of page. From here, click the **Export task(s): Create a JSON file with the selected tasks** option. The `Export Tasks(s)` page appears. 
+ +The following image shows the tasks export page: + +![Tasks Utils Export](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-tasks-utils-export.png) + +Figure 30. Tasks Utils Export page + +Similarly, you can import task definitions. To do so, click the **Import/Export** option on the left side of page. From here, click the **Import task(s): Import tasks from a JSON file** option to show the **Import Tasks** page. On the **Import Tasks** page, you have to import from a valid JSON file. You can either manually draft the file or export the file from the **Tasks Export** page. + +![Tasks Utils Import](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-tasks-utils-import.png) + +Figure 31. Tasks Utils Import page + +After importing the file, you get confirmation on whether the operation completed successfully. + +![Tasks Utils Import Result](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-tasks-utils-import-result.png) + +Figure 32. Tasks Utils Import Result page + +### 38.3. Executions + +The **Task Executions** tab shows the current running and completed task executions. From this page, you can drill down into the **Task Execution** details page. Furthermore, you can relaunch a **Task Execution** or stop a running execution. + +Finally, you can clean up one or more task executions. This operation removes any associated task or batch job from the underlying persistence store. This operation can only be triggered for *parent* task executions and cascades down to the child task executions (if there are any). 
+ +The following image shows the **Executions** tab: + +![List of Task Executions](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-executions-list.png) + +Figure 33. List of Task Executions + +### 38.4. Execution Detail + +For each task execution on the **Task Executions** tab, you can retrieve detailed information about a specific execution by clicking the **Execution ID** of the task execution. + +![List of Task Executions](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-execution-detail.png) + +On this screen, you can view not only the information from the task executions page but also: + +* Task Arguments + +* External Execution ID + +* Batch Job Indicator (indicates if the task execution contained Spring Batch jobs.) + +* Job Execution IDs links (Clicking the Job Execution Id will take you to the [Job Execution Details](#dashboard-job-executions-details) for that Job Execution ID.) + +* Task Execution Duration + +* Task Execution Exit Message + +* Logging output from the Task Execution + +Additionally, you can trigger the following operations: + +* Relaunch a task + +* Stop a running task + +* Task execution cleanup (for parent task executions only) + +#### 38.4.1. Stop Executing Tasks + +To submit a stop task execution request to the platform, click the drop down button next to the task execution that needs to be stopped. +Now click the **Stop task** option. The dashboard presents a dialog box asking if you are sure that you want to stop the task execution. If so, click `Stop Task Execution(s)`. 
+ +![Stop Executing Tasks](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-task-execution-stop.png) + +| |Child Spring Cloud Task applications launched via Spring Batch applications that use remote partitioning are not stopped.| +|---|-------------------------------------------------------------------------------------------------------------------------| + +## 39. Jobs + +The **Job Executions** tab of the Dashboard lets you inspect batch jobs. +The main section of the screen provides a list of job executions. +Batch jobs are tasks that each execute one or more batch jobs. +Each job execution has a reference to the task execution ID (in the Task ID column). + +The list of job executions also shows the state of the underlying Job Definition. +Thus, if the underlying definition has been deleted, “No definition found” appears in the **Status** column. + +You can take the following actions for each job: + +* Restart (for failed jobs). + +* Stop (for running jobs). + +* View execution details. + +| |Clicking the stop button actually sends a stop request to the running job, which may not immediately stop.| +|---|----------------------------------------------------------------------------------------------------------| + +The following image shows the **Jobs** tab: + +![List of Job Executions](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-job-executions-list.png) + +Figure 34. List of Job Executions + +### 39.1. Job Execution Details + +After you have launched a batch job, the Job Execution Details page shows information about the job. 
+ +The following image shows the Job Execution Details page: + +![Job Execution Details](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-jobs-job-execution-details.png) + +Figure 35. Job Execution Details + +The Job Execution Details page contains a list of the executed steps. +You can further drill into the details of each step’s execution by clicking the magnifying glass icon. + +### 39.2. Step Execution Details + +The Step Execution Details page provides information about an individual step within a job. + +The following image shows the Step Execution Details page: + +![Step Execution History](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-step-execution-history.png) + +Figure 36. Step Execution Details + +The Step Execution Details screen provides a complete list of all Step Execution Context key-value pairs. + +| |For exceptions, the **Exit Description** field contains additional error information.
However, this field can have a maximum of 2500 characters.
Therefore, in the case of long exception stack traces, trimming of error messages may occur.
When that happens, check the server log files for further details.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 39.3. Step Execution History + +Under **Step Execution History**, you can also view various metrics associated with the selected step, such as duration, read counts, write counts, and others. + +## 40. Scheduling + +You can create schedules from the SCDF Dashboard for the Task Definitions. See the [Scheduling Batch Jobs](https://dataflow.spring.io/docs/feature-guides/batch/scheduling/) section of the microsite for more information. + +## 41. Auditing + +The Auditing page of the Dashboard gives you access to recorded audit events. Audit events +are recorded for: + +* Streams + + * Create + + * Delete + + * Deploy + + * Undeploy + +* Tasks + + * Create + + * Delete + + * Launch + +* Scheduling of Tasks + + * Create Schedule + + * Delete Schedule + +The following image shows the Audit Records page: + +![List of available audit records](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-audit-records-list.png) + +Figure 37. List Overview of Audit Records + +By clicking the *show details* icon (the “i” in a circle on the right), you can obtain further details regarding +the auditing details: + +![Details of a single audit record](https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/spring-cloud-dataflow-docs/src/main/asciidoc/images/dataflow-audit-records-details.png) + +Figure 38. List Details of an Audit Record + +Generally, auditing provides the following information: + +* When was the record created? 
+ +* The name of the user who triggered the audit event (if security is enabled) + +* Audit operation (Schedule, Stream, or Task) + +* The performed action (Create, Delete, Deploy, Rollback, Undeploy, or Update) + +* Correlation ID, such as the Stream or Task name + +* Audit Data + +The written value of the *audit data* property depends on the performed *audit operation* and the *action type*. +For example, when a schedule is being created, the name of the task definition, +task definition properties, deployment properties, and command line arguments are written +to the persistence store. + +Sensitive information is sanitized prior to saving the Audit Record, in a best-effort manner. +Any of the following keys are being detected and their sensitive values are +masked: + +* password + +* secret + +* key + +* token + +* .\*credentials.\* + +* vcap\_services + +# Samples + +This section shows the available samples. + +## 42. Links + +Several samples have been created to help you get started on implementing higher-level use cases than the basic Streams and Tasks shown in the reference guide. +The samples are part of a separate [repository](https://github.com/spring-cloud/spring-cloud-dataflow-samples) and have their own [reference documentation](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/). 
+ +The following samples are available: + +General + +* [Java DSL](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_java_dsl) + +* [HTTP to Cassandra](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#spring-cloud-data-flow-samples-http-cassandra-overview) + +* [HTTP to MySQL](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_http_to_mysql_demo) + +* [HTTP to Gemfire](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_http_to_gemfire_demo) + +* [Gemfire CQ to Log Demo](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_gemfire_cq_to_log_demo) + +* [Gemfire to Log Demo](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_gemfire_to_log_demo) + +* [Custom Processor](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_custom_spring_cloud_stream_processor) + +Task and Batch + +* [Batch Job on Cloud Foundry](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_batch_job_on_cloud_foundry) + +* [Batch File Ingest](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_batch_file_ingest) + +Data Science + +* [Species Prediction](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_species_prediction) + +Functions + +* [Using Spring Cloud Function](https://docs.spring.io/spring-cloud-dataflow-samples/docs/current/reference/htmlsingle/#_functions_in_spring_cloud_data_flow) + +# REST API Guide + +This section describes the Spring Cloud Data Flow REST API. + +## 43. Overview + +Spring Cloud Data Flow provides a REST API that lets you access all aspects of the server. +In fact, the Spring Cloud Data Flow shell is a first-class consumer of that API. 
+ +| |If you plan to use the REST API with Java, you should consider using the
provided Java client (`DataflowTemplate`) that uses the REST API internally.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 43.1. HTTP verbs + +Spring Cloud Data Flow tries to adhere as closely as possible to standard HTTP and REST conventions in its use of HTTP verbs, as described in the following table: + +| Verb | Usage | +|--------|------------------------------------------------------------------------------------------------------------------------------------------------| +| `GET` | Used to retrieve a resource. | +| `POST` | Used to create a new resource. | +| `PUT` |Used to update an existing resource, including partial updates. Also used for
resources that imply the concept of `restarts`, such as tasks.| +|`DELETE`| Used to delete an existing resource. | + +### 43.2. HTTP Status Codes + +Spring Cloud Data Flow tries to adhere as closely as possible to standard HTTP and REST conventions in its use of HTTP status codes, as shown in the following table: + +| Status code | Usage | +|--------------------------|--------------------------------------------------------------------------------------------------------------------| +| `200 OK` | The request completed successfully. | +| `201 Created` |A new resource has been created successfully. The resource’s URI is available from the response’s `Location` header.| +| `204 No Content` | An update to an existing resource has been applied successfully. | +| `400 Bad Request` | The request was malformed. The response body includes an error description that provides further information. | +| `404 Not Found` | The requested resource did not exist. | +| `409 Conflict` |The requested resource already exists. For example, the task already exists or the stream was already being deployed| +|`422 Unprocessable Entity`| Returned in cases where the job execution cannot be stopped or restarted. | + +### 43.3. Headers + +Every response has the following headers: + +| Name | Description | +|--------------|------------------------------------------------------------| +|`Content-Type`|The Content-Type of the payload, e.g. `application/hal+json`| + +### 43.4. Errors + +| Path | Type | Description | +|-----------|--------|------------------------------------------------------| +| `error` |`String`| The HTTP error that occurred, e.g. `Bad Request` | +| `message` |`String`| A description of the cause of the error | +| `path` |`String`| The path to which the request was made | +| `status` |`Number`| The HTTP status code, e.g. `400` | +|`timestamp`|`String`|The time, in milliseconds, at which the error occurred| + +### 43.5. 
Hypermedia + +Spring Cloud Data Flow uses hypermedia, and resources include links to other resources +in their responses. +Responses are in the [Hypertext Application Language (HAL)](http://stateless.co/hal_specification.html) format. +Links can be found beneath the `_links` key. +Users of the API should not create URIs themselves. +Instead, they should use the above-described links to navigate. + +## 44. Resources + +The API includes the following resources: + +* [Index](#api-guide-resources-index) + +* [Server Meta Information](#resources-about) + +* [Audit Records](#api-guide-resources-audit-records) + +* [Registered Applications](#resources-registered-applications) + +* [Stream Definitions](#api-guide-resources-stream-definitions) + +* [Stream Deployments](#api-guide-resources-stream-deployment) + +* [Stream Validation](#api-guide-resources-stream-validate) + +* [Task Definitions](#api-guide-resources-task-definitions) + +* [Task Executions](#api-guide-resources-task-executions) + +* [Task Scheduler](#api-guide-resources-task-scheduler) + +* [Task Validation](#api-guide-resources-task-validate) + +* [Job Executions](#api-guide-resources-job-executions) + +* [Job Instances](#api-guide-resources-job-instances) + +* [Job Step Executions](#api-guide-resources-job-step-executions) + +* [Runtime Information about Applications](#api-guide-resources-runtime-information-applications) + +* [Stream Logs](#api-guide-resources-stream-logs) + +* [Task Logs](#api-guide-resources-task-logs) + +### 44.1. Index + +The index provides the entry point into Spring Cloud Data Flow’s REST API. 
+The following topics provide more details: + +* [Accessing the index](#api-guide-resources-index-access) + +* [Request Structure](#api-guide-resources-index-request-structure) + +* [Example Request](#api-guide-resources-index-example-request) + +* [Response Structure](#api-guide-resources-index-response-structure) + +* [Example Response](#api-guide-resources-index-example-response) + +* [Links](#api-guide-resources-index-links) + +#### 44.1.1. Accessing the index + +Use a `GET` request to access the index. + +##### Request Structure + +``` +GET / HTTP/1.1 +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/' -i -X GET +``` + +##### Response Structure + +| Path | Type | Description | +|-------------------------------------------------------------------------------------|---------|------------------------------------------------------------------------------------| +| `_links` |`Object` | Links to other resources | +| `['api.revision']` |`Number` | Incremented each time a change is implemented in this REST API | +| `_links.audit-records.href` |`String` | Link to the audit records | +| `_links.dashboard.href` |`String` | Link to the dashboard | +| `_links.streams/definitions.href` |`String` | Link to the streams/definitions | +| `_links.streams/definitions/definition.href` |`String` | Link to the streams/definitions/definition | +| `_links.streams/definitions/definition.templated` |`Boolean`| Link streams/definitions/definition is templated | +| `_links.runtime/apps.href` |`String` | Link to the runtime/apps | +| `_links.runtime/apps/{appId}.href` |`String` | Link to the runtime/apps/{appId} | +| `_links.runtime/apps/{appId}.templated` |`Boolean`| Link runtime/apps is templated | +| `_links.runtime/apps/{appId}/instances.href` |`String` | Link to the runtime/apps/{appId}/instances | +| `_links.runtime/apps/{appId}/instances.templated` |`Boolean`| Link runtime/apps/{appId}/instances is templated | +| 
`_links.runtime/apps/{appId}/instances/{instanceId}.href` |`String` | Link to the runtime/apps/{appId}/instances/{instanceId} | +| `_links.runtime/apps/{appId}/instances/{instanceId}.templated` |`Boolean`| Link runtime/apps/{appId}/instances/{instanceId} is templated | +| `_links.runtime/streams.href` |`String` | Link to the runtime/streams | +| `_links.runtime/streams.templated` |`Boolean`| Link runtime/streams is templated | +| `_links.runtime/streams/{streamNames}.href` |`String` | Link to the runtime/streams/{streamNames} | +| `_links.runtime/streams/{streamNames}.templated` |`Boolean`| Link runtime/streams/{streamNames} is templated | +| `_links.streams/logs.href` |`String` | Link to the streams/logs | +| `_links.streams/logs/{streamName}.href` |`String` | Link to the streams/logs/{streamName} | +| `_links.streams/logs/{streamName}/{appName}.href` |`String` | Link to the streams/logs/{streamName}/{appName} | +| `_links.streams/logs/{streamName}.templated` |`Boolean`| Link streams/logs/{streamName} is templated | +| `_links.streams/logs/{streamName}/{appName}.templated` |`Boolean`| Link streams/logs/{streamName}/{appName} is templated | +| `_links.streams/deployments` |`Object` | Link to streams/deployments | +| `_links.streams/deployments.href` |`String` | Link to streams/deployments | +| `_links.streams/deployments/{name}` |`Object` | Link streams/deployments/{name} is templated | +| `_links.streams/deployments/{name}.href` |`String` | Link streams/deployments/{name} is templated | +| `_links.streams/deployments/{name}.templated` |`Boolean`| Link streams/deployments/{name} is templated | +| `_links.streams/deployments/{name}{?reuse-deployment-properties}.href` |`String` | Link streams/deployments/{name} is templated | +| `_links.streams/deployments/{name}{?reuse-deployment-properties}.templated` |`Boolean`| Link streams/deployments/{name} is templated | +| `_links.streams/deployments/deployment.href` |`String` | Link to the streams/deployments/deployment | +| 
`_links.streams/deployments/deployment.templated` |`Boolean`| Link streams/deployments/deployment is templated | +| `_links.streams/deployments/manifest/{name}/{version}.href` |`String` | Link to the streams/deployments/manifest/{name}/{version} | +| `_links.streams/deployments/manifest/{name}/{version}.templated` |`Boolean`| Link streams/deployments/manifest/{name}/{version} is templated | +| `_links.streams/deployments/history/{name}.href` |`String` | Link to the streams/deployments/history/{name} | +| `_links.streams/deployments/history/{name}.templated` |`Boolean`| Link streams/deployments/history is templated | +| `_links.streams/deployments/rollback/{name}/{version}.href` |`String` | Link to the streams/deployments/rollback/{name}/{version} | +| `_links.streams/deployments/rollback/{name}/{version}.templated` |`Boolean`| Link streams/deployments/rollback/{name}/{version} is templated | +| `_links.streams/deployments/update/{name}.href` |`String` | Link to the streams/deployments/update/{name} | +| `_links.streams/deployments/update/{name}.templated` |`Boolean`| Link streams/deployments/update/{name} is templated | +| `_links.streams/deployments/platform/list.href` |`String` | Link to the streams/deployments/platform/list | +| `_links.streams/deployments/scale/{streamName}/{appName}/instances/{count}.href` |`String` | Link to the streams/deployments/scale/{streamName}/{appName}/instances/{count} | +|`_links.streams/deployments/scale/{streamName}/{appName}/instances/{count}.templated`|`Boolean`|Link streams/deployments/scale/{streamName}/{appName}/instances/{count} is templated| +| `_links.streams/validation.href` |`String` | Link to the streams/validation | +| `_links.streams/validation.templated` |`Boolean`| Link streams/validation is templated | +| `_links.tasks/platforms.href` |`String` | Link to the tasks/platforms | +| `_links.tasks/definitions.href` |`String` | Link to the tasks/definitions | +| `_links.tasks/definitions/definition.href` |`String` | Link 
to the tasks/definitions/definition | +| `_links.tasks/definitions/definition.templated` |`Boolean`| Link tasks/definitions/definition is templated | +| `_links.tasks/executions.href` |`String` | Link to the tasks/executions | +| `_links.tasks/executions/name.href` |`String` | Link to the tasks/executions/name | +| `_links.tasks/executions/name.templated` |`Boolean`| Link tasks/executions/name is templated | +| `_links.tasks/executions/current.href` |`String` | Link to the tasks/executions/current | +| `_links.tasks/executions/execution.href` |`String` | Link to the tasks/executions/execution | +| `_links.tasks/executions/execution.templated` |`Boolean`| Link tasks/executions/execution is templated | +| `_links.tasks/info/executions.href` |`String` | Link to the tasks/info/executions | +| `_links.tasks/info/executions.templated` |`Boolean`| Link tasks/info is templated | +| `_links.tasks/logs.href` |`String` | Link to the tasks/logs | +| `_links.tasks/logs.templated` |`Boolean`| Link tasks/logs is templated | +| `_links.tasks/schedules.href` |`String` | Link to the tasks/executions/schedules | +| `_links.tasks/schedules/instances.href` |`String` | Link to the tasks/schedules/instances | +| `_links.tasks/schedules/instances.templated` |`Boolean`| Link tasks/schedules/instances is templated | +| `_links.tasks/validation.href` |`String` | Link to the tasks/validation | +| `_links.tasks/validation.templated` |`Boolean`| Link tasks/validation is templated | +| `_links.jobs/executions.href` |`String` | Link to the jobs/executions | +| `_links.jobs/thinexecutions.href` |`String` | Link to the jobs/thinexecutions | +| `_links.jobs/executions/name.href` |`String` | Link to the jobs/executions/name | +| `_links.jobs/executions/name.templated` |`Boolean`| Link jobs/executions/name is templated | +| `_links.jobs/executions/status.href` |`String` | Link to the jobs/executions/status | +| `_links.jobs/executions/status.templated` |`Boolean`| Link jobs/executions/status is 
templated | +| `_links.jobs/thinexecutions/name.href` |`String` | Link to the jobs/thinexecutions/name | +| `_links.jobs/thinexecutions/name.templated` |`Boolean`| Link jobs/executions/name is templated | +| `_links.jobs/thinexecutions/jobInstanceId.href` |`String` | Link to the jobs/thinexecutions/jobInstanceId | +| `_links.jobs/thinexecutions/jobInstanceId.templated` |`Boolean`| Link jobs/executions/jobInstanceId is templated | +| `_links.jobs/thinexecutions/taskExecutionId.href` |`String` | Link to the jobs/thinexecutions/taskExecutionId | +| `_links.jobs/thinexecutions/taskExecutionId.templated` |`Boolean`| Link jobs/executions/taskExecutionId is templated | +| `_links.jobs/executions/execution.href` |`String` | Link to the jobs/executions/execution | +| `_links.jobs/executions/execution.templated` |`Boolean`| Link jobs/executions/execution is templated | +| `_links.jobs/executions/execution/steps.href` |`String` | Link to the jobs/executions/execution/steps | +| `_links.jobs/executions/execution/steps.templated` |`Boolean`| Link jobs/executions/execution/steps is templated | +| `_links.jobs/executions/execution/steps/step.href` |`String` | Link to the jobs/executions/execution/steps/step | +| `_links.jobs/executions/execution/steps/step.templated` |`Boolean`| Link jobs/executions/execution/steps/step is templated | +| `_links.jobs/executions/execution/steps/step/progress.href` |`String` | Link to the jobs/executions/execution/steps/step/progress | +| `_links.jobs/executions/execution/steps/step/progress.templated` |`Boolean`| Link jobs/executions/execution/steps/step/progress is templated | +| `_links.jobs/instances/name.href` |`String` | Link to the jobs/instances/name | +| `_links.jobs/instances/name.templated` |`Boolean`| Link jobs/instances/name is templated | +| `_links.jobs/instances/instance.href` |`String` | Link to the jobs/instances/instance | +| `_links.jobs/instances/instance.templated` |`Boolean`| Link jobs/instances/instance is templated | +| 
`_links.tools/parseTaskTextToGraph.href` |`String` | Link to the tools/parseTaskTextToGraph | +| `_links.tools/convertTaskGraphToText.href` |`String` | Link to the tools/convertTaskGraphToText | +| `_links.apps.href` |`String` | Link to the apps | +| `_links.about.href` |`String` | Link to the about | +| `_links.completions/stream.href` |`String` | Link to the completions/stream | +| `_links.completions/stream.templated` |`Boolean`| Link completions/stream is templated | +| `_links.completions/task.href` |`String` | Link to the completions/task | +| `_links.completions/task.templated` |`Boolean`| Link completions/task is templated | + +##### Example Response + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 7064 + +{ + "_links" : { + "dashboard" : { + "href" : "http://localhost:9393/dashboard" + }, + "audit-records" : { + "href" : "http://localhost:9393/audit-records" + }, + "streams/definitions" : { + "href" : "http://localhost:9393/streams/definitions" + }, + "streams/definitions/definition" : { + "href" : "http://localhost:9393/streams/definitions/{name}", + "templated" : true + }, + "streams/validation" : { + "href" : "http://localhost:9393/streams/validation/{name}", + "templated" : true + }, + "runtime/streams" : { + "href" : "http://localhost:9393/runtime/streams{?names}", + "templated" : true + }, + "runtime/streams/{streamNames}" : { + "href" : "http://localhost:9393/runtime/streams/{streamNames}", + "templated" : true + }, + "runtime/apps" : { + "href" : "http://localhost:9393/runtime/apps" + }, + "runtime/apps/{appId}" : { + "href" : "http://localhost:9393/runtime/apps/{appId}", + "templated" : true + }, + "runtime/apps/{appId}/instances" : { + "href" : "http://localhost:9393/runtime/apps/{appId}/instances", + "templated" : true + }, + "runtime/apps/{appId}/instances/{instanceId}" : { + "href" : "http://localhost:9393/runtime/apps/{appId}/instances/{instanceId}", + "templated" : true + }, + "streams/deployments" : { + "href" : 
"http://localhost:9393/streams/deployments" + }, + "streams/deployments/{name}{?reuse-deployment-properties}" : { + "href" : "http://localhost:9393/streams/deployments/{name}?reuse-deployment-properties=false", + "templated" : true + }, + "streams/deployments/{name}" : { + "href" : "http://localhost:9393/streams/deployments/{name}", + "templated" : true + }, + "streams/deployments/history/{name}" : { + "href" : "http://localhost:9393/streams/deployments/history/{name}", + "templated" : true + }, + "streams/deployments/manifest/{name}/{version}" : { + "href" : "http://localhost:9393/streams/deployments/manifest/{name}/{version}", + "templated" : true + }, + "streams/deployments/platform/list" : { + "href" : "http://localhost:9393/streams/deployments/platform/list" + }, + "streams/deployments/rollback/{name}/{version}" : { + "href" : "http://localhost:9393/streams/deployments/rollback/{name}/{version}", + "templated" : true + }, + "streams/deployments/update/{name}" : { + "href" : "http://localhost:9393/streams/deployments/update/{name}", + "templated" : true + }, + "streams/deployments/deployment" : { + "href" : "http://localhost:9393/streams/deployments/{name}", + "templated" : true + }, + "streams/deployments/scale/{streamName}/{appName}/instances/{count}" : { + "href" : "http://localhost:9393/streams/deployments/scale/{streamName}/{appName}/instances/{count}", + "templated" : true + }, + "streams/logs" : { + "href" : "http://localhost:9393/streams/logs" + }, + "streams/logs/{streamName}" : { + "href" : "http://localhost:9393/streams/logs/{streamName}", + "templated" : true + }, + "streams/logs/{streamName}/{appName}" : { + "href" : "http://localhost:9393/streams/logs/{streamName}/{appName}", + "templated" : true + }, + "tasks/platforms" : { + "href" : "http://localhost:9393/tasks/platforms" + }, + "tasks/definitions" : { + "href" : "http://localhost:9393/tasks/definitions" + }, + "tasks/definitions/definition" : { + "href" : 
"http://localhost:9393/tasks/definitions/{name}", + "templated" : true + }, + "tasks/executions" : { + "href" : "http://localhost:9393/tasks/executions" + }, + "tasks/executions/name" : { + "href" : "http://localhost:9393/tasks/executions{?name}", + "templated" : true + }, + "tasks/executions/current" : { + "href" : "http://localhost:9393/tasks/executions/current" + }, + "tasks/executions/execution" : { + "href" : "http://localhost:9393/tasks/executions/{id}", + "templated" : true + }, + "tasks/validation" : { + "href" : "http://localhost:9393/tasks/validation/{name}", + "templated" : true + }, + "tasks/info/executions" : { + "href" : "http://localhost:9393/tasks/info/executions{?completed,name}", + "templated" : true + }, + "tasks/logs" : { + "href" : "http://localhost:9393/tasks/logs/{taskExternalExecutionId}{?platformName}", + "templated" : true + }, + "tasks/schedules" : { + "href" : "http://localhost:9393/tasks/schedules" + }, + "tasks/schedules/instances" : { + "href" : "http://localhost:9393/tasks/schedules/instances/{taskDefinitionName}", + "templated" : true + }, + "jobs/executions" : { + "href" : "http://localhost:9393/jobs/executions" + }, + "jobs/executions/name" : { + "href" : "http://localhost:9393/jobs/executions{?name}", + "templated" : true + }, + "jobs/executions/status" : { + "href" : "http://localhost:9393/jobs/executions{?status}", + "templated" : true + }, + "jobs/executions/execution" : { + "href" : "http://localhost:9393/jobs/executions/{id}", + "templated" : true + }, + "jobs/executions/execution/steps" : { + "href" : "http://localhost:9393/jobs/executions/{jobExecutionId}/steps", + "templated" : true + }, + "jobs/executions/execution/steps/step" : { + "href" : "http://localhost:9393/jobs/executions/{jobExecutionId}/steps/{stepId}", + "templated" : true + }, + "jobs/executions/execution/steps/step/progress" : { + "href" : "http://localhost:9393/jobs/executions/{jobExecutionId}/steps/{stepId}/progress", + "templated" : true + }, + 
"jobs/instances/name" : { + "href" : "http://localhost:9393/jobs/instances{?name}", + "templated" : true + }, + "jobs/instances/instance" : { + "href" : "http://localhost:9393/jobs/instances/{id}", + "templated" : true + }, + "tools/parseTaskTextToGraph" : { + "href" : "http://localhost:9393/tools" + }, + "tools/convertTaskGraphToText" : { + "href" : "http://localhost:9393/tools" + }, + "jobs/thinexecutions" : { + "href" : "http://localhost:9393/jobs/thinexecutions" + }, + "jobs/thinexecutions/name" : { + "href" : "http://localhost:9393/jobs/thinexecutions{?name}", + "templated" : true + }, + "jobs/thinexecutions/jobInstanceId" : { + "href" : "http://localhost:9393/jobs/thinexecutions{?jobInstanceId}", + "templated" : true + }, + "jobs/thinexecutions/taskExecutionId" : { + "href" : "http://localhost:9393/jobs/thinexecutions{?taskExecutionId}", + "templated" : true + }, + "apps" : { + "href" : "http://localhost:9393/apps" + }, + "about" : { + "href" : "http://localhost:9393/about" + }, + "completions/stream" : { + "href" : "http://localhost:9393/completions/stream{?start,detailLevel}", + "templated" : true + }, + "completions/task" : { + "href" : "http://localhost:9393/completions/task{?start,detailLevel}", + "templated" : true + } + }, + "api.revision" : 14 +} +``` + +##### Links + +The main element of the index are the links, as they let you traverse the API +and execute the desired functionality: + +| Relation | Description | +|--------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `about` | Access meta information, including enabled features, security info, version information | +| `dashboard` | Access the dashboard UI | +| `audit-records` | Provides audit trail information | +| `apps` | Handle registered applications | +| `completions/stream` | Exposes the DSL 
completion features for Stream | +| `completions/task` | Exposes the DSL completion features for Task | +| `jobs/executions` | Provides the JobExecution resource | +| `jobs/thinexecutions` | Provides the JobExecution thin resource with no step executions included | +| `jobs/executions/execution` | Provides details for a specific JobExecution | +| `jobs/executions/execution/steps` | Provides the steps for a JobExecution | +| `jobs/executions/execution/steps/step` | Returns the details for a specific step | +| `jobs/executions/execution/steps/step/progress` | Provides progress information for a specific step | +| `jobs/executions/name` | Retrieve Job Executions by Job name | +| `jobs/executions/status` | Retrieve Job Executions by Job status | +| `jobs/thinexecutions/name` | Retrieve Job Executions by Job name with no step executions included | +| `jobs/thinexecutions/jobInstanceId` | Retrieve Job Executions by Job Instance Id with no step executions included | +| `jobs/thinexecutions/taskExecutionId` | Retrieve Job Executions by Task Execution Id with no step executions included | +| `jobs/instances/instance` | Provides the job instance resource for a specific job instance | +| `jobs/instances/name` | Provides the Job instance resource for a specific job name | +| `runtime/streams` | Exposes stream runtime status | +| `runtime/streams/{streamNames}` | Exposes streams runtime status for a given stream names | +| `runtime/apps` | Provides the runtime application resource | +| `runtime/apps/{appId}` | Exposes the runtime status for a specific app | +| `runtime/apps/{appId}/instances` | Provides the status for app instances | +| `runtime/apps/{appId}/instances/{instanceId}` | Provides the status for specific app instance | +| `tasks/definitions` | Provides the task definition resource | +| `tasks/definitions/definition` | Provides details for a specific task definition | +| `tasks/validation` | Provides the validation for a task definition | +| `tasks/executions` | 
Returns Task executions and allows launching of tasks | +| `tasks/executions/current` | Provides the current count of running tasks | +| `tasks/info/executions` | Provides the task executions info | +| `tasks/schedules` | Provides schedule information of tasks | +| `tasks/schedules/instances` | Provides schedule information of a specific task | +| `tasks/executions/name` | Returns all task executions for a given Task name | +| `tasks/executions/execution` | Provides details for a specific task execution | +| `tasks/platforms` |Provides platform accounts for launching tasks. The results can be filtered to show the platforms that support scheduling by adding a request parameter of 'schedulesEnabled=true| +| `tasks/logs` | Retrieve the task application log | +| `streams/definitions` | Exposes the Streams resource | +| `streams/definitions/definition` | Handle a specific Stream definition | +| `streams/validation` | Provides the validation for a stream definition | +| `streams/deployments` | Provides Stream deployment operations | +| `streams/deployments/{name}` | Request deployment info for a stream definition | +| `streams/deployments/{name}{?reuse-deployment-properties}` | Request deployment info for a stream definition | +| `streams/deployments/deployment` | Request (un-)deployment of an existing stream definition | +| `streams/deployments/manifest/{name}/{version}` | Return a manifest info of a release version | +| `streams/deployments/history/{name}` | Get stream’s deployment history as list or Releases for this release | +| `streams/deployments/rollback/{name}/{version}` | Rollback the stream to the previous or a specific version of the stream | +| `streams/deployments/update/{name}` | Update the stream. 
| +| `streams/deployments/platform/list` | List of supported deployment platforms | +|`streams/deployments/scale/{streamName}/{appName}/instances/{count}`| Scale up or down number of application instances for a selected stream | +| `streams/logs` | Retrieve application logs of the stream | +| `streams/logs/{streamName}` | Retrieve application logs of the stream | +| `streams/logs/{streamName}/{appName}` | Retrieve a specific application log of the stream | +| `tools/parseTaskTextToGraph` | Parse a task definition into a graph structure | +| `tools/convertTaskGraphToText` | Convert a graph format into DSL text format | + +### 44.2. Server Meta Information + +The server meta information endpoint provides more information about the server itself. +The following topics provide more details: + +* [Retrieving information about the server](#api-guide-resources-server-meta-retrieving) + +* [Request Structure](#api-guide-resources-server-meta-request-structure) + +* [Example Request](#api-guide-resources-server-meta-example-request) + +* [Response Structure](#api-guide-resources-server-meta-response-structure) + +#### 44.2.1. 
Retrieving information about the server + +A `GET` request returns meta information for Spring Cloud Data Flow, including: + +* Runtime environment information + +* Information regarding which features are enabled + +* Dependency information of Spring Cloud Data Flow Server + +* Security information + +##### Request Structure + +``` +GET /about HTTP/1.1 +Accept: application/json +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/about' -i -X GET \ + -H 'Accept: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 2598 + +{ + "featureInfo" : { + "analyticsEnabled" : true, + "streamsEnabled" : true, + "tasksEnabled" : true, + "schedulesEnabled" : true, + "monitoringDashboardType" : "NONE" + }, + "versionInfo" : { + "implementation" : { + "name" : "${info.app.name}", + "version" : "${info.app.version}" + }, + "core" : { + "name" : "Spring Cloud Data Flow Core", + "version" : "2.9.2" + }, + "dashboard" : { + "name" : "Spring Cloud Dataflow UI", + "version" : "3.2.2" + }, + "shell" : { + "name" : "Spring Cloud Data Flow Shell", + "version" : "2.9.2", + "url" : "https://repo1.maven.org/maven2/org/springframework/cloud/spring-cloud-dataflow-shell/2.9.2/spring-cloud-dataflow-shell-2.9.2.jar" + } + }, + "securityInfo" : { + "authenticationEnabled" : false, + "authenticated" : false, + "username" : null, + "roles" : [ ] + }, + "runtimeEnvironment" : { + "appDeployer" : { + "deployerImplementationVersion" : "Test Version", + "deployerName" : "Test Server", + "deployerSpiVersion" : "2.8.2", + "javaVersion" : "1.8.0_322", + "platformApiVersion" : "", + "platformClientVersion" : "", + "platformHostVersion" : "", + "platformSpecificInfo" : { + "default" : "local" + }, + "platformType" : "Skipper Managed", + "springBootVersion" : "2.5.8", + "springVersion" : "5.3.14" + }, + "taskLaunchers" : [ { + "deployerImplementationVersion" : "2.7.2", + "deployerName" : 
"LocalTaskLauncher", + "deployerSpiVersion" : "2.7.2", + "javaVersion" : "1.8.0_322", + "platformApiVersion" : "Linux 5.11.0-1025-azure", + "platformClientVersion" : "5.11.0-1025-azure", + "platformHostVersion" : "5.11.0-1025-azure", + "platformSpecificInfo" : { }, + "platformType" : "Local", + "springBootVersion" : "2.5.8", + "springVersion" : "5.3.14" + }, { + "deployerImplementationVersion" : "2.7.2", + "deployerName" : "LocalTaskLauncher", + "deployerSpiVersion" : "2.7.2", + "javaVersion" : "1.8.0_322", + "platformApiVersion" : "Linux 5.11.0-1025-azure", + "platformClientVersion" : "5.11.0-1025-azure", + "platformHostVersion" : "5.11.0-1025-azure", + "platformSpecificInfo" : { }, + "platformType" : "Local", + "springBootVersion" : "2.5.8", + "springVersion" : "5.3.14" + } ] + }, + "monitoringDashboardInfo" : { + "url" : "", + "refreshInterval" : 15, + "dashboardType" : "NONE", + "source" : "default-scdf-source" + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/about" + } + } +} +``` + +### 44.3. Registered Applications + +The registered applications endpoint provides information about the applications that are registered with the Spring Cloud Data Flow server. +The following topics provide more details: + +* [Listing Applications](#resources-app-registry-list) + +* [Getting Information on a Particular Application](#resources-app-registry-get) + +* [Registering a New Application](#resources-app-registry-post) + +* [Registering a New Application with version](#resources-app-registry-post-versioned) + +* [Registering Applications in Bulk](#resources-app-registry-bulk) + +* [Set the Default Application Version](#resources-app-registry-default) + +* [Unregistering an Application](#resources-app-registry-delete) + +* [Unregistering all Applications](#resources-app-registry-delete-all) + +#### 44.3.1. Listing Applications + +A `GET` request lists all of the applications known to Spring Cloud Data Flow. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-app-registry-request-structure) + +* [Request Parameters](#api-guide-resources-app-registry-request-parameters) + +* [Example Request](#api-guide-resources-app-registry-example-request) + +* [Response Structure](#api-guide-resources-app-registry-response-structure) + +##### Request Structure + +``` +GET /apps?search=&type=source&defaultVersion=true&page=0&size=10&sort=name%2CASC HTTP/1.1 +Accept: application/json +Host: localhost:9393 +``` + +##### Request Parameters + +| Parameter | Description | +|----------------|----------------------------------------------------------------------------------------------| +| `search` | The search string performed on the name (optional) | +| `type` |Restrict the returned apps to the type of the app. One of [app, source, processor, sink, task]| +|`defaultVersion`| The boolean flag to set to retrieve only the apps of the default versions (optional) | +| `page` | The zero-based page number (optional) | +| `sort` | The sort on the list (optional) | +| `size` | The requested page size (optional) | + +##### Example Request + +``` +$ curl 'http://localhost:9393/apps?search=&type=source&defaultVersion=true&page=0&size=10&sort=name%2CASC' -i -X GET \ + -H 'Accept: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 1097 + +{ + "_embedded" : { + "appRegistrationResourceList" : [ { + "name" : "http", + "type" : "source", + "uri" : "maven://org.springframework.cloud.stream.app:http-source-rabbit:1.2.0.RELEASE", + "version" : "1.2.0.RELEASE", + "defaultVersion" : true, + "versions" : [ "1.2.0.RELEASE" ], + "label" : null, + "_links" : { + "self" : { + "href" : "http://localhost:9393/apps/source/http/1.2.0.RELEASE" + } + } + }, { + "name" : "time", + "type" : "source", + "uri" : "maven://org.springframework.cloud.stream.app:time-source-rabbit:1.2.0.RELEASE", + "version" : 
"1.2.0.RELEASE", + "defaultVersion" : true, + "versions" : [ "1.2.0.RELEASE" ], + "label" : null, + "_links" : { + "self" : { + "href" : "http://localhost:9393/apps/source/time/1.2.0.RELEASE" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/apps?page=0&size=10&sort=name,asc" + } + }, + "page" : { + "size" : 10, + "totalElements" : 2, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.3.2. Getting Information on a Particular Application + +A `GET` request on `/apps//` gets info on a particular application. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-app-registry-get-request-structure) + +* [Path Parameters](#api-guide-resources-app-registry-get-path-parameters) + +* [Example Request](#api-guide-resources-app-registry-get-example-request) + +* [Response Structure](#api-guide-resources-app-registry-get-response-structure) + +##### Request Structure + +``` +GET /apps/source/http?exhaustive=false HTTP/1.1 +Accept: application/json +Host: localhost:9393 +``` + +##### Request Parameters + +| Parameter | Description | +|------------|--------------------------------------------------------------------------| +|`exhaustive`|Return all application properties, including common Spring Boot properties| + +##### Path Parameters + +/apps/{type}/{name} + +|Parameter| Description | +|---------|-----------------------------------------------------------------------------| +| `type` |The type of application to query. 
One of [app, source, processor, sink, task]| +| `name` | The name of the application to query | + +##### Example Request + +``` +$ curl 'http://localhost:9393/apps/source/http?exhaustive=false' -i -X GET \ + -H 'Accept: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 2100 + +{ + "name" : "http", + "type" : "source", + "uri" : "maven://org.springframework.cloud.stream.app:http-source-rabbit:1.2.0.RELEASE", + "version" : "1.2.0.RELEASE", + "defaultVersion" : true, + "versions" : null, + "label" : null, + "options" : [ { + "id" : "http.path-pattern", + "name" : "path-pattern", + "type" : "java.lang.String", + "description" : "An Ant-Style pattern to determine which http requests will be captured.", + "shortDescription" : "An Ant-Style pattern to determine which http requests will be captured.", + "defaultValue" : "/", + "hints" : { + "keyHints" : [ ], + "keyProviders" : [ ], + "valueHints" : [ ], + "valueProviders" : [ ] + }, + "deprecation" : null, + "deprecated" : false + }, { + "id" : "http.mapped-request-headers", + "name" : "mapped-request-headers", + "type" : "java.lang.String[]", + "description" : "Headers that will be mapped.", + "shortDescription" : "Headers that will be mapped.", + "defaultValue" : null, + "hints" : { + "keyHints" : [ ], + "keyProviders" : [ ], + "valueHints" : [ ], + "valueProviders" : [ ] + }, + "deprecation" : null, + "deprecated" : false + }, { + "id" : "http.secured", + "name" : "secured", + "type" : "java.lang.Boolean", + "description" : "Secure or not HTTP source path.", + "shortDescription" : "Secure or not HTTP source path.", + "defaultValue" : false, + "hints" : { + "keyHints" : [ ], + "keyProviders" : [ ], + "valueHints" : [ ], + "valueProviders" : [ ] + }, + "deprecation" : null, + "deprecated" : false + }, { + "id" : "server.port", + "name" : "port", + "type" : "java.lang.Integer", + "description" : "Server HTTP port.", + "shortDescription" : "Server 
HTTP port.", + "defaultValue" : null, + "hints" : { + "keyHints" : [ ], + "keyProviders" : [ ], + "valueHints" : [ ], + "valueProviders" : [ ] + }, + "deprecation" : null, + "deprecated" : false + } ], + "shortDescription" : null, + "inboundPortNames" : [ ], + "outboundPortNames" : [ ], + "optionGroups" : { } +} +``` + +#### 44.3.3. Registering a New Application + +A `POST` request on `/apps//` allows registration of a new application. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-app-registry-post-request-structure) + +* [Request Parameters](#api-guide-resources-app-registry-post-request-parameters) + +* [Path Parameters](#api-guide-resources-app-registry-post-path-parameters) + +* [Example Request](#api-guide-resources-app-registry-post-example-request) + +* [Response Structure](#api-guide-resources-app-registry-post-response-structure) + +##### Request Structure + +``` +POST /apps/source/http HTTP/1.1 +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +uri=maven%3A%2F%2Forg.springframework.cloud.stream.app%3Ahttp-source-rabbit%3A1.1.0.RELEASE +``` + +##### Request Parameters + +| Parameter | Description | +|--------------|--------------------------------------------------------------------------------------------------------| +| `uri` | URI where the application bits reside | +|`metadata-uri`| URI where the application metadata jar can be found | +| `force` |Must be true if a registration with the same name and type already exists, otherwise an error will occur| + +##### Path Parameters + +/apps/{type}/{name} + +|Parameter| Description | +|---------|--------------------------------------------------------------------------------| +| `type` |The type of application to register. 
One of [app, source, processor, sink, task]| +| `name` | The name of the application to register | + +##### Example Request + +``` +$ curl 'http://localhost:9393/apps/source/http' -i -X POST \ + -d 'uri=maven%3A%2F%2Forg.springframework.cloud.stream.app%3Ahttp-source-rabbit%3A1.1.0.RELEASE' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +``` + +#### 44.3.4. Registering a New Application with version + +A `POST` request on `/apps///` allows registration of a new application. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-app-registry-post-versioned-request-structure) + +* [Request Parameters](#api-guide-resources-app-registry-post-versioned-request-parameters) + +* [Path Parameters](#api-guide-resources-app-registry-post-versioned-path-parameters) + +* [Example Request](#api-guide-resources-app-registry-post-versioned-example-request) + +* [Response Structure](#api-guide-resources-app-registry-post-versioned-response-structure) + +##### Request Structure + +``` +POST /apps/source/http/1.1.0.RELEASE HTTP/1.1 +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +uri=maven%3A%2F%2Forg.springframework.cloud.stream.app%3Ahttp-source-rabbit%3A1.1.0.RELEASE +``` + +##### Request Parameters + +| Parameter | Description | +|--------------|--------------------------------------------------------------------------------------------------------| +| `uri` | URI where the application bits reside | +|`metadata-uri`| URI where the application metadata jar can be found | +| `force` |Must be true if a registration with the same name and type already exists, otherwise an error will occur| + +##### Path Parameters + +/apps/{type}/{name}/{version:.+} + +|Parameter| Description | +|---------|-------------------------------------------------------------------------------------------| +| `type` |The type of application to register. 
One of [app, source, processor, sink, task] (optional)| +| `name` | The name of the application to register | +|`version`| The version of the application to register | + +##### Example Request + +``` +$ curl 'http://localhost:9393/apps/source/http/1.1.0.RELEASE' -i -X POST \ + -d 'uri=maven%3A%2F%2Forg.springframework.cloud.stream.app%3Ahttp-source-rabbit%3A1.1.0.RELEASE' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +``` + +#### 44.3.5. Registering Applications in Bulk + +A `POST` request on `/apps` allows registering multiple applications at once. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-app-registry-bulk-request-structure) + +* [Request Parameters](#api-guide-resources-app-registry-bulk-request-parameters) + +* [Example Request](#api-guide-resources-app-registry-bulk-example-request) + +* [Response Structure](#api-guide-resources-app-registry-bulk-response-structure) + +##### Request Structure + +``` +POST /apps HTTP/1.1 +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +apps=source.http%3Dmaven%3A%2F%2Forg.springframework.cloud.stream.app%3Ahttp-source-rabbit%3A1.1.0.RELEASE&force=false +``` + +##### Request Parameters + +|Parameter| Description | +|---------|--------------------------------------------------------------------------------------------------------| +| `uri` | URI where a properties file containing registrations can be fetched. Exclusive with `apps`. | +| `apps` | Inline set of registrations. Exclusive with `uri`. 
| +| `force` |Must be true if a registration with the same name and type already exists, otherwise an error will occur| + +##### Example Request + +``` +$ curl 'http://localhost:9393/apps' -i -X POST \ + -d 'apps=source.http%3Dmaven%3A%2F%2Forg.springframework.cloud.stream.app%3Ahttp-source-rabbit%3A1.1.0.RELEASE&force=false' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +Content-Type: application/hal+json +Content-Length: 658 + +{ + "_embedded" : { + "appRegistrationResourceList" : [ { + "name" : "http", + "type" : "source", + "uri" : "maven://org.springframework.cloud.stream.app:http-source-rabbit:1.1.0.RELEASE", + "version" : "1.1.0.RELEASE", + "defaultVersion" : true, + "versions" : null, + "label" : null, + "_links" : { + "self" : { + "href" : "http://localhost:9393/apps/source/http/1.1.0.RELEASE" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/apps?page=0&size=20" + } + }, + "page" : { + "size" : 20, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.3.6. Set the Default Application Version + +For an application with the same `name` and `type`, you can register multiple versions. +In this case, you can choose one of the versions as the default application. + +The following topics provide more details: + +* [Request Structure](#api-guide-resources-app-registry-default-request-structure) + +* [Path Parameters](#api-guide-resources-app-registry-default-path-parameters) + +* [Example Request](#api-guide-resources-app-registry-default-example-request) + +* [Response Structure](#api-guide-resources-app-registry-default-response-structure) + +##### Request Structure + +``` +PUT /apps/source/http/1.2.0.RELEASE HTTP/1.1 +Accept: application/json +Host: localhost:9393 +``` + +##### Path Parameters + +/apps/{type}/{name}/{version:.+} + +|Parameter| Description | +|---------|--------------------------------------------------------------------| +| `type` |The type of application. 
One of [app, source, processor, sink, task]| +| `name`  |                    The name of the application                     | +|`version`|                  The version of the application                    | + +##### Example Request + +``` +$ curl 'http://localhost:9393/apps/source/http/1.2.0.RELEASE' -i -X PUT \ +    -H 'Accept: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 202 Accepted +``` + +#### 44.3.7. Unregistering an Application + +A `DELETE` request on `/apps/{type}/{name}` unregisters a previously registered application. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-app-registry-delete-request-structure) + +* [Path Parameters](#api-guide-resources-app-registry-delete-path-parameters) + +* [Example Request](#api-guide-resources-app-registry-delete-example-request) + +* [Response Structure](#api-guide-resources-app-registry-delete-response-structure) + +##### Request Structure + +``` +DELETE /apps/source/http/1.2.0.RELEASE HTTP/1.1 +Host: localhost:9393 +``` + +##### Path Parameters + +/apps/{type}/{name}/{version} + +|Parameter|                                   Description                                    | +|---------|----------------------------------------------------------------------------------| +| `type`  |The type of application to unregister. One of [app, source, processor, sink, task]| +| `name`  |                    The name of the application to unregister                     | +|`version`|                  The version of the application to unregister (optional)                  | + +##### Example Request + +``` +$ curl 'http://localhost:9393/apps/source/http/1.2.0.RELEASE' -i -X DELETE +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +#### 44.3.8. Unregistering all Applications + +A `DELETE` request on `/apps` unregisters all the applications. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-app-registry-delete-all-request-structure) + +* [Example Request](#api-guide-resources-app-registry-delete-all-example-request) + +* [Response Structure](#api-guide-resources-app-registry-delete-all-response-structure) + +##### Request Structure + +``` +DELETE /apps HTTP/1.1 +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/apps' -i -X DELETE +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +### 44.4. Audit Records + +The audit records endpoint provides information about the audit records. The following topics provide more details: + +* [List All Audit Records](#api-guide-resources-audit-records-list) + +* [Retrieve Audit Record Detail](#api-guide-resources-audit-record-get) + +* [List all the Audit Action Types](#api-guide-resources-audit-action-types) + +* [List all the Audit Operation Types](#api-guide-resources-audit-operation-types) + +#### 44.4.1. List All Audit Records + +The audit records endpoint lets you retrieve audit trail information. 
+ +The following topics provide more details: + +* [Request Structure](#api-guide-resources-audit-records-list-request-structure) + +* [Request Parameters](#api-guide-resources-audit-records-list-request-parameters) + +* [Example Request](#api-guide-resources-audit-records-list-example-request) + +* [Response Structure](#api-guide-resources-audit-records-list-response-structure) + +##### Request Structure + +``` +GET /audit-records?page=0&size=10&operations=STREAM&actions=CREATE&fromDate=2000-01-01T00%3A00%3A00&toDate=2099-01-01T00%3A00%3A00 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +| Parameter | Description | +|------------|------------------------------------------------------| +| `page` | The zero-based page number (optional) | +| `size` | The requested page size (optional) | +|`operations`| Comma-separated list of Audit Operations (optional) | +| `actions` | Comma-separated list of Audit Actions (optional) | +| `fromDate` |From date filter (ex.: 2019-02-03T00:00:30) (optional)| +| `toDate` | To date filter (ex.: 2019-02-03T00:00:30) (optional) | + +##### Example Request + +``` +$ curl 'http://localhost:9393/audit-records?page=0&size=10&operations=STREAM&actions=CREATE&fromDate=2000-01-01T00%3A00%3A00&toDate=2099-01-01T00%3A00%3A00' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 680 + +{ + "_embedded" : { + "auditRecordResourceList" : [ { + "auditRecordId" : 5, + "createdBy" : null, + "correlationId" : "timelog", + "auditData" : "time --format='YYYY MM DD' | log", + "createdOn" : "2022-01-18T18:52:24.663Z", + "auditAction" : "CREATE", + "auditOperation" : "STREAM", + "platformName" : null, + "_links" : { + "self" : { + "href" : "http://localhost:9393/audit-records/5" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/audit-records?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" 
: 0 + } +} +``` + +#### 44.4.2. Retrieve Audit Record Detail + +The audit record endpoint lets you get a single audit record. The following topics provide more details: + +* [Request Structure](#api-guide-resources-audit-record-get-request-structure) + +* [Path Parameters](#api-guide-resources-audit-record-get-path-parameters) + +* [Example Request](#api-guide-resources-audit-record-get-example-request) + +* [Response Structure](#api-guide-resources-audit-record-get-response-structure) + +##### Request Structure + +``` +GET /audit-records/5 HTTP/1.1 +Host: localhost:9393 +``` + +##### Path Parameters + +/audit-records/{id} + +|Parameter| Description | +|---------|----------------------------------------------| +| `id` |The id of the audit record to query (required)| + +##### Example Request + +``` +$ curl 'http://localhost:9393/audit-records/5' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 354 + +{ + "auditRecordId" : 5, + "createdBy" : null, + "correlationId" : "timelog", + "auditData" : "time --format='YYYY MM DD' | log", + "createdOn" : "2022-01-18T18:52:24.663Z", + "auditAction" : "CREATE", + "auditOperation" : "STREAM", + "platformName" : null, + "_links" : { + "self" : { + "href" : "http://localhost:9393/audit-records/5" + } + } +} +``` + +#### 44.4.3. List all the Audit Action Types + +The audit record endpoint lets you get the action types. 
The following topics provide more details: + +* [Request Structure](#api-guide-resources-audit-action-types-request-structure) + +* [Example Request](#api-guide-resources-audit-action-types-example-request) + +* [Response Structure](#api-guide-resources-audit-action-types-response-structure) + +##### Request Structure + +``` +GET /audit-records/audit-action-types HTTP/1.1 +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/audit-records/audit-action-types' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 1111 + +[ { + "id" : 100, + "name" : "Create", + "description" : "Create an Entity", + "nameWithDescription" : "Create (Create an Entity)", + "key" : "CREATE" +}, { + "id" : 200, + "name" : "Delete", + "description" : "Delete an Entity", + "nameWithDescription" : "Delete (Delete an Entity)", + "key" : "DELETE" +}, { + "id" : 300, + "name" : "Deploy", + "description" : "Deploy an Entity", + "nameWithDescription" : "Deploy (Deploy an Entity)", + "key" : "DEPLOY" +}, { + "id" : 400, + "name" : "Rollback", + "description" : "Rollback an Entity", + "nameWithDescription" : "Rollback (Rollback an Entity)", + "key" : "ROLLBACK" +}, { + "id" : 500, + "name" : "Undeploy", + "description" : "Undeploy an Entity", + "nameWithDescription" : "Undeploy (Undeploy an Entity)", + "key" : "UNDEPLOY" +}, { + "id" : 600, + "name" : "Update", + "description" : "Update an Entity", + "nameWithDescription" : "Update (Update an Entity)", + "key" : "UPDATE" +}, { + "id" : 700, + "name" : "SuccessfulLogin", + "description" : "Successful login", + "nameWithDescription" : "SuccessfulLogin (Successful login)", + "key" : "LOGIN_SUCCESS" +} ] +``` + +#### 44.4.4. List all the Audit Operation Types + +The audit record endpoint lets you get the operation types. 
The following topics provide more details: + +* [Request Structure](#api-guide-resources-audit-operation-types-request-structure) + +* [Example Request](#api-guide-resources-audit-operation-types-example-request) + +* [Response Structure](#api-guide-resources-audit-operation-types-response-structure) + +##### Request Structure + +``` +GET /audit-records/audit-operation-types HTTP/1.1 +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/audit-records/audit-operation-types' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 315 + +[ { + "id" : 100, + "name" : "App Registration", + "key" : "APP_REGISTRATION" +}, { + "id" : 200, + "name" : "Schedule", + "key" : "SCHEDULE" +}, { + "id" : 300, + "name" : "Stream", + "key" : "STREAM" +}, { + "id" : 400, + "name" : "Task", + "key" : "TASK" +}, { + "id" : 500, + "name" : "Login", + "key" : "LOGIN" +} ] +``` + +### 44.5. Stream Definitions + +The registered applications endpoint provides information about the stream definitions that are registered with the Spring Cloud Data Flow server. +The following topics provide more details: + +* [Creating a New Stream Definition](#api-guide-resources-stream-definitions-create) + +* [List All Stream Definitions](#api-guide-resources-stream-definitions-list) + +* [List Related Stream Definitions](#api-guide-resources-stream-definitions-list-related) + +* [Retrieve Stream Definition Detail](#api-guide-resources-stream-definition-get) + +* [Delete a Single Stream Definition](#api-guide-resources-stream-definitions-delete-one) + +* [Delete All Stream Definitions](#api-guide-resources-stream-definitions-delete-all) + +* [Deploying Stream Definition](#api-guide-resources-stream-deployment-deploy) + +* [Undeploy Stream Definition](#api-guide-resources-stream-deployment-undeploy) + +* [Undeploy All Stream Definitions](#api-guide-resources-stream-deployment-undeploy-all) + +#### 44.5.1. 
Creating a New Stream Definition + +Creating a stream definition is achieved by creating a POST request to the stream definitions endpoint. +A curl request for a `ticktock` stream might resemble the following: + +``` +curl -X POST -d "name=ticktock&definition=time | log" localhost:9393/streams/definitions?deploy=false +``` + +A stream definition can also contain additional parameters. +For instance, in the example shown under “[Request Structure](#api-guide-resources-stream-definitions-create-request-structure)”, we also provide the date-time format. + +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definitions-create-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-create-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-create-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-create-response-structure) + +##### Request Structure + +``` +POST /streams/definitions HTTP/1.1 +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +name=timelog&definition=time+--format%3D%27YYYY+MM+DD%27+%7C+log&description=Demo+stream+for+testing&deploy=false +``` + +##### Request Parameters + +|  Parameter  |                          Description                           | +|-------------|----------------------------------------------------------------| +|   `name`    |           The name for the created stream definition           | +|`definition` |       The definition for the stream, using Data Flow DSL       | +|`description`|            The description of the stream definition            | +|  `deploy`   |If true, the stream is deployed upon creation (default is false)| + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/definitions' -i -X POST \ +    -d 'name=timelog&definition=time+--format%3D%27YYYY+MM+DD%27+%7C+log&description=Demo+stream+for+testing&deploy=false' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +Content-Type: application/hal+json +Content-Length: 410 + +{ +  "name" : "timelog", 
+ "dslText" : "time --format='YYYY MM DD' | log", + "originalDslText" : "time --format='YYYY MM DD' | log", + "status" : "undeployed", + "description" : "Demo stream for testing", + "statusDescription" : "The app or group is known to the system, but is not currently deployed", + "_links" : { + "self" : { + "href" : "http://localhost:9393/streams/definitions/timelog" + } + } +} +``` + +#### 44.5.2. List All Stream Definitions + +The streams endpoint lets you list all the stream definitions. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definitions-list-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-list-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-list-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-list-response-structure) + +##### Request Structure + +``` +GET /streams/definitions?page=0&sort=name%2CASC&search=&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|--------------------------------------------------| +| `page` | The zero-based page number (optional) | +|`search` |The search string performed on the name (optional)| +| `sort` | The sort on the list (optional) | +| `size` | The requested page size (optional) | + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/definitions?page=0&sort=name%2CASC&search=&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 1160 + +{ + "_embedded" : { + "streamDefinitionResourceList" : [ { + "name" : "mysamplestream", + "dslText" : "time | log", + "originalDslText" : "time | log", + "status" : "undeployed", + "description" : "", + "statusDescription" : "The app or group is known to the system, but is not currently deployed", + "_links" : { + "self" : { + "href" : 
"http://localhost:9393/streams/definitions/mysamplestream" + } + } + }, { + "name" : "timelog", + "dslText" : "time --format='YYYY MM DD' | log", + "originalDslText" : "time --format='YYYY MM DD' | log", + "status" : "undeployed", + "description" : "Demo stream for testing", + "statusDescription" : "The app or group is known to the system, but is not currently deployed", + "_links" : { + "self" : { + "href" : "http://localhost:9393/streams/definitions/timelog" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/streams/definitions?page=0&size=10&sort=name,asc" + } + }, + "page" : { + "size" : 10, + "totalElements" : 2, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.5.3. List Related Stream Definitions + +The streams endpoint lets you list related stream definitions. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definitions-list-related-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-list-related-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-list-related-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-list-related-response-structure) + +##### Request Structure + +``` +GET /streams/definitions/timelog/related?page=0&sort=name%2CASC&search=&size=10&nested=true HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|--------------------------------------------------------------------------------------| +|`nested` |Should we recursively findByTaskNameContains for related stream definitions (optional)| +| `page` | The zero-based page number (optional) | +|`search` | The search string performed on the name (optional) | +| `sort` | The sort on the list (optional) | +| `size` | The requested page size (optional) | + +##### Example Request + +``` +$ curl 
'http://localhost:9393/streams/definitions/timelog/related?page=0&sort=name%2CASC&search=&size=10&nested=true' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 769 + +{ + "_embedded" : { + "streamDefinitionResourceList" : [ { + "name" : "timelog", + "dslText" : "time --format='YYYY MM DD' | log", + "originalDslText" : "time --format='YYYY MM DD' | log", + "status" : "undeployed", + "description" : "Demo stream for testing", + "statusDescription" : "The app or group is known to the system, but is not currently deployed", + "_links" : { + "self" : { + "href" : "http://localhost:9393/streams/definitions/timelog" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/streams/definitions/timelog/related?page=0&size=10&sort=name,asc" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.5.4. Retrieve Stream Definition Detail + +The stream definition endpoint lets you get a single stream definition. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definition-get-request-structure) + +* [Path Parameters](#api-guide-resources-stream-definition-get-path-parameters) + +* [Example Request](#api-guide-resources-stream-definition-get-example-request) + +* [Response Structure](#api-guide-resources-stream-definition-get-response-structure) + +##### Request Structure + +``` +GET /streams/definitions/timelog HTTP/1.1 +Host: localhost:9393 +``` + +##### Path Parameters + +/streams/definitions/{name} + +|Parameter| Description | +|---------|-----------------------------------------------------| +| `name` |The name of the stream definition to query (required)| + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/definitions/timelog' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 410 + +{ + "name" : "timelog", + "dslText" : "time --format='YYYY MM DD' | log", + "originalDslText" : "time --format='YYYY MM DD' | log", + "status" : "undeployed", + "description" : "Demo stream for testing", + "statusDescription" : "The app or group is known to the system, but is not currently deployed", + "_links" : { + "self" : { + "href" : "http://localhost:9393/streams/definitions/timelog" + } + } +} +``` + +#### 44.5.5. Delete a Single Stream Definition + +The streams endpoint lets you delete a single stream definition. +(See also: [Delete All Stream Definitions](#api-guide-resources-stream-definitions-delete-all).) 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definitions-delete-one-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-delete-one-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-delete-one-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-delete-one-response-structure) + +##### Request Structure + +``` +DELETE /streams/definitions/timelog HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/definitions/timelog' -i -X DELETE +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +#### 44.5.6. Delete All Stream Definitions + +The streams endpoint lets you delete all single stream definitions. +(See also: [Delete a Single Stream Definition](#api-guide-resources-stream-definitions-delete-one).) +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definitions-delete-all-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-delete-all-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-delete-all-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-delete-all-response-structure) + +##### Request Structure + +``` +DELETE /streams/definitions HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/definitions' -i -X DELETE +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +### 44.6. Stream Validation + +The stream validation endpoint lets you validate the apps in a stream definition. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-validation-request-structure) + +* [Path Parameters](#api-guide-resources-stream-validation-path-parameters) + +* [Example Request](#api-guide-resources-stream-validation-example-request) + +* [Response Structure](#api-guide-resources-stream-validation-response-structure) + +#### 44.6.1. Request Structure + +``` +GET /streams/validation/timelog HTTP/1.1 +Host: localhost:9393 +``` + +#### 44.6.2. Path Parameters + +/streams/validation/{name} + +|Parameter| Description | +|---------|----------------------------------------------------------| +| `name` |The name of a stream definition to be validated (required)| + +#### 44.6.3. Example Request + +``` +$ curl 'http://localhost:9393/streams/validation/timelog' -i -X GET +``` + +#### 44.6.4. Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 197 + +{ + "appName" : "timelog", + "dsl" : "time --format='YYYY MM DD' | log", + "description" : "Demo stream for testing", + "appStatuses" : { + "source:time" : "valid", + "sink:log" : "valid" + } +} +``` + +### 44.7. Stream Deployments + +The deployment definitions endpoint provides information about the deployments that are registered with the Spring Cloud Data Flow server. 
+The following topics provide more details: + +* [Deploying Stream Definition](#api-guide-resources-stream-deployment-deploy) + +* [Undeploy Stream Definition](#api-guide-resources-stream-deployment-undeploy) + +* [Undeploy All Stream Definitions](#api-guide-resources-stream-deployment-undeploy-all) + +* [Update Deployed Stream](#api-guide-resources-stream-deployment-update) + +* [Rollback Stream Definition](#api-guide-resources-stream-deployment-rollback) + +* [Get Manifest](#api-guide-resources-stream-deployment-manifest) + +* [Get Deployment History](#api-guide-resources-stream-deployment-history) + +* [Get Deployment Platforms](#api-guide-resources-stream-deployment-platform-list) + +* [Scale Stream Definition](#api-guide-resources-stream-deployment-scale) + +#### 44.7.1. Deploying Stream Definition + +The stream definition endpoint lets you deploy a single stream definition. +Optionally, you can pass application parameters as properties in the request body. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definitions-deployment-deploy-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-deployment-deploy-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-deployment-deploy-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-deployment-deploy-response-structure) + +##### Request Structure + +``` +POST /streams/deployments/timelog HTTP/1.1 +Content-Type: application/json +Content-Length: 36 +Host: localhost:9393 + +{"app.time.timestamp.format":"YYYY"} +``` + +/streams/deployments/{timelog} + +|Parameter| Description | +|---------|----------------------------------------------------| +|`timelog`|The name of an existing stream definition (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. 
+ +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/deployments/timelog' -i -X POST \ +    -H 'Content-Type: application/json' \ +    -d '{"app.time.timestamp.format":"YYYY"}' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +``` + +#### 44.7.2. Undeploy Stream Definition + +The stream definition endpoint lets you undeploy a single stream definition. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definitions-deployment-undeploy-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-deployment-undeploy-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-deployment-undeploy-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-deployment-undeploy-response-structure) + +##### Request Structure + +``` +DELETE /streams/deployments/timelog HTTP/1.1 +Host: localhost:9393 +``` + +/streams/deployments/{timelog} + +|Parameter|                     Description                     | +|---------|----------------------------------------------------| +|`timelog`|The name of an existing stream definition (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/deployments/timelog' -i -X DELETE +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +#### 44.7.3. Undeploy All Stream Definitions + +The stream definition endpoint lets you undeploy all stream definitions. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definitions-deployment-undeploy-all-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-deployment-undeploy-all-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-deployment-undeploy-all-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-deployment-undeploy-all-response-structure) + +##### Request Structure + +``` +DELETE /streams/deployments HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/deployments' -i -X DELETE +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +#### 44.7.4. Update Deployed Stream + +Thanks to Skipper, you can update deployed streams, and provide additional deployment properties. + +* [Request Structure](#api-guide-resources-stream-definitions-deployment-update-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-deployment-update-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-deployment-update-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-deployment-update-response-structure) + +##### Request Structure + +``` +POST /streams/deployments/update/timelog1 HTTP/1.1 +Content-Type: application/json +Content-Length: 196 +Host: localhost:9393 + +{"releaseName":"timelog1","packageIdentifier":{"repositoryName":"test","packageName":"timelog1","packageVersion":"1.0.0"},"updateProperties":{"app.time.timestamp.format":"YYYYMMDD"},"force":false} +``` + +/streams/deployments/update/{timelog1} + +|Parameter | Description | +|----------|----------------------------------------------------| +|`timelog1`|The name of an existing stream definition (required)| + +##### Request Parameters + +There are no request parameters 
for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/deployments/update/timelog1' -i -X POST \ + -H 'Content-Type: application/json' \ + -d '{"releaseName":"timelog1","packageIdentifier":{"repositoryName":"test","packageName":"timelog1","packageVersion":"1.0.0"},"updateProperties":{"app.time.timestamp.format":"YYYYMMDD"},"force":false}' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +``` + +#### 44.7.5. Rollback Stream Definition + +Rollback the stream to the previous or a specific version of the stream. + +* [Request Structure](#api-guide-resources-stream-definitions-deployment-rollback-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-deployment-rollback-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-deployment-rollback-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-deployment-rollback-response-structure) + +##### Request Structure + +``` +POST /streams/deployments/rollback/timelog1/1 HTTP/1.1 +Content-Type: application/json +Host: localhost:9393 +``` + +/streams/deployments/rollback/{name}/{version} + +|Parameter| Description | +|---------|----------------------------------------------------| +| `name` |The name of an existing stream definition (required)| +|`version`| The version to rollback to | + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/deployments/rollback/timelog1/1' -i -X POST \ + -H 'Content-Type: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +``` + +#### 44.7.6. Get Manifest + +Return a manifest of a released version. For packages with dependencies, the manifest includes the contents of those dependencies. 
+ +* [Request Structure](#api-guide-resources-stream-definitions-deployment-manifest-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-deployment-manifest-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-deployment-manifest-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-deployment-manifest-response-structure) + +##### Request Structure + +``` +GET /streams/deployments/manifest/timelog1/1 HTTP/1.1 +Content-Type: application/json +Host: localhost:9393 +``` + +/streams/deployments/manifest/{name}/{version} + +|Parameter| Description | +|---------|----------------------------------------------------| +| `name` |The name of an existing stream definition (required)| +|`version`| The version of the stream | + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/deployments/manifest/timelog1/1' -i -X GET \ + -H 'Content-Type: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +#### 44.7.7. Get Deployment History + +Get the stream’s deployment history. 
+ +* [Request Structure](#api-guide-resources-stream-definitions-deployment-history-request-structure) + +* [Example Request](#api-guide-resources-stream-definitions-deployment-history-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-deployment-history-response-structure) + +##### Request Structure + +``` +GET /streams/deployments/history/timelog1 HTTP/1.1 +Content-Type: application/json +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/deployments/history/timelog1' -i -X GET \ + -H 'Content-Type: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 162 + +[ { + "name" : null, + "version" : 0, + "info" : null, + "pkg" : null, + "configValues" : { + "raw" : null + }, + "manifest" : null, + "platformName" : null +} ] +``` + +#### 44.7.8. Get Deployment Platforms + +Retrieve a list of supported deployment platforms. + +* [Request Structure](#api-guide-resources-stream-definitions-deployment-platform-list-request-structure) + +* [Example Request](#api-guide-resources-stream-definitions-deployment-platform-list-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-deployment-platform-list-response-structure) + +##### Request Structure + +``` +GET /streams/deployments/platform/list HTTP/1.1 +Content-Type: application/json +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/deployments/platform/list' -i -X GET \ + -H 'Content-Type: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 106 + +[ { + "id" : null, + "name" : "default", + "type" : "local", + "description" : null, + "options" : [ ] +} ] +``` + +#### 44.7.9. Scale Stream Definition + +The stream definition endpoint lets you scale a single app in a stream definition. 
+Optionally, you can pass application parameters as properties in the request body. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-definitions-deployment-scale-request-structure) + +* [Request Parameters](#api-guide-resources-stream-definitions-deployment-scale-request-parameters) + +* [Example Request](#api-guide-resources-stream-definitions-deployment-scale-example-request) + +* [Response Structure](#api-guide-resources-stream-definitions-deployment-scale-response-structure) + +##### Request Structure + +``` +POST /streams/deployments/scale/timelog/log/instances/1 HTTP/1.1 +Content-Type: application/json +Content-Length: 36 +Host: localhost:9393 + +{"app.time.timestamp.format":"YYYY"} +``` + +/streams/deployments/scale/{streamName}/{appName}/instances/{count} + +| Parameter | Description | +|------------|------------------------------------------------------------------| +|`streamName`| The name of an existing stream definition (required) | +| `appName` | The name of the application in the stream to scale | +| `count` |Number of instances for the selected stream application (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/deployments/scale/timelog/log/instances/1' -i -X POST \ + -H 'Content-Type: application/json' \ + -d '{"app.time.timestamp.format":"YYYY"}' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +``` + +### 44.8. Task Definitions + +The task definitions endpoint provides information about the task definitions that are registered with the Spring Cloud Data Flow server. 
+The following topics provide more details: + +* [Creating a New Task Definition](#api-guide-resources-task-definitions-creating) + +* [List All Task Definitions](#api-guide-resources-task-definitions-list) + +* [Retrieve Task Definition Detail](#api-guide-resources-task-definition-detail) + +* [Delete Task Definition](#api-guide-resources-delete-task-definition) + +#### 44.8.1. Creating a New Task Definition + +The task definition endpoint lets you create a new task definition. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-task-definitions-creating-request-structure) + +* [Request Parameters](#api-guide-resources-stream-task-definitions-creating-request-parameters) + +* [Example Request](#api-guide-resources-stream-task-definitions-creating-example-request) + +* [Response Structure](#api-guide-resources-stream-task-definitions-creating-response-structure) + +##### Request Structure + +``` +POST /tasks/definitions HTTP/1.1 +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +name=my-task&definition=timestamp+--format%3D%27YYYY+MM+DD%27&description=Demo+task+definition+for+testing +``` + +##### Request Parameters + +| Parameter | Description | +|-------------|------------------------------------------------| +| `name` | The name for the created task definition | +|`definition` |The definition for the task, using Data Flow DSL| +|`description`| The description of the task definition | + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/definitions' -i -X POST \ + -d 'name=my-task&definition=timestamp+--format%3D%27YYYY+MM+DD%27&description=Demo+task+definition+for+testing' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 342 + +{ + "name" : "my-task", + "dslText" : "timestamp --format='YYYY MM DD'", + "description" : "Demo task definition for testing", + "composed" : false, + "composedTaskElement" : false, + 
"lastTaskExecution" : null, + "status" : "UNKNOWN", + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/definitions/my-task" + } + } +} +``` + +#### 44.8.2. List All Task Definitions + +The task definition endpoint lets you get all task definitions. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-task-definitions-list-request-structure) + +* [Request Parameters](#api-guide-resources-stream-task-definitions-list-request-parameters) + +* [Example Request](#api-guide-resources-stream-task-definitions-list-example-request) + +* [Response Structure](#api-guide-resources-stream-task-definitions-list-response-structure) + +##### Request Structure + +``` +GET /tasks/definitions?page=0&size=10&sort=taskName%2CASC&search=&manifest=true HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter | Description | +|----------|-------------------------------------------------------------------------------| +| `page` | The zero-based page number (optional) | +| `size` | The requested page size (optional) | +| `search` | The search string performed on the name (optional) | +| `sort` | The sort on the list (optional) | +|`manifest`|The flag to include the task manifest into the latest task execution (optional)| + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/definitions?page=0&size=10&sort=taskName%2CASC&search=&manifest=true' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 689 + +{ + "_embedded" : { + "taskDefinitionResourceList" : [ { + "name" : "my-task", + "dslText" : "timestamp --format='YYYY MM DD'", + "description" : "Demo task definition for testing", + "composed" : false, + "composedTaskElement" : false, + "lastTaskExecution" : null, + "status" : "UNKNOWN", + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/definitions/my-task" + } + } + } ] + }, + "_links" : { + "self" : { + "href" 
: "http://localhost:9393/tasks/definitions?page=0&size=10&sort=taskName,asc" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.8.3. Retrieve Task Definition Detail + +The task definition endpoint lets you get a single task definition. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-task-definitions-detail-request-structure) + +* [Request Parameters](#api-guide-resources-stream-task-definitions-detail-request-parameters) + +* [Example Request](#api-guide-resources-stream-task-definitions-detail-example-request) + +* [Response Structure](#api-guide-resources-stream-task-definitions-detail-response-structure) + +##### Request Structure + +``` +GET /tasks/definitions/my-task?manifest=true HTTP/1.1 +Host: localhost:9393 +``` + +/tasks/definitions/{my-task} + +|Parameter| Description | +|---------|--------------------------------------------------| +|`my-task`|The name of an existing task definition (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/definitions/my-task?manifest=true' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 342 + +{ + "name" : "my-task", + "dslText" : "timestamp --format='YYYY MM DD'", + "description" : "Demo task definition for testing", + "composed" : false, + "composedTaskElement" : false, + "lastTaskExecution" : null, + "status" : "UNKNOWN", + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/definitions/my-task" + } + } +} +``` + +#### 44.8.4. Delete Task Definition + +The task definition endpoint lets you delete a single task definition. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-delete-task-definition-request-structure) + +* [Request Parameters](#api-guide-resources-delete-task-definition-request-parameters) + +* [Example Request](#api-guide-resources-delete-task-definition-example-request) + +* [Response Structure](#api-guide-resources-delete-task-definition-response-structure) + +##### Request Structure + +``` +DELETE /tasks/definitions/my-task?cleanup=true HTTP/1.1 +Host: localhost:9393 +``` + +/tasks/definitions/{my-task} + +|Parameter| Description | +|---------|--------------------------------------------------| +|`my-task`|The name of an existing task definition (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/definitions/my-task?cleanup=true' -i -X DELETE +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +### 44.9. Task Scheduler + +The task scheduler endpoint provides information about the task schedules that are registered with the Scheduler Implementation. +The following topics provide more details: + +* [Creating a New Task Schedule](#api-guide-resources-task-schedule-creating) + +* [List All Schedules](#api-guide-resources-task-schedule-list) + +* [List Filtered Schedules](#api-guide-resources-task-schedule-filtered-list) + +* [Delete Task Schedule](#api-guide-resources-task-delete-schedule) + +#### 44.9.1. Creating a New Task Schedule + +The task schedule endpoint lets you create a new task schedule. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-task-schedule-creating-request-structure) + +* [Request Parameters](#api-guide-resources-stream-task-schedule-creating-request-parameters) + +* [Example Request](#api-guide-resources-stream-task-schedule-creating-example-request) + +* [Response Structure](#api-guide-resources-stream-task-schedule-creating-response-structure) + +##### Request Structure + +``` +POST /tasks/schedules HTTP/1.1 +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +scheduleName=myschedule&taskDefinitionName=mytaskname&properties=scheduler.cron.expression%3D00+22+17+%3F+*&arguments=--foo%3Dbar +``` + +##### Request Parameters + +| Parameter | Description | +|--------------------|----------------------------------------------------------------| +| `scheduleName` | The name for the created schedule | +|`taskDefinitionName`| The name of the task definition to be scheduled | +| `properties` |the properties that are required to schedule and launch the task| +| `arguments` | the command line arguments to be used for launching the task | + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/schedules' -i -X POST \ + -d 'scheduleName=myschedule&taskDefinitionName=mytaskname&properties=scheduler.cron.expression%3D00+22+17+%3F+*&arguments=--foo%3Dbar' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +``` + +#### 44.9.2. List All Schedules + +The task schedules endpoint lets you get all task schedules. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-task-schedule-list-request-structure) + +* [Request Parameters](#api-guide-resources-stream-task-schedule-list-request-parameters) + +* [Example Request](#api-guide-resources-stream-task-schedule-list-example-request) + +* [Response Structure](#api-guide-resources-stream-task-schedule-list-response-structure) + +##### Request Structure + +``` +GET /tasks/schedules?page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|-------------------------------------| +| `page` |The zero-based page number (optional)| +| `size` | The requested page size (optional) | + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/schedules?page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 587 + +{ + "_embedded" : { + "scheduleInfoResourceList" : [ { + "scheduleName" : "FOO", + "taskDefinitionName" : "BAR", + "scheduleProperties" : { + "scheduler.AAA.spring.cloud.scheduler.cron.expression" : "00 41 17 ? * *" + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/schedules/FOO" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/schedules?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.9.3. List Filtered Schedules + +The task schedules endpoint lets you get all task schedules that have the specified task definition name. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-task-schedule-list-filtered-request-structure) + +* [Request Parameters](#api-guide-resources-stream-task-schedule-list-filtered-request-parameters) + +* [Example Request](#api-guide-resources-stream-task-schedule-list-filtered-example-request) + +* [Response Structure](#api-guide-resources-stream-task-schedule-list-filtered-response-structure) + +##### Request Structure + +``` +GET /tasks/schedules/instances/FOO?page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +/tasks/schedules/instances/{task-definition-name} + +| Parameter | Description | +|----------------------|------------------------------------------------------------------| +|`task-definition-name`|Filter schedules based on the specified task definition (required)| + +##### Request Parameters + +|Parameter| Description | +|---------|-------------------------------------| +| `page` |The zero-based page number (optional)| +| `size` | The requested page size (optional) | + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/schedules/instances/FOO?page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 599 + +{ + "_embedded" : { + "scheduleInfoResourceList" : [ { + "scheduleName" : "FOO", + "taskDefinitionName" : "BAR", + "scheduleProperties" : { + "scheduler.AAA.spring.cloud.scheduler.cron.expression" : "00 41 17 ? * *" + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/schedules/FOO" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/schedules/instances/FOO?page=0&size=1" + } + }, + "page" : { + "size" : 1, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.9.4. Delete Task Schedule + +The task schedule endpoint lets you delete a single task schedule. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-delete-task-schedule-request-structure) + +* [Request Parameters](#api-guide-resources-delete-task-schedule-request-parameters) + +* [Example Request](#api-guide-resources-delete-task-schedule-example-request) + +* [Response Structure](#api-guide-resources-delete-task-schedule-response-structure) + +##### Request Structure + +``` +DELETE /tasks/schedules/mytestschedule HTTP/1.1 +Host: localhost:9393 +``` + +/tasks/schedules/{scheduleName} + +| Parameter | Description | +|--------------|-------------------------------------------| +|`scheduleName`|The name of an existing schedule (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/schedules/mytestschedule' -i -X DELETE +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +### 44.10. Task Validation + +The task validation endpoint lets you validate the apps in a task definition. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-task-validation-request-structure) + +* [Path Parameters](#api-guide-resources-task-validation-path-parameters) + +* [Example Request](#api-guide-resources-task-validation-example-request) + +* [Response Structure](#api-guide-resources-task-validation-response-structure) + +#### 44.10.1. Request Structure + +``` +GET /tasks/validation/taskC HTTP/1.1 +Host: localhost:9393 +``` + +#### 44.10.2. Path Parameters + +/tasks/validation/{name} + +|Parameter| Description | +|---------|--------------------------------------------------------| +| `name` |The name of a task definition to be validated (required)| + +#### 44.10.3. Example Request + +``` +$ curl 'http://localhost:9393/tasks/validation/taskC' -i -X GET +``` + +#### 44.10.4. 
Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 144 + +{ + "appName" : "taskC", + "dsl" : "timestamp --format='yyyy MM dd'", + "description" : "", + "appStatuses" : { + "task:taskC" : "valid" + } +} +``` + +### 44.11. Task Executions + +The task executions endpoint provides information about the task executions that are registered with the Spring Cloud Data Flow server. +The following topics provide more details: + +* [Launching a Task](#api-guide-resources-task-executions-launching) + +* [Stopping a Task](#api-guide-resources-task-executions-stopping) + +* [List All Task Executions](#api-guide-resources-task-executions-list) + +* [List All Task Executions With a Specified Task Name](#api-guide-resources-task-executions-list-by-name) + +* [Task Execution Detail](#api-guide-resources-task-executions-detail) + +* [Delete Task Execution](#api-guide-resources-task-executions-delete) + +* [Task Execution Current Count](#api-guide-resources-task-executions-current-count) + +#### 44.11.1. Launching a Task + +Launching a task is done by requesting the creation of a new task execution. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-task-executions-launching-request-structure) + +* [Request Parameters](#api-guide-resources-task-executions-launching-request-parameters) + +* [Example Request](#api-guide-resources-task-executions-launching-example-request) + +* [Response Structure](#api-guide-resources-task-executions-launching-response-structure) + +##### Request Structure + +``` +POST /tasks/executions HTTP/1.1 +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +name=taskA&properties=app.my-task.foo%3Dbar%2Cdeployer.my-task.something-else%3D3&arguments=--server.port%3D8080+--foo%3Dbar +``` + +##### Request Parameters + +| Parameter | Description | +|------------|----------------------------------------------------------| +| `name` | The name of the task definition to launch | +|`properties`|Application and Deployer properties to use while launching| +|`arguments` | Command line arguments to pass to the task | + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/executions' -i -X POST \ + -d 'name=taskA&properties=app.my-task.foo%3Dbar%2Cdeployer.my-task.something-else%3D3&arguments=--server.port%3D8080+--foo%3Dbar' +``` + +##### Response Structure + +``` +HTTP/1.1 201 Created +Content-Type: application/json +Content-Length: 1 + +1 +``` + +#### 44.11.2. Stopping a Task + +Stopping a task is done by posting the id of an existing task execution. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-task-executions-stopping-request-structure) + +* [Path Parameters](#api-guide-resources-task-executions-stopping-path-parameters) + +* [Request Parameters](#api-guide-resources-task-executions-stopping-request-parameters) + +* [Example Request](#api-guide-resources-task-executions-stopping-example-request) + +* [Response Structure](#api-guide-resources-task-executions-stopping-response-structure) + +##### Request Structure + +``` +POST /tasks/executions/1 HTTP/1.1 +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +platform=default +``` + +##### Path Parameters + +/tasks/executions/{id} + +|Parameter| Description | +|---------|------------------------------------------------| +| `id` | The id of an existing task execution (required)| + +##### Request Parameters + +|Parameter | Description | +|----------|----------------------------------------------------------| +|`platform`|The platform associated with the task execution (optional)| + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/executions/1' -i -X POST \ + -d 'platform=default' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +#### 44.11.3. List All Task Executions + +The task executions endpoint lets you list all task executions. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-task-executions-list-request-structure) + +* [Request Parameters](#api-guide-resources-task-executions-list-request-parameters) + +* [Example Request](#api-guide-resources-task-executions-list-example-request) + +* [Response Structure](#api-guide-resources-task-executions-list-response-structure) + +##### Request Structure + +``` +GET /tasks/executions?page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|-------------------------------------| +| `page` |The zero-based page number (optional)| +| `size` | The requested page size (optional) | + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/executions?page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 2711 + +{ + "_embedded" : { + "taskExecutionResourceList" : [ { + "executionId" : 2, + "exitCode" : null, + "taskName" : "taskB", + "startTime" : null, + "endTime" : null, + "exitMessage" : null, + "arguments" : [ ], + "jobExecutionIds" : [ ], + "errorMessage" : null, + "externalExecutionId" : "taskB-7939f7fe-40a8-438c-a00c-4b2e041a42a7", + "parentExecutionId" : null, + "resourceUrl" : "org.springframework.cloud.task.app:timestamp-task:jar:1.2.0.RELEASE", + "appProperties" : { + "management.metrics.tags.service" : "task-application", + "timestamp.format" : "yyyy MM dd", + "spring.datasource.username" : null, + "spring.datasource.url" : null, + "spring.datasource.driverClassName" : null, + "management.metrics.tags.application" : "${spring.cloud.task.name:unknown}-${spring.cloud.task.executionid:unknown}", + "spring.cloud.task.name" : "taskB" + }, + "deploymentProperties" : { + "app.my-task.foo" : "bar", + "deployer.my-task.something-else" : "3" + }, + "platformName" : "default", + "taskExecutionStatus" : "UNKNOWN", + "_links" : { + "self" : { + "href" : 
"http://localhost:9393/tasks/executions/2" + } + } + }, { + "executionId" : 1, + "exitCode" : null, + "taskName" : "taskA", + "startTime" : null, + "endTime" : null, + "exitMessage" : null, + "arguments" : [ ], + "jobExecutionIds" : [ ], + "errorMessage" : null, + "externalExecutionId" : "taskA-37a1d8dd-9c33-4080-859f-899ed0e91b84", + "parentExecutionId" : null, + "resourceUrl" : "org.springframework.cloud.task.app:timestamp-task:jar:1.2.0.RELEASE", + "appProperties" : { + "management.metrics.tags.service" : "task-application", + "timestamp.format" : "yyyy MM dd", + "spring.datasource.username" : null, + "spring.datasource.url" : null, + "spring.datasource.driverClassName" : null, + "management.metrics.tags.application" : "${spring.cloud.task.name:unknown}-${spring.cloud.task.executionid:unknown}", + "spring.cloud.task.name" : "taskA" + }, + "deploymentProperties" : { + "app.my-task.foo" : "bar", + "deployer.my-task.something-else" : "3" + }, + "platformName" : "default", + "taskExecutionStatus" : "UNKNOWN", + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/executions/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/executions?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 2, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.11.4. List All Task Executions With a Specified Task Name + +The task executions endpoint lets you list task executions with a specified task name. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-task-executions-list-by-name-request-structure) + +* [Request Parameters](#api-guide-resources-task-executions-list-by-name-request-parameters) + +* [Example Request](#api-guide-resources-task-executions-list-by-name-example-request) + +* [Response Structure](#api-guide-resources-task-executions-list-by-name-response-structure) + +##### Request Structure + +``` +GET /tasks/executions?name=taskB&page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|-------------------------------------------| +| `page` | The zero-based page number (optional) | +| `size` | The requested page size (optional) | +| `name` |The name associated with the task execution| + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/executions?name=taskB&page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 1492 + +{ + "_embedded" : { + "taskExecutionResourceList" : [ { + "executionId" : 2, + "exitCode" : null, + "taskName" : "taskB", + "startTime" : null, + "endTime" : null, + "exitMessage" : null, + "arguments" : [ ], + "jobExecutionIds" : [ ], + "errorMessage" : null, + "externalExecutionId" : "taskB-7939f7fe-40a8-438c-a00c-4b2e041a42a7", + "parentExecutionId" : null, + "resourceUrl" : "org.springframework.cloud.task.app:timestamp-task:jar:1.2.0.RELEASE", + "appProperties" : { + "management.metrics.tags.service" : "task-application", + "timestamp.format" : "yyyy MM dd", + "spring.datasource.username" : null, + "spring.datasource.url" : null, + "spring.datasource.driverClassName" : null, + "management.metrics.tags.application" : "${spring.cloud.task.name:unknown}-${spring.cloud.task.executionid:unknown}", + "spring.cloud.task.name" : "taskB" + }, + "deploymentProperties" : { + "app.my-task.foo" : "bar", + "deployer.my-task.something-else" : "3" + }, 
+ "platformName" : "default", + "taskExecutionStatus" : "UNKNOWN", + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/executions/2" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/executions?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.11.5. Task Execution Detail + +The task executions endpoint lets you get the details about a task execution. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-task-executions-detail-request-structure) + +* [Request Parameters](#api-guide-resources-task-executions-detail-request-parameters) + +* [Example Request](#api-guide-resources-task-executions-detail-example-request) + +* [Response Structure](#api-guide-resources-task-executions-detail-response-structure) + +##### Request Structure + +``` +GET /tasks/executions/1 HTTP/1.1 +Host: localhost:9393 +``` + +/tasks/executions/{id} + +|Parameter| Description | +|---------|-----------------------------------------------| +| `id` |The id of an existing task execution (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. 
+ +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/executions/1' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 1085 + +{ + "executionId" : 1, + "exitCode" : null, + "taskName" : "taskA", + "startTime" : null, + "endTime" : null, + "exitMessage" : null, + "arguments" : [ ], + "jobExecutionIds" : [ ], + "errorMessage" : null, + "externalExecutionId" : "taskA-37a1d8dd-9c33-4080-859f-899ed0e91b84", + "parentExecutionId" : null, + "resourceUrl" : "org.springframework.cloud.task.app:timestamp-task:jar:1.2.0.RELEASE", + "appProperties" : { + "management.metrics.tags.service" : "task-application", + "timestamp.format" : "yyyy MM dd", + "spring.datasource.username" : null, + "spring.datasource.url" : null, + "spring.datasource.driverClassName" : null, + "management.metrics.tags.application" : "${spring.cloud.task.name:unknown}-${spring.cloud.task.executionid:unknown}", + "spring.cloud.task.name" : "taskA" + }, + "deploymentProperties" : { + "app.my-task.foo" : "bar", + "deployer.my-task.something-else" : "3" + }, + "platformName" : "default", + "taskExecutionStatus" : "UNKNOWN", + "_links" : { + "self" : { + "href" : "http://localhost:9393/tasks/executions/1" + } + } +} +``` + +#### 44.11.6. Delete Task Execution + +The task execution endpoint lets you: + +* Clean up resources used to deploy the task + +* Remove relevant task data as well as possibly associated Spring Batch job data from the persistence store + +| |The cleanup implementation (first option) is platform specific. Both operations can be triggered
at once or separately.| +|---|---------------------------------------------------------------------------------------------------------------------------| + +The following topics provide more details: + +* [Request Structure](#api-guide-resources-task-executions-delete-request-structure) + +* [Request Parameters](#api-guide-resources-task-executions-delete-request-parameters) + +* [Example Request](#api-guide-resources-task-executions-delete-example-request) + +* [Response Structure](#api-guide-resources-task-executions-delete-response-structure) + +Please refer to the following section in regards to [Deleting Task Execution Data](#api-guide-resources-task-executions-delete-multiple-and-task-data). + +##### Request Structure + +``` +DELETE /tasks/executions/1,2?action=CLEANUP,REMOVE_DATA HTTP/1.1 +Host: localhost:9393 +``` + +/tasks/executions/{ids} + +|Parameter| Description | +|---------|-----------------------------------------------------| +| `ids` |Providing 2 comma separated task execution id values.| + +| |You must provide task execution IDs that actually exist. Otherwise, a `404` (Not Found) HTTP status is returned.
In the case of submitting multiple task execution IDs, the invalidity of a single task execution ID causes the entire request to fail,
without performing any operation.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Request Parameters + +This endpoint supports one optional request parameter named **action**. It is an enumeration and supports the following +values: + +* CLEANUP + +* REMOVE\_DATA + +|Parameter| Description | +|---------|-----------------------------------------------------------| +|`action` |Using both actions CLEANUP and REMOVE\_DATA simultaneously.| + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/executions/1,2?action=CLEANUP,REMOVE_DATA' -i -X DELETE +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +#### 44.11.7. Deleting Task Execution Data + +Not only can you clean up resources that were used to deploy tasks but you can also delete the data associated with +task executions from the underlying persistence store. Also, if a task execution is associated with one or +more batch job executions, these are removed as well. + +The following example illustrates how a request can be made using multiple task execution IDs and multiple actions: + +``` +$ curl 'http://localhost:9393/tasks/executions/1,2?action=CLEANUP,REMOVE_DATA' -i -X DELETE +``` + +/tasks/executions/{ids} + +|Parameter| Description | +|---------|-----------------------------------------------------| +| `ids` |Providing 2 comma separated task execution id values.| + +|Parameter| Description | +|---------|-----------------------------------------------------------| +|`action` |Using both actions CLEANUP and REMOVE\_DATA simultaneously.| + +| |When deleting data from the persistence store by using the `REMOVE_DATA` action parameter, you must provide
task execution IDs that represent parent task executions. When you provide child task executions (executed as part of a composed task),
a `400` (Bad Request) HTTP status is returned.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When deleting large number of task executions some database types limit the number of entries in the `IN` clause (the method Spring Cloud Data Flow uses to delete relationships for task executions).
Spring Cloud Data Flow supports the chunking of deletes for SQL Server (maximum 2100 entries) and Oracle DBs (maximum 1000 entries).
However, Spring Cloud Data Flow allows users to set their own chunking factor. To do this, set the `spring.cloud.dataflow.task.executionDeleteChunkSize` property to the appropriate chunk size.
Default is `0` which means Spring Cloud Data Flow will not chunk the task execution deletes (except for Oracle and Sql Server databases).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 44.11.8. Task Execution Current Count + +The task executions current endpoint lets you retrieve the current number of running executions. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-task-executions-current-count-request-structure) + +* [Request Parameters](#api-guide-resources-task-executions-current-count-request-parameters) + +* [Example Request](#api-guide-resources-task-executions-current-count-example-request) + +* [Response Structure](#api-guide-resources-task-executions-current-count-response-structure) + +##### Request Structure + +Unresolved directive in api-guide.adoc - include::/home/runner/work/spring-cloud-dataflow/spring-cloud-dataflow/spring-cloud-dataflow-docs/../spring-cloud-dataflow-classic-docs/target/generated-snippets/task-executions-documentation/launch-task-current-count/http-request.adoc[] + +##### Request Parameters + +There are no request parameters for this endpoint. 
+ +##### Example Request + +Unresolved directive in api-guide.adoc - include::/home/runner/work/spring-cloud-dataflow/spring-cloud-dataflow/spring-cloud-dataflow-docs/../spring-cloud-dataflow-classic-docs/target/generated-snippets/task-executions-documentation/launch-task-current-count/curl-request.adoc[] + +##### Response Structure + +Unresolved directive in api-guide.adoc - include::/home/runner/work/spring-cloud-dataflow/spring-cloud-dataflow/spring-cloud-dataflow-docs/../spring-cloud-dataflow-classic-docs/target/generated-snippets/task-executions-documentation/launch-task-current-count/http-response.adoc[] + +### 44.12. Job Executions + +The job executions endpoint provides information about the job executions that are registered with the Spring Cloud Data Flow server. +The following topics provide more details: + +* [List All Job Executions](#api-guide-resources-job-executions-list) + +* [List All Job Executions Without Step Executions Included](#api-guide-resources-job-executions-thin-job-execution-list) + +* [List All Job Executions With a Specified Job Name](#api-guide-resources-job-executions-job-execution-info-only-list-by-name) + +* [List All Job Executions With a Specified Job Name Without Step Executions Included](#api-guide-resources-job-executions-thin-job-execution-info-only-list-by-name) + +* [List All Job Executions For A Specified Date Range Without Step Executions Included](#api-guide-resources-job-executions-thin-job-execution-info-only-list-by-date) + +* [List All Job Executions For A Specified Job Instance Id Without Step Executions Included](#api-guide-resources-job-executions-thin-job-execution-info-only-list-by-job-instance-id) + +* [List All Job Executions For A Specified Task Execution Id Without Step Executions Included](#api-guide-resources-job-executions-thin-job-execution-info-only-list-by-task-execution-id) + +* [Job Execution Detail](#api-guide-resources-job-executions-detail) + +* [Stop Job 
Execution](#api-guide-resources-job-executions-stop) + +* [Restart Job Execution](#api-guide-resources-job-executions-restart) + +#### 44.12.1. List All Job Executions + +The job executions endpoint lets you list all job executions. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-executions-list-request-structure) + +* [Request Parameters](#api-guide-resources-job-executions-list-request-parameters) + +* [Example Request](#api-guide-resources-job-executions-list-example-request) + +* [Response Structure](#api-guide-resources-job-executions-list-response-structure) + +##### Request Structure + +``` +GET /jobs/executions?page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|-------------------------------------| +| `page` |The zero-based page number (optional)| +| `size` | The requested page size (optional) | + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/executions?page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 3066 + +{ + "_embedded" : { + "jobExecutionResourceList" : [ { + "executionId" : 2, + "stepExecutionCount" : 0, + "jobId" : 2, + "taskExecutionId" : 2, + "name" : "DOCJOB1", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "duration" : "00:00:00", + "jobExecution" : { + "id" : 2, + "version" : 1, + "jobParameters" : { + "parameters" : { } + }, + "jobInstance" : { + "id" : 2, + "jobName" : "DOCJOB1", + "version" : null + }, + "stepExecutions" : [ ], + "status" : "STOPPED", + "startTime" : "2022-01-18T18:54:42.193+0000", + "createTime" : "2022-01-18T18:54:42.192+0000", + "endTime" : null, + "lastUpdated" : "2022-01-18T18:54:42.193+0000", + "exitStatus" : { + "exitCode" : "UNKNOWN", + "exitDescription" : "" + }, + "executionContext" : { + "dirty" : false, + "empty" : true, + "values" : [ ] + }, + "failureExceptions" : [ ], + 
"jobConfigurationName" : null, + "allFailureExceptions" : [ ] + }, + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : true, + "abandonable" : true, + "stoppable" : false, + "defined" : true, + "timeZone" : "UTC", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions/2" + } + } + }, { + "executionId" : 1, + "stepExecutionCount" : 0, + "jobId" : 1, + "taskExecutionId" : 1, + "name" : "DOCJOB", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "duration" : "00:00:00", + "jobExecution" : { + "id" : 1, + "version" : 2, + "jobParameters" : { + "parameters" : { } + }, + "jobInstance" : { + "id" : 1, + "jobName" : "DOCJOB", + "version" : null + }, + "stepExecutions" : [ ], + "status" : "STOPPING", + "startTime" : "2022-01-18T18:54:42.189+0000", + "createTime" : "2022-01-18T18:54:42.188+0000", + "endTime" : null, + "lastUpdated" : "2022-01-18T18:54:42.260+0000", + "exitStatus" : { + "exitCode" : "UNKNOWN", + "exitDescription" : "" + }, + "executionContext" : { + "dirty" : false, + "empty" : true, + "values" : [ ] + }, + "failureExceptions" : [ ], + "jobConfigurationName" : null, + "allFailureExceptions" : [ ] + }, + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : false, + "abandonable" : true, + "stoppable" : false, + "defined" : false, + "timeZone" : "UTC", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 2, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.12.2. List All Job Executions Without Step Executions Included + +The job executions endpoint lets you list all job executions without step executions included. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-executions-thin-job-execution-list-request-structure) + +* [Request Parameters](#api-guide-resources-job-executions-thin-job-execution-list-request-parameters) + +* [Example Request](#api-guide-resources-job-executions-thin-job-execution-list-example-request) + +* [Response Structure](#api-guide-resources-job-executions-thin-job-execution-list-response-structure) + +##### Request Structure + +``` +GET /jobs/thinexecutions?page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|-------------------------------------| +| `page` |The zero-based page number (optional)| +| `size` | The requested page size (optional) | + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/thinexecutions?page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 1604 + +{ + "_embedded" : { + "jobExecutionThinResourceList" : [ { + "executionId" : 2, + "stepExecutionCount" : 0, + "jobId" : 2, + "taskExecutionId" : 2, + "instanceId" : 2, + "name" : "DOCJOB1", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "startDateTime" : "2022-01-18T18:54:42.193+0000", + "duration" : "00:00:00", + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : true, + "abandonable" : true, + "stoppable" : false, + "defined" : true, + "timeZone" : "UTC", + "status" : "STOPPED", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions/2" + } + } + }, { + "executionId" : 1, + "stepExecutionCount" : 0, + "jobId" : 1, + "taskExecutionId" : 1, + "instanceId" : 1, + "name" : "DOCJOB", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "startDateTime" : "2022-01-18T18:54:42.189+0000", + "duration" : "00:00:00", + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : false, + "abandonable" : false, + 
"stoppable" : true, + "defined" : false, + "timeZone" : "UTC", + "status" : "STARTED", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 2, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.12.3. List All Job Executions With a Specified Job Name + +The job executions endpoint lets you list all job executions. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-executions-list-by-name-request-structure) + +* [Request Parameters](#api-guide-resources-job-executions-list-by-name-request-parameters) + +* [Example Request](#api-guide-resources-job-executions-list-by-name-example-request) + +* [Response Structure](#api-guide-resources-job-executions-list-by-name-response-structure) + +##### Request Structure + +``` +GET /jobs/executions?name=DOCJOB&page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|------------------------------------------| +| `page` | The zero-based page number (optional) | +| `size` | The requested page size (optional) | +| `name` |The name associated with the job execution| + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/executions?name=DOCJOB&page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 1669 + +{ + "_embedded" : { + "jobExecutionResourceList" : [ { + "executionId" : 1, + "stepExecutionCount" : 0, + "jobId" : 1, + "taskExecutionId" : 1, + "name" : "DOCJOB", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "duration" : "00:00:00", + "jobExecution" : { + "id" : 1, + "version" : 2, + "jobParameters" : { + "parameters" : { } + }, + "jobInstance" : { + "id" : 1, + "jobName" : "DOCJOB", + "version" : null + }, + 
"stepExecutions" : [ ], + "status" : "STOPPING", + "startTime" : "2022-01-18T18:54:42.189+0000", + "createTime" : "2022-01-18T18:54:42.188+0000", + "endTime" : null, + "lastUpdated" : "2022-01-18T18:54:42.260+0000", + "exitStatus" : { + "exitCode" : "UNKNOWN", + "exitDescription" : "" + }, + "executionContext" : { + "dirty" : false, + "empty" : true, + "values" : [ ] + }, + "failureExceptions" : [ ], + "jobConfigurationName" : null, + "allFailureExceptions" : [ ] + }, + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : false, + "abandonable" : true, + "stoppable" : false, + "defined" : false, + "timeZone" : "UTC", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.12.4. List All Job Executions With a Specified Job Name Without Step Executions Included + +The job executions endpoint lets you list all job executions. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-executions-thin-list-by-name-request-structure) + +* [Request Parameters](#api-guide-resources-job-executions-thin-list-by-name-request-parameters) + +* [Example Request](#api-guide-resources-job-executions-thin-list-by-name-example-request) + +* [Response Structure](#api-guide-resources-job-executions-thin-list-by-name-response-structure) + +##### Request Structure + +``` +GET /jobs/thinexecutions?name=DOCJOB&page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|------------------------------------------| +| `page` | The zero-based page number (optional) | +| `size` | The requested page size (optional) | +| `name` |The name associated with the job execution| + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/thinexecutions?name=DOCJOB&page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 943 + +{ + "_embedded" : { + "jobExecutionThinResourceList" : [ { + "executionId" : 1, + "stepExecutionCount" : 0, + "jobId" : 1, + "taskExecutionId" : 1, + "instanceId" : 1, + "name" : "DOCJOB", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "startDateTime" : "2022-01-18T18:54:42.189+0000", + "duration" : "00:00:00", + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : false, + "abandonable" : true, + "stoppable" : false, + "defined" : false, + "timeZone" : "UTC", + "status" : "STOPPING", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.12.5. 
List All Job Executions For A Specified Date Range Without Step Executions Included + +The job executions endpoint lets you list all job executions. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-executions-thin-list-by-date-request-structure) + +* [Request Parameters](#api-guide-resources-job-executions-thin-list-by-date-request-parameters) + +* [Example Request](#api-guide-resources-job-executions-thin-list-by-date-example-request) + +* [Response Structure](#api-guide-resources-job-executions-thin-list-by-date-response-structure) + +##### Request Structure + +``` +GET /jobs/thinexecutions?page=0&size=10&fromDate=2000-09-24T17%3A00%3A45%2C000&toDate=2050-09-24T18%3A00%3A45%2C000 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter | Description | +|----------|----------------------------------------------------------------------------| +| `page` | The zero-based page number (optional) | +| `size` | The requested page size (optional) | +|`fromDate`|Filter result from a starting date in the format 'yyyy-MM-dd’T’HH:mm:ss,SSS'| +| `toDate` |Filter result up to the `to` date in the format 'yyyy-MM-dd’T’HH:mm:ss,SSS' | + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/thinexecutions?page=0&size=10&fromDate=2000-09-24T17%3A00%3A45%2C000&toDate=2050-09-24T18%3A00%3A45%2C000' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 1605 + +{ + "_embedded" : { + "jobExecutionThinResourceList" : [ { + "executionId" : 2, + "stepExecutionCount" : 0, + "jobId" : 2, + "taskExecutionId" : 2, + "instanceId" : 2, + "name" : "DOCJOB1", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "startDateTime" : "2022-01-18T18:54:42.193+0000", + "duration" : "00:00:00", + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : true, + "abandonable" : true, + "stoppable" : false, + "defined" : true, + "timeZone" : "UTC", + 
"status" : "STOPPED", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions/2" + } + } + }, { + "executionId" : 1, + "stepExecutionCount" : 0, + "jobId" : 1, + "taskExecutionId" : 1, + "instanceId" : 1, + "name" : "DOCJOB", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "startDateTime" : "2022-01-18T18:54:42.189+0000", + "duration" : "00:00:00", + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : false, + "abandonable" : true, + "stoppable" : false, + "defined" : false, + "timeZone" : "UTC", + "status" : "STOPPING", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 2, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.12.6. List All Job Executions For A Specified Job Instance Id Without Step Executions Included + +The job executions endpoint lets you list all job executions. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-executions-thin-list-by-job-instance-id-request-structure) + +* [Request Parameters](#api-guide-resources-job-executions-thin-list-by-job-instance-id-request-parameters) + +* [Example Request](#api-guide-resources-job-executions-thin-list-by-job-instance-id-example-request) + +* [Response Structure](#api-guide-resources-job-executions-thin-list-by-job-instance-id-response-structure) + +##### Request Structure + +``` +GET /jobs/thinexecutions?page=0&size=10&jobInstanceId=1 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +| Parameter | Description | +|---------------|-------------------------------------| +| `page` |The zero-based page number (optional)| +| `size` | The requested page size (optional) | +|`jobInstanceId`|Filter result by the job instance id | + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/thinexecutions?page=0&size=10&jobInstanceId=1' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 943 + +{ + "_embedded" : { + "jobExecutionThinResourceList" : [ { + "executionId" : 1, + "stepExecutionCount" : 0, + "jobId" : 1, + "taskExecutionId" : 1, + "instanceId" : 1, + "name" : "DOCJOB", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "startDateTime" : "2022-01-18T18:54:42.189+0000", + "duration" : "00:00:00", + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : false, + "abandonable" : true, + "stoppable" : false, + "defined" : false, + "timeZone" : "UTC", + "status" : "STOPPING", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.12.7. 
List All Job Executions For A Specified Task Execution Id Without Step Executions Included + +The job executions endpoint lets you list all job executions. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-executions-thin-list-by-task-execution-id-request-structure) + +* [Request Parameters](#api-guide-resources-job-executions-thin-list-by-task-execution-id-request-parameters) + +* [Example Request](#api-guide-resources-job-executions-thin-list-by-task-execution-id-example-request) + +* [Response Structure](#api-guide-resources-job-executions-thin-list-by-task-execution-id-response-structure) + +##### Request Structure + +``` +GET /jobs/thinexecutions?page=0&size=10&taskExecutionId=1 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +| Parameter | Description | +|-----------------|--------------------------------------| +| `page` |The zero-based page number (optional) | +| `size` | The requested page size (optional) | +|`taskExecutionId`|Filter result by the task execution id| + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/thinexecutions?page=0&size=10&taskExecutionId=1' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 943 + +{ + "_embedded" : { + "jobExecutionThinResourceList" : [ { + "executionId" : 1, + "stepExecutionCount" : 0, + "jobId" : 1, + "taskExecutionId" : 1, + "instanceId" : 1, + "name" : "DOCJOB", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "startDateTime" : "2022-01-18T18:54:42.189+0000", + "duration" : "00:00:00", + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : false, + "abandonable" : true, + "stoppable" : false, + "defined" : false, + "timeZone" : "UTC", + "status" : "STOPPING", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/thinexecutions/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : 
"http://localhost:9393/jobs/thinexecutions?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.12.8. Job Execution Detail + +The job executions endpoint lets you get the details about a job execution. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-executions-detail-request-structure) + +* [Request Parameters](#api-guide-resources-job-executions-detail-request-parameters) + +* [Example Request](#api-guide-resources-job-executions-detail-example-request) + +* [Response Structure](#api-guide-resources-job-executions-detail-response-structure) + +##### Request Structure + +``` +GET /jobs/executions/2 HTTP/1.1 +Host: localhost:9393 +``` + +/jobs/executions/{id} + +|Parameter| Description | +|---------|----------------------------------------------| +| `id` |The id of an existing job execution (required)| + +##### Request Parameters + +There are no request parameter for this endpoint. 
+ +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/executions/2' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 1188 + +{ + "executionId" : 2, + "stepExecutionCount" : 0, + "jobId" : 2, + "taskExecutionId" : 2, + "name" : "DOCJOB1", + "startDate" : "2022-01-18", + "startTime" : "18:54:42", + "duration" : "00:00:00", + "jobExecution" : { + "id" : 2, + "version" : 1, + "jobParameters" : { + "parameters" : { } + }, + "jobInstance" : { + "id" : 2, + "jobName" : "DOCJOB1", + "version" : 0 + }, + "stepExecutions" : [ ], + "status" : "STOPPED", + "startTime" : "2022-01-18T18:54:42.193+0000", + "createTime" : "2022-01-18T18:54:42.192+0000", + "endTime" : null, + "lastUpdated" : "2022-01-18T18:54:42.193+0000", + "exitStatus" : { + "exitCode" : "UNKNOWN", + "exitDescription" : "" + }, + "executionContext" : { + "dirty" : false, + "empty" : true, + "values" : [ ] + }, + "failureExceptions" : [ ], + "jobConfigurationName" : null, + "allFailureExceptions" : [ ] + }, + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : true, + "abandonable" : true, + "stoppable" : false, + "defined" : true, + "timeZone" : "UTC", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions/2" + } + } +} +``` + +#### 44.12.9. Stop Job Execution + +The job executions endpoint lets you stop a job execution. 
+The following topics provide more details: + +* [Request structure](#api-guide-resources-job-executions-stop-request-structure) + +* [Request parameters](#api-guide-resources-job-executions-stop-request-parameters) + +* [Example request](#api-guide-resources-job-executions-stop-example-request) + +* [Response structure](#api-guide-resources-job-executions-stop-response-structure) + +##### Request structure + +``` +PUT /jobs/executions/1 HTTP/1.1 +Accept: application/json +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +stop=true +``` + +/jobs/executions/{id} + +|Parameter| Description | +|---------|----------------------------------------------| +| `id` |The id of an existing job execution (required)| + +##### Request parameters + +|Parameter| Description | +|---------|-------------------------------------------| +| `stop` |Sends signal to stop the job if set to true| + +##### Example request + +``` +$ curl 'http://localhost:9393/jobs/executions/1' -i -X PUT \ + -H 'Accept: application/json' \ + -d 'stop=true' +``` + +##### Response structure + +``` +HTTP/1.1 200 OK +``` + +#### 44.12.10. Restart Job Execution + +The job executions endpoint lets you restart a job execution. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-executions-restart-request-structure) + +* [Request Parameters](#api-guide-resources-job-executions-restart-request-parameters) + +* [Example Request](#api-guide-resources-job-executions-restart-example-request) + +* [Response Structure](#api-guide-resources-job-executions-restart-response-structure) + +##### Request Structure + +``` +PUT /jobs/executions/2 HTTP/1.1 +Accept: application/json +Host: localhost:9393 +Content-Type: application/x-www-form-urlencoded + +restart=true +``` + +/jobs/executions/{id} + +|Parameter| Description | +|---------|----------------------------------------------| +| `id` |The id of an existing job execution (required)| + +##### Request Parameters + +|Parameter| Description | +|---------|----------------------------------------------| +|`restart`|Sends signal to restart the job if set to true| + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/executions/2' -i -X PUT \ + -H 'Accept: application/json' \ + -d 'restart=true' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +``` + +### 44.13. Job Instances + +The job instances endpoint provides information about the job instances that are registered with the Spring Cloud Data Flow server. +The following topics provide more details: + +* [List All Job Instances](#api-guide-resources-job-instances-list) + +* [Job Instance Detail](#api-guide-resources-job-instances-detail) + +#### 44.13.1. List All Job Instances + +The job instances endpoint lets you list all job instances. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-instances-list-request-structure) + +* [Request Parameters](#api-guide-resources-job-instances-list-request-parameters) + +* [Example Request](#api-guide-resources-job-instances-list-example-request) + +* [Response Structure](#api-guide-resources-job-instances-list-response-structure) + +##### Request Structure + +``` +GET /jobs/instances?name=DOCJOB&page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|-----------------------------------------| +| `page` | The zero-based page number (optional) | +| `size` | The requested page size (optional) | +| `name` |The name associated with the job instance| + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/instances?name=DOCJOB&page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 1845 + +{ + "_embedded" : { + "jobInstanceResourceList" : [ { + "jobName" : "DOCJOB", + "jobInstanceId" : 1, + "jobExecutions" : [ { + "executionId" : 1, + "stepExecutionCount" : 0, + "jobId" : 1, + "taskExecutionId" : 1, + "name" : "DOCJOB", + "startDate" : "2022-01-18", + "startTime" : "18:54:40", + "duration" : "00:00:00", + "jobExecution" : { + "id" : 1, + "version" : 1, + "jobParameters" : { + "parameters" : { } + }, + "jobInstance" : { + "id" : 1, + "jobName" : "DOCJOB", + "version" : 0 + }, + "stepExecutions" : [ ], + "status" : "STARTED", + "startTime" : "2022-01-18T18:54:40.048+0000", + "createTime" : "2022-01-18T18:54:40.045+0000", + "endTime" : null, + "lastUpdated" : "2022-01-18T18:54:40.048+0000", + "exitStatus" : { + "exitCode" : "UNKNOWN", + "exitDescription" : "" + }, + "executionContext" : { + "dirty" : false, + "empty" : true, + "values" : [ ] + }, + "failureExceptions" : [ ], + "jobConfigurationName" : null, + "allFailureExceptions" : [ ] + }, + "jobParameters" : { }, + 
"jobParametersString" : "", + "restartable" : false, + "abandonable" : false, + "stoppable" : true, + "defined" : false, + "timeZone" : "UTC" + } ], + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/instances/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/instances?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.13.2. Job Instance Detail + +The job instances endpoint lets you list all job instances. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-instances-detail-request-structure) + +* [Request Parameters](#api-guide-resources-job-instances-detail-request-parameters) + +* [Example Request](#api-guide-resources-job-instances-detail-example-request) + +* [Response Structure](#api-guide-resources-job-instances-detail-response-structure) + +##### Request Structure + +``` +GET /jobs/instances/1 HTTP/1.1 +Host: localhost:9393 +``` + +/jobs/instances/{id} + +|Parameter| Description | +|---------|---------------------------------------------| +| `id` |The id of an existing job instance (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. 
+ +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/instances/1' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 1354 + +{ + "jobName" : "DOCJOB", + "jobInstanceId" : 1, + "jobExecutions" : [ { + "executionId" : 1, + "stepExecutionCount" : 0, + "jobId" : 1, + "taskExecutionId" : 1, + "name" : "DOCJOB", + "startDate" : "2022-01-18", + "startTime" : "18:54:40", + "duration" : "00:00:00", + "jobExecution" : { + "id" : 1, + "version" : 1, + "jobParameters" : { + "parameters" : { } + }, + "jobInstance" : { + "id" : 1, + "jobName" : "DOCJOB", + "version" : 0 + }, + "stepExecutions" : [ ], + "status" : "STARTED", + "startTime" : "2022-01-18T18:54:40.048+0000", + "createTime" : "2022-01-18T18:54:40.045+0000", + "endTime" : null, + "lastUpdated" : "2022-01-18T18:54:40.048+0000", + "exitStatus" : { + "exitCode" : "UNKNOWN", + "exitDescription" : "" + }, + "executionContext" : { + "dirty" : false, + "empty" : true, + "values" : [ ] + }, + "failureExceptions" : [ ], + "jobConfigurationName" : null, + "allFailureExceptions" : [ ] + }, + "jobParameters" : { }, + "jobParametersString" : "", + "restartable" : false, + "abandonable" : false, + "stoppable" : true, + "defined" : false, + "timeZone" : "UTC" + } ], + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/instances/1" + } + } +} +``` + +### 44.14. Job Step Executions + +The job step executions endpoint provides information about the job step executions that are registered with the Spring Cloud Data Flow server. +The following topics provide more details: + +* [List All Step Executions For a Job Execution](#api-guide-resources-job-step-executions-list) + +* [Job Step Execution Detail](#api-guide-resources-job-step-execution-detail) + +* [Job Step Execution Progress](#api-guide-resources-job-step-execution-progress) + +#### 44.14.1. 
List All Step Executions For a Job Execution + +The job step executions endpoint lets you list all job step executions. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-step-executions-list-request-structure) + +* [Request Parameters](#api-guide-resources-job-step-executions-list-request-parameters) + +* [Example Request](#api-guide-resources-job-step-executions-list-example-request) + +* [Response Structure](#api-guide-resources-job-step-executions-list-response-structure) + +##### Request Structure + +``` +GET /jobs/executions/1/steps?page=0&size=10 HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +|Parameter| Description | +|---------|-------------------------------------| +| `page` |The zero-based page number (optional)| +| `size` | The requested page size (optional) | + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/executions/1/steps?page=0&size=10' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 1623 + +{ + "_embedded" : { + "stepExecutionResourceList" : [ { + "jobExecutionId" : 1, + "stepExecution" : { + "stepName" : "DOCJOB_STEP", + "id" : 1, + "version" : 0, + "status" : "STARTING", + "readCount" : 0, + "writeCount" : 0, + "commitCount" : 0, + "rollbackCount" : 0, + "readSkipCount" : 0, + "processSkipCount" : 0, + "writeSkipCount" : 0, + "startTime" : "2022-01-18T18:52:35.974+0000", + "endTime" : null, + "lastUpdated" : "2022-01-18T18:52:35.974+0000", + "executionContext" : { + "dirty" : false, + "empty" : true, + "values" : [ ] + }, + "exitStatus" : { + "exitCode" : "EXECUTING", + "exitDescription" : "" + }, + "terminateOnly" : false, + "filterCount" : 0, + "failureExceptions" : [ ], + "jobParameters" : { + "parameters" : { } + }, + "jobExecutionId" : 1, + "skipCount" : 0, + "summary" : "StepExecution: id=1, version=0, name=DOCJOB_STEP, status=STARTING, exitStatus=EXECUTING, readCount=0, filterCount=0, 
writeCount=0 readSkipCount=0, writeSkipCount=0, processSkipCount=0, commitCount=0, rollbackCount=0" + }, + "stepType" : "", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions/1/steps/1" + } + } + } ] + }, + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions/1/steps?page=0&size=10" + } + }, + "page" : { + "size" : 10, + "totalElements" : 1, + "totalPages" : 1, + "number" : 0 + } +} +``` + +#### 44.14.2. Job Step Execution Detail + +The job step executions endpoint lets you get details about a job step execution. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-step-execution-detail-request-structure) + +* [Request Parameters](#api-guide-resources-job-step-execution-detail-request-parameters) + +* [Example Request](#api-guide-resources-job-step-execution-detail-example-request) + +* [Response Structure](#api-guide-resources-job-step-execution-detail-response-structure) + +##### Request Structure + +``` +GET /jobs/executions/1/steps/1 HTTP/1.1 +Host: localhost:9393 +``` + +/jobs/executions/{id}/steps/{stepid} + +|Parameter| Description | +|---------|----------------------------------------------------------------------------| +| `id` | The id of an existing job execution (required) | +|`stepid` |The id of an existing step execution for a specific job execution (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. 
+ +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/executions/1/steps/1' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 1173 + +{ + "jobExecutionId" : 1, + "stepExecution" : { + "stepName" : "DOCJOB_STEP", + "id" : 1, + "version" : 0, + "status" : "STARTING", + "readCount" : 0, + "writeCount" : 0, + "commitCount" : 0, + "rollbackCount" : 0, + "readSkipCount" : 0, + "processSkipCount" : 0, + "writeSkipCount" : 0, + "startTime" : "2022-01-18T18:52:35.974+0000", + "endTime" : null, + "lastUpdated" : "2022-01-18T18:52:35.974+0000", + "executionContext" : { + "dirty" : false, + "empty" : true, + "values" : [ ] + }, + "exitStatus" : { + "exitCode" : "EXECUTING", + "exitDescription" : "" + }, + "terminateOnly" : false, + "filterCount" : 0, + "failureExceptions" : [ ], + "jobParameters" : { + "parameters" : { } + }, + "jobExecutionId" : 1, + "skipCount" : 0, + "summary" : "StepExecution: id=1, version=0, name=DOCJOB_STEP, status=STARTING, exitStatus=EXECUTING, readCount=0, filterCount=0, writeCount=0 readSkipCount=0, writeSkipCount=0, processSkipCount=0, commitCount=0, rollbackCount=0" + }, + "stepType" : "", + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions/1/steps/1" + } + } +} +``` + +#### 44.14.3. Job Step Execution Progress + +The job step executions endpoint lets you get details about the progress of a job step execution. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-job-step-execution-progress-request-structure) + +* [Request Parameters](#api-guide-resources-job-step-execution-progress-request-parameters) + +* [Example Request](#api-guide-resources-job-step-execution-progress-example-request) + +* [Response Structure](#api-guide-resources-job-step-execution-progress-response-structure) + +##### Request Structure + +``` +GET /jobs/executions/1/steps/1/progress HTTP/1.1 +Host: localhost:9393 +``` + +/jobs/executions/{id}/steps/{stepid}/progress + +|Parameter| Description | +|---------|----------------------------------------------------------------------------| +| `id` | The id of an existing job execution (required) | +|`stepid` |The id of an existing step execution for a specific job execution (required)| + +##### Request Parameters + +There are no request parameters for this endpoint. + +##### Example Request + +``` +$ curl 'http://localhost:9393/jobs/executions/1/steps/1/progress' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/hal+json +Content-Length: 2676 + +{ + "stepExecution" : { + "stepName" : "DOCJOB_STEP", + "id" : 1, + "version" : 0, + "status" : "STARTING", + "readCount" : 0, + "writeCount" : 0, + "commitCount" : 0, + "rollbackCount" : 0, + "readSkipCount" : 0, + "processSkipCount" : 0, + "writeSkipCount" : 0, + "startTime" : "2022-01-18T18:52:35.974+0000", + "endTime" : null, + "lastUpdated" : "2022-01-18T18:52:35.974+0000", + "executionContext" : { + "dirty" : false, + "empty" : true, + "values" : [ ] + }, + "exitStatus" : { + "exitCode" : "EXECUTING", + "exitDescription" : "" + }, + "terminateOnly" : false, + "filterCount" : 0, + "failureExceptions" : [ ], + "jobParameters" : { + "parameters" : { } + }, + "jobExecutionId" : 1, + "skipCount" : 0, + "summary" : "StepExecution: id=1, version=0, name=DOCJOB_STEP, status=STARTING, exitStatus=EXECUTING, readCount=0, filterCount=0, 
writeCount=0 readSkipCount=0, writeSkipCount=0, processSkipCount=0, commitCount=0, rollbackCount=0" + }, + "stepExecutionHistory" : { + "stepName" : "DOCJOB_STEP", + "count" : 0, + "commitCount" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + }, + "rollbackCount" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + }, + "readCount" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + }, + "writeCount" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + }, + "filterCount" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + }, + "readSkipCount" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + }, + "writeSkipCount" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + }, + "processSkipCount" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + }, + "duration" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + }, + "durationPerRead" : { + "count" : 0, + "min" : 0.0, + "max" : 0.0, + "standardDeviation" : 0.0, + "mean" : 0.0 + } + }, + "percentageComplete" : 0.5, + "finished" : false, + "duration" : 151.0, + "_links" : { + "self" : { + "href" : "http://localhost:9393/jobs/executions/1/steps/1" + } + } +} +``` + +### 44.15. Runtime Information about Applications + +You can get information about running apps known to the system, either globally or individually. 
+The following topics provide more details: + +* [Listing All Applications at Runtime](#api-guide-resources-runtime-information-applications-listing-all) + +* [Querying All Instances of a Single App](#api-guide-resources-runtime-information-applications-querying-all-instances-single-app) + +* [Querying a Single Instance of a Single App](#api-guide-resources-runtime-information-applications-querying-single-instance-single-app) + +#### 44.15.1. Listing All Applications at Runtime + +To retrieve information about all instances of all apps, query the `/runtime/apps` endpoint by using `GET`. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-runtime-information-applications-listing-all-request-structure) + +* [Example Request](#api-guide-resources-runtime-information-applications-listing-all-example-request) + +* [Response Structure](#api-guide-resources-runtime-information-applications-listing-all-response-structure) + +##### Request Structure + +``` +GET /runtime/apps HTTP/1.1 +Accept: application/json +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/runtime/apps' -i -X GET \ + -H 'Accept: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 209 + +{ + "_links" : { + "self" : { + "href" : "http://localhost:9393/runtime/apps?page=0&size=20" + } + }, + "page" : { + "size" : 20, + "totalElements" : 0, + "totalPages" : 0, + "number" : 0 + } +} +``` + +#### 44.15.2. Querying All Instances of a Single App + +To retrieve information about all instances of a particular app, query the `/runtime/apps//instances` endpoint by using `GET`. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-runtime-information-applications-querying-all-instances-single-app-request-structure) + +* [Example Request](#api-guide-resources-runtime-information-applications-querying-all-instances-single-app-example-request) + +* [Response Structure](#api-guide-resources-runtime-information-applications-querying-all-instances-single-app-response-structure) + +##### Request Structure + +``` +GET /runtime/apps HTTP/1.1 +Accept: application/json +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/runtime/apps' -i -X GET \ + -H 'Accept: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 209 + +{ + "_links" : { + "self" : { + "href" : "http://localhost:9393/runtime/apps?page=0&size=20" + } + }, + "page" : { + "size" : 20, + "totalElements" : 0, + "totalPages" : 0, + "number" : 0 + } +} +``` + +#### 44.15.3. Querying a Single Instance of a Single App + +To retrieve information about a particular instance of a particular application, query the `/runtime/apps//instances/` endpoint by using `GET`. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-runtime-information-applications-querying-single-instance-single-app-request-structure) + +* [Example Request](#api-guide-resources-runtime-information-applications-querying-single-instance-single-app-example-request) + +* [Response Structure](#api-guide-resources-runtime-information-applications-querying-single-instance-single-app-response-structure) + +##### Request Structure + +``` +GET /runtime/apps HTTP/1.1 +Accept: application/json +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/runtime/apps' -i -X GET \ + -H 'Accept: application/json' +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 209 + +{ + "_links" : { + "self" : { + "href" : "http://localhost:9393/runtime/apps?page=0&size=20" + } + }, + "page" : { + "size" : 20, + "totalElements" : 0, + "totalPages" : 0, + "number" : 0 + } +} +``` + +### 44.16. Stream Logs + +You can get the application logs of the stream for the entire stream or a specific application inside the stream. +The following topics provide more details: + +* [Get the applications' logs by the stream name](#api-guide-resources-stream-logs-by-stream-name) + +* [Get the logs of a specific application from the stream](#api-guide-resources-stream-logs-by-app-name) + +#### 44.16.1. Get the applications' logs by the stream name + +Use the HTTP `GET` method with the `/streams/logs/` REST endpoint to retrieve all the applications' logs for the given stream name. 
+The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-logs-by-stream-name-request-structure) + +* [Example Request](#api-guide-resources-stream-logs-by-stream-name-example-request) + +* [Response Structure](#api-guide-resources-stream-logs-by-stream-name-response-structure) + +##### Request Structure + +``` +GET /streams/logs/ticktock HTTP/1.1 +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/logs/ticktock' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 93 + +{ + "logs" : { + "ticktock-time-v1" : "Logs-time", + "ticktock-log-v1" : "Logs-log" + } +} +``` + +#### 44.16.2. Get the logs of a specific application from the stream + +To retrieve the logs of a specific application from the stream, query the `/streams/logs//` endpoint using the `GET` HTTP method. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-stream-logs-by-app-name-request-structure) + +* [Example Request](#api-guide-resources-stream-logs-by-app-name-example-request) + +* [Response Structure](#api-guide-resources-stream-logs-by-app-name-response-structure) + +##### Request Structure + +``` +GET /streams/logs/ticktock/ticktock-log-v1 HTTP/1.1 +Host: localhost:9393 +``` + +##### Example Request + +``` +$ curl 'http://localhost:9393/streams/logs/ticktock/ticktock-log-v1' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 55 + +{ + "logs" : { + "ticktock-log-v1" : "Logs-log" + } +} +``` + +### 44.17. Task Logs + +You can get the task execution log for a specific task execution. + +The following topic provides more details: + +* [Get the task execution log](#api-guide-resources-stream-logs-by-task-id) + +#### 44.17.1. 
Get the task execution log + +To retrieve the logs of the task execution, query the `/tasks/logs/` endpoint by using the HTTP `GET` method.. +The following topics provide more details: + +* [Request Structure](#api-guide-resources-task-logs-by-task-id-request-structure) + +* [Request Parameters](#api-guide-resources-task-logs-by-task-id-request-parameters) + +* [Example Request](#api-guide-resources-task-logs-by-task-id-example-request) + +* [Response Structure](#api-guide-resources-task-logs-by-task-id-response-structure) + +##### Request Structure + +``` +GET /tasks/logs/taskA-a5f123da-9a3b-42e8-b839-2eb441c561de?platformName=default HTTP/1.1 +Host: localhost:9393 +``` + +##### Request Parameters + +| Parameter | Description | +|--------------|----------------------------------------------| +|`platformName`|The name of the platform the task is launched.| + +##### Example Request + +``` +$ curl 'http://localhost:9393/tasks/logs/taskA-a5f123da-9a3b-42e8-b839-2eb441c561de?platformName=default' -i -X GET +``` + +##### Response Structure + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 10043 + +"stdout:\n2022-01-18 18:54:53.319 INFO 3680 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.spring[email protected]4e515669: startup date [Tue Jan 18 18:54:53 UTC 2022]; root of context hierarchy\n2022-01-18 18:54:53.728 INFO 3680 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'configurationPropertiesRebinderAutoConfiguration' of type [org.springframework.cloud.autoconfigure.ConfigurationPropertiesRebinderAutoConfiguration$$EnhancerBySpringCGLIB$$b056ca48] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)\n\n . 
____ _ __ _ _\n /\\\\ / ___'_ __ _ _(_)_ __ __ _ \\ \\ \\ \\\n( ( )\\___ | '_ | '_| | '_ \\/ _` | \\ \\ \\ \\\n \\\\/ ___)| |_)| | | | | || (_| | ) ) ) )\n ' |____| .__|_| |_|_| |_\\__, | / / / /\n =========|_|==============|___/=/_/_/_/\n :: Spring Boot :: (v1.5.2.RELEASE)\n\n2022-01-18 18:54:53.937 INFO 3680 --- [ main] c.c.c.ConfigServicePropertySourceLocator : Fetching config from server at: http://localhost:8888\n2022-01-18 18:54:54.009 WARN 3680 --- [ main] c.c.c.ConfigServicePropertySourceLocator : Could not locate PropertySource: I/O error on GET request for \"http://localhost:8888/timestamp-task/default\": Connection refused (Connection refused); nested exception is java.net.ConnectException: Connection refused (Connection refused)\n2022-01-18 18:54:54.019 INFO 3680 --- [ main] o.s.c.t.a.t.TimestampTaskApplication : No active profile set, falling back to default profiles: default\n2022-01-18 18:54:54.042 INFO 3680 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.spring[email protected]445b84c0: startup date [Tue Jan 18 18:54:54 UTC 2022]; parent: org.spring[email protected]4e515669\n2022-01-18 18:54:54.607 INFO 3680 --- [ main] o.s.cloud.context.scope.GenericScope : BeanFactory id=1e36064f-ccbe-3d2f-9196-128427cc78a0\n2022-01-18 18:54:54.696 INFO 3680 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'org.springframework.cloud.autoconfigure.ConfigurationPropertiesRebinderAutoConfiguration' of type [org.springframework.cloud.autoconfigure.ConfigurationPropertiesRebinderAutoConfiguration$$EnhancerBySpringCGLIB$$b056ca48] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)\n2022-01-18 18:54:54.705 INFO 3680 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'org.springframework.transaction.annotation.ProxyTransactionManagementConfiguration' of type 
[org.springframework.transaction.annotation.ProxyTransactionManagementConfiguration$$EnhancerBySpringCGLIB$$943cc74b] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)\n2022-01-18 18:54:55.229 INFO 3680 --- [ main] o.s.jdbc.datasource.init.ScriptUtils : Executing SQL script from class path resource [org/springframework/cloud/task/schema-h2.sql]\n2022-01-18 18:54:55.258 INFO 3680 --- [ main] o.s.jdbc.datasource.init.ScriptUtils : Executed SQL script from class path resource [org/springframework/cloud/task/schema-h2.sql] in 29 ms.\n2022-01-18 18:54:55.582 INFO 3680 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Registering beans for JMX exposure on startup\n2022-01-18 18:54:55.589 INFO 3680 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Bean with name 'configurationPropertiesRebinder' has been autodetected for JMX exposure\n2022-01-18 18:54:55.589 INFO 3680 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Bean with name 'environmentManager' has been autodetected for JMX exposure\n2022-01-18 18:54:55.591 INFO 3680 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Bean with name 'refreshScope' has been autodetected for JMX exposure\n2022-01-18 18:54:55.592 INFO 3680 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Located managed bean 'environmentManager': registering with JMX server as MBean [taskA-a5f123da-9a3b-42e8-b839-2eb441c561de:name=environmentManager,type=EnvironmentManager]\n2022-01-18 18:54:55.603 INFO 3680 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Located managed bean 'refreshScope': registering with JMX server as MBean [taskA-a5f123da-9a3b-42e8-b839-2eb441c561de:name=refreshScope,type=RefreshScope]\n2022-01-18 18:54:55.613 INFO 3680 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Located managed bean 'configurationPropertiesRebinder': registering with JMX server as MBean 
[taskA-a5f123da-9a3b-42e8-b839-2eb441c561de:name=configurationPropertiesRebinder,context=445b84c0,type=ConfigurationPropertiesRebinder]\n2022-01-18 18:54:55.690 INFO 3680 --- [ main] o.s.c.support.DefaultLifecycleProcessor : Starting beans in phase 0\n2022-01-18 18:54:55.702 WARN 3680 --- [ main] s.c.a.AnnotationConfigApplicationContext : Exception encountered during context initialization - cancelling refresh attempt: org.springframework.context.ApplicationContextException: Failed to start bean 'taskLifecycleListener'; nested exception is java.lang.IllegalArgumentException: Invalid TaskExecution, ID 1 not found\n2022-01-18 18:54:55.703 INFO 3680 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Unregistering JMX-exposed beans on shutdown\n2022-01-18 18:54:55.703 INFO 3680 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Unregistering JMX-exposed beans\n2022-01-18 18:54:55.703 ERROR 3680 --- [ main] o.s.c.t.listener.TaskLifecycleListener : An event to end a task has been received for a task that has not yet started.\n2022-01-18 18:54:55.709 INFO 3680 --- [ main] utoConfigurationReportLoggingInitializer : \n\nError starting ApplicationContext. 
To display the auto-configuration report re-run your application with 'debug' enabled.\n2022-01-18 18:54:55.715 ERROR 3680 --- [ main] o.s.boot.SpringApplication : Application startup failed\n\norg.springframework.context.ApplicationContextException: Failed to start bean 'taskLifecycleListener'; nested exception is java.lang.IllegalArgumentException: Invalid TaskExecution, ID 1 not found\n\tat org.springframework.context.support.DefaultLifecycleProcessor.doStart(DefaultLifecycleProcessor.java:178) ~[spring-context-4.3.7.RELEASE.jar!/:4.3.7.RELEASE]\n\tat org.springframework.context.support.DefaultLifecycleProcessor.access$200(DefaultLifecycleProcessor.java:50) ~[spring-context-4.3.7.RELEASE.jar!/:4.3.7.RELEASE]\n\tat org.springframework.context.support.DefaultLifecycleProcessor$LifecycleGroup.start(DefaultLifecycleProcessor.java:348) ~[spring-context-4.3.7.RELEASE.jar!/:4.3.7.RELEASE]\n\tat org.springframework.context.support.DefaultLifecycleProcessor.startBeans(DefaultLifecycleProcessor.java:151) ~[spring-context-4.3.7.RELEASE.jar!/:4.3.7.RELEASE]\n\tat org.springframework.context.support.DefaultLifecycleProcessor.onRefresh(DefaultLifecycleProcessor.java:114) ~[spring-context-4.3.7.RELEASE.jar!/:4.3.7.RELEASE]\n\tat org.springframework.context.support.AbstractApplicationContext.finishRefresh(AbstractApplicationContext.java:879) ~[spring-context-4.3.7.RELEASE.jar!/:4.3.7.RELEASE]\n\tat org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:545) ~[spring-context-4.3.7.RELEASE.jar!/:4.3.7.RELEASE]\n\tat org.springframework.boot.SpringApplication.refresh(SpringApplication.java:737) [spring-boot-1.5.2.RELEASE.jar!/:1.5.2.RELEASE]\n\tat org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:370) [spring-boot-1.5.2.RELEASE.jar!/:1.5.2.RELEASE]\n\tat org.springframework.boot.SpringApplication.run(SpringApplication.java:314) [spring-boot-1.5.2.RELEASE.jar!/:1.5.2.RELEASE]\n\tat 
org.springframework.boot.SpringApplication.run(SpringApplication.java:1162) [spring-boot-1.5.2.RELEASE.jar!/:1.5.2.RELEASE]\n\tat org.springframework.boot.SpringApplication.run(SpringApplication.java:1151) [spring-boot-1.5.2.RELEASE.jar!/:1.5.2.RELEASE]\n\tat org.springframework.cloud.task.app.timestamp.TimestampTaskApplication.main(TimestampTaskApplication.java:29) [classes!/:1.2.0.RELEASE]\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:1.8.0_322]\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:1.8.0_322]\n\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_322]\n\tat java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_322]\n\tat org.springframework.boot.loader.MainMethodRunner.run(MainMethodRunner.java:48) [timestamp-task-1.2.0.RELEASE.jar:1.2.0.RELEASE]\n\tat org.springframework.boot.loader.Launcher.launch(Launcher.java:87) [timestamp-task-1.2.0.RELEASE.jar:1.2.0.RELEASE]\n\tat org.springframework.boot.loader.Launcher.launch(Launcher.java:50) [timestamp-task-1.2.0.RELEASE.jar:1.2.0.RELEASE]\n\tat org.springframework.boot.loader.JarLauncher.main(JarLauncher.java:51) [timestamp-task-1.2.0.RELEASE.jar:1.2.0.RELEASE]\nCaused by: java.lang.IllegalArgumentException: Invalid TaskExecution, ID 1 not found\n\tat org.springframework.util.Assert.notNull(Assert.java:134) ~[spring-core-4.3.7.RELEASE.jar!/:4.3.7.RELEASE]\n\tat org.springframework.cloud.task.listener.TaskLifecycleListener.doTaskStart(TaskLifecycleListener.java:200) ~[spring-cloud-task-core-1.2.0.RELEASE.jar!/:1.2.0.RELEASE]\n\tat org.springframework.cloud.task.listener.TaskLifecycleListener.start(TaskLifecycleListener.java:282) ~[spring-cloud-task-core-1.2.0.RELEASE.jar!/:1.2.0.RELEASE]\n\tat org.springframework.context.support.DefaultLifecycleProcessor.doStart(DefaultLifecycleProcessor.java:175) ~[spring-context-4.3.7.RELEASE.jar!/:4.3.7.RELEASE]\n\t... 
20 common frames omitted\n\n" +``` + +# Appendices + +Having trouble with Spring Cloud Data Flow, We’d like to help! + +* Ask a question. We monitor [stackoverflow.com](https://stackoverflow.com) for questions + tagged with [`spring-cloud-dataflow`](https://stackoverflow.com/tags/spring-cloud-dataflow). + +* Report bugs with Spring Cloud Data Flow at [github.com/spring-cloud/spring-cloud-dataflow/issues](https://github.com/spring-cloud/spring-cloud-dataflow/issues). + +## Appendix A: Data Flow Template + +As described in API Guide chapter, Spring Cloud Data Flow’s functionality is completely exposed through REST endpoints. +While you can use those endpoints directly, Spring Cloud Data Flow also provides a Java-based API, which makes using those REST endpoints even easier. + +The central entry point is the `DataFlowTemplate` class in the `org.springframework.cloud.dataflow.rest.client` package. + +This class implements the `DataFlowOperations` interface and delegates to the following sub-templates that provide the specific functionality for each feature-set: + +| Interface | Description | +|-----------------------------|----------------------------------------------| +| `StreamOperations` | REST client for stream operations | +| `CounterOperations` | REST client for counter operations | +|`FieldValueCounterOperations`|REST client for field value counter operations| +|`AggregateCounterOperations` | REST client for aggregate counter operations | +| `TaskOperations` | REST client for task operations | +| `JobOperations` | REST client for job operations | +| `AppRegistryOperations` | REST client for app registry operations | +| `CompletionOperations` | REST client for completion operations | +| `RuntimeOperations` | REST Client for runtime operations | + +When the `DataFlowTemplate` is being initialized, the sub-templates can be discovered through the REST relations, which are provided by HATEOAS (Hypermedia as the Engine of Application State). 
+ +| |If a resource cannot be resolved, the respective sub-template results
in NULL. A common cause is that Spring Cloud Data Flow allows for specific
sets of features to be enabled or disabled when launching. For more information, see one of the [local](#configuration-local-enable-disable-specific-features), [Cloud Foundry](#configuration-cloudfoundry-enable-disable-specific-features), or [Kubernetes](#configuration-kubernetes-enable-disable-specific-features) configuration chapters, depending on where you deploy your application.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### A.1. Using the Data Flow Template + +When you use the Data Flow Template, the only needed Data Flow dependency is the +Spring Cloud Data Flow Rest Client, as shown in the following Maven snippet: + +``` + + org.springframework.cloud + spring-cloud-dataflow-rest-client + 2.9.2 + +``` + +With that dependency, you get the `DataFlowTemplate` class as well as all the dependencies needed to make calls to a Spring Cloud Data Flow server. + +When instantiating the `DataFlowTemplate`, you also pass in a `RestTemplate`. +Note that the needed `RestTemplate` requires some additional configuration to be valid in the context of the `DataFlowTemplate`. 
+When declaring a `RestTemplate` as a bean, the following configuration suffices: + +``` + @Bean + public static RestTemplate restTemplate() { + RestTemplate restTemplate = new RestTemplate(); + restTemplate.setErrorHandler(new VndErrorResponseErrorHandler(restTemplate.getMessageConverters())); + for(HttpMessageConverter converter : restTemplate.getMessageConverters()) { + if (converter instanceof MappingJackson2HttpMessageConverter) { + final MappingJackson2HttpMessageConverter jacksonConverter = + (MappingJackson2HttpMessageConverter) converter; + jacksonConverter.getObjectMapper() + .registerModule(new Jackson2HalModule()) + .addMixIn(JobExecution.class, JobExecutionJacksonMixIn.class) + .addMixIn(JobParameters.class, JobParametersJacksonMixIn.class) + .addMixIn(JobParameter.class, JobParameterJacksonMixIn.class) + .addMixIn(JobInstance.class, JobInstanceJacksonMixIn.class) + .addMixIn(ExitStatus.class, ExitStatusJacksonMixIn.class) + .addMixIn(StepExecution.class, StepExecutionJacksonMixIn.class) + .addMixIn(ExecutionContext.class, ExecutionContextJacksonMixIn.class) + .addMixIn(StepExecutionHistory.class, StepExecutionHistoryJacksonMixIn.class); + } + } + return restTemplate; + } +``` + +| |You can also get a pre-configured `RestTemplate` by using`DataFlowTemplate.getDefaultDataflowRestTemplate();`| +|---|-------------------------------------------------------------------------------------------------------------| + +Now you can instantiate the `DataFlowTemplate` with the following code: + +``` +DataFlowTemplate dataFlowTemplate = new DataFlowTemplate( + new URI("http://localhost:9393/"), restTemplate); (1) +``` + +|**1**|The `URI` points to the ROOT of your Spring Cloud Data Flow Server.| +|-----|-------------------------------------------------------------------| + +Depending on your requirements, you can now make calls to the server. 
For instance, +if you want to get a list of the currently available applications, you can run the following code: + +``` +PagedResources apps = dataFlowTemplate.appRegistryOperations().list(); + +System.out.println(String.format("Retrieved %s application(s)", + apps.getContent().size())); + +for (AppRegistrationResource app : apps.getContent()) { + System.out.println(String.format("App Name: %s, App Type: %s, App URI: %s", + app.getName(), + app.getType(), + app.getUri())); +} +``` + +### A.2. Data Flow Template and Security + +When using the `DataFlowTemplate`, you can also provide all the security-related +options as if you were using the *Data Flow Shell*. In fact, the *Data Flow Shell*uses the `DataFlowTemplate` for all its operations. + +To let you get started, we provide a `HttpClientConfigurer` that uses the builder +pattern to set the various security-related options: + +``` + HttpClientConfigurer + .create(targetUri) (1) + .basicAuthCredentials(username, password) (2) + .skipTlsCertificateVerification() (3) + .withProxyCredentials(proxyUri, proxyUsername, proxyPassword) (4) + .addInterceptor(interceptor) (5) + .buildClientHttpRequestFactory() (6) +``` + +|**1**| Creates a HttpClientConfigurer with the provided target URI. | +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Sets the credentials for basic authentication (Using OAuth2 Password Grant) | +|**3**| Skip SSL certificate verification (**Use for DEVELOPMENT ONLY!**) | +|**4**| Configure any Proxy settings | +|**5**|Add a custom interceptor e.g. to set the OAuth2 Authorization header. This allows
you to pass an OAuth2 Access Token instead of username/password credentials.| +|**6**| Builds the `ClientHttpRequestFactory` that can be set on the `RestTemplate`. | + +Once the `HttpClientConfigurer` is configured, you can use its `buildClientHttpRequestFactory`to build the `ClientHttpRequestFactory` and then set the corresponding +property on the `RestTemplate`. You can then instantiate the actual `DataFlowTemplate`using that `RestTemplate`. + +To configure *Basic Authentication*, the following setup is required: + +``` + RestTemplate restTemplate = DataFlowTemplate.getDefaultDataflowRestTemplate(); + HttpClientConfigurer httpClientConfigurer = HttpClientConfigurer.create("http://localhost:9393"); + + httpClientConfigurer.basicAuthCredentials("my_username", "my_password"); + restTemplate.setRequestFactory(httpClientConfigurer.buildClientHttpRequestFactory()); + + DataFlowTemplate dataFlowTemplate = new DataFlowTemplate("http://localhost:9393", restTemplate); +``` + +You can find a sample application as part of the[spring-cloud-dataflow-samples](https://github.com/spring-cloud/spring-cloud-dataflow-samples/tree/master/dataflow-template-example) repository +on GitHub. + +## Appendix B: “How-to” guides + +This section provides answers to some common ‘how do I do that…​’ questions that often arise when people use Spring Cloud Data Flow. + +If you have a specific problem that we do not cover here, you might want to check out [stackoverflow.com](https://stackoverflow.com/tags/spring-cloud-dataflow) to see if someone has already provided an answer. +That is also a great place to ask new questions (use the `spring-cloud-dataflow` tag). + +We are also more than happy to extend this section. If you want to add a “how-to”, you can send us a [pull request](https://github.com/spring-cloud/spring-cloud-dataflow). + +### B.1. 
Configure Maven Properties + +You can set the Maven properties, such as the local Maven repository location, remote Maven repositories, authentication credentials, and proxy server properties through command-line properties when you start the Data Flow server. +Alternatively, you can set the properties by setting the `SPRING_APPLICATION_JSON` environment property for the Data Flow server. + +The remote Maven repositories need to be configured explicitly if the applications are resolved by using the Maven repository, except for a `local` Data Flow server. +The other Data Flow server implementations (which use Maven resources for application artifacts resolution) have no default value for remote repositories. +The `local` server has `[repo.spring.io/libs-snapshot](https://repo.spring.io/libs-snapshot)` as the default remote repository. + +To pass the properties as command-line options, run the server with a command similar to the following: + +``` +$ java -jar .jar --maven.localRepository=mylocal +--maven.remote-repositories.repo1.url=https://repo1 +--maven.remote-repositories.repo1.auth.username=repo1user +--maven.remote-repositories.repo1.auth.password=repo1pass +--maven.remote-repositories.repo2.url=https://repo2 --maven.proxy.host=proxyhost +--maven.proxy.port=9018 --maven.proxy.auth.username=proxyuser +--maven.proxy.auth.password=proxypass +``` + +You can also use the `SPRING_APPLICATION_JSON` environment property: + +``` +export SPRING_APPLICATION_JSON='{ "maven": { "local-repository": "local","remote-repositories": { "repo1": { "url": "https://repo1", "auth": { "username": "repo1user", "password": "repo1pass" } }, +"repo2": { "url": "https://repo2" } }, "proxy": { "host": "proxyhost", "port": 9018, "auth": { "username": "proxyuser", "password": "proxypass" } } } }' +``` + +Here is the same content in nicely formatted JSON: + +``` +SPRING_APPLICATION_JSON='{ + "maven": { + "local-repository": "local", + "remote-repositories": { + "repo1": { + "url": 
"https://repo1", + "auth": { + "username": "repo1user", + "password": "repo1pass" + } + }, + "repo2": { + "url": "https://repo2" + } + }, + "proxy": { + "host": "proxyhost", + "port": 9018, + "auth": { + "username": "proxyuser", + "password": "proxypass" + } + } + } +}' +``` + +| |Depending on the Spring Cloud Data Flow server implementation, you may have to pass the environment properties by using the platform specific environment-setting capabilities. For instance, in Cloud Foundry, you would pass them as `cf set-env SPRING_APPLICATION_JSON`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### B.2. Troubleshooting + +This section covers how to troubleshoot Spring Cloud Data Flow on your platform of choice. See the Troubleshooting sections of the microsite for [Stream](https://dataflow.spring.io/docs/stream-developer-guides/troubleshooting/) and [Batch](https://dataflow.spring.io/docs/batch-developer-guides/troubleshooting/) processing. + +### B.3. Frequently Asked Questions + +In this section, we review the frequently asked questions for Spring Cloud Data Flow. +See the [Frequently Asked Questions](https://dataflow.spring.io/docs/resources/faq/) section of the microsite for more information. + +## Appendix C: Building + +This appendix describes how to build Spring Cloud Data Flow. + +To build the source, you need to install JDK 1.8. + +The build uses the Maven wrapper so that you do not have to install a specific version of Maven. + +The main build command is as follows: + +``` +$ ./mvnw clean install +``` + +To speed up the build, you can add `-DskipTests` to avoid running the tests. + +| |You can also install Maven (\>=3.3.3) yourself and run the `mvn` command in place of `./mvnw` in the examples below.
If you do that, you also might need to add `-P spring` if your local Maven settings do not contain repository declarations for Spring pre-release artifacts.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |You might need to increase the amount of memory available to Maven by setting a `MAVEN_OPTS` environment variable with a value similar to `-Xmx512m -XX:MaxPermSize=128m`.
We try to cover this in the `.mvn` configuration, so, if you find you have to do it to make a build succeed, please raise a ticket to get the settings added to source control.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### C.1. Documentation + +There is a `full` profile that generates documentation. You can build only the documentation by using the following command: + +``` +$ ./mvnw clean package -DskipTests -P full -pl {project-artifactId} -am +``` + +### C.2. Working with the Code + +If you do not have a favorite IDE, we recommend that you use [Spring Tools Suite](https://spring.io/tools) or [Eclipse](https://www.eclipse.org) when working with the code. +We use the [m2eclipse](https://www.eclipse.org/m2e/) Eclipse plugin for Maven support. +Other IDEs and tools generally also work without issue. + +#### C.2.1. Importing into Eclipse with m2eclipse + +We recommend the [m2eclipe](https://www.eclipse.org/m2e/) eclipse plugin when working with Eclipse. +If you do not already have m2eclipse installed, it is available from the Eclipse marketplace. + +Unfortunately, m2e does not yet support Maven 3.3. +Consequently, once the projects are imported into Eclipse, you also need to tell m2eclipse to use the `.settings.xml` file for the projects. +If you do not do this, you may see many different errors related to the POMs in the projects. +To do so: + +1. Open your Eclipse preferences. + +2. Expand the **Maven preferences**. + +3. Select **User Settings**. + +4. In the **User Settings** field, click **Browse** and navigate to the Spring Cloud project you imported. + +5. Select the `.settings.xml` file in that project. + +6. Click **Apply**. + +7. Click **OK**. 
+ +| |Alternatively, you can copy the repository settings from Spring Cloud’s [`.settings.xml`](https://github.com/spring-cloud/spring-cloud-build/blob/master/.settings.xml) file into your own `~/.m2/settings.xml`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### C.2.2. Importing into Eclipse without m2eclipse + +If you prefer not to use m2eclipse, you can generate Eclipse project metadata by using the following command: + +``` +$ ./mvnw eclipse:eclipse +``` + +You can import the generated Eclipse projects by selecting **Import existing projects**from the **File** menu. + +## Appendix D: Contributing + +Spring Cloud is released under the non-restrictive Apache 2.0 license and follows a very standard Github development process, using Github tracker for issues and merging pull requests into the master branch. +If you want to contribute even something trivial, please do not hesitate, but do please follow the guidelines in this appendix. + +### D.1. Sign the Contributor License Agreement + +Before we accept a non-trivial (anything more than correcting a typographical error) patch or pull request, we need you to sign the [contributor’s agreement](https://cla.pivotal.io). +Signing the contributor’s agreement does not grant anyone commit rights to the main repository, but it does mean that we can accept your contributions, and you get an author credit if we do. +Active contributors might be asked to join the core team and be given the ability to merge pull requests. + +### D.2. Code Conventions and Housekeeping + +None of the following guidelines is essential for a pull request, but they all help your fellow developers understand and work with your code. +They can also be added after the original pull request but before a merge. + +* Use the Spring Framework code format conventions. 
If you use Eclipse, you can import formatter settings by using the `eclipse-code-formatter.xml` file from the [Spring Cloud Build](https://github.com/spring-cloud/spring-cloud-build/blob/master/spring-cloud-dependencies-parent/eclipse-code-formatter.xml) project. + If you use IntelliJ, you can use the [Eclipse Code Formatter Plugin](https://plugins.jetbrains.com/plugin/6546) to import the same file. + +* Make sure all new `.java` files have a simple Javadoc class comment with at least an `@author` tag identifying you, and preferably at least a paragraph describing the class’s purpose. + +* Add the ASF license header comment to all new `.java` files (to do so, copy it from existing files in the project). + +* Add yourself as an `@author` to the .java files that you modify substantially (more than cosmetic changes). + +* Add some Javadocs and, if you change the namespace, some XSD doc elements. + +* A few unit tests would help a lot as well. Someone has to do it, and your fellow developers appreciate the effort. + +* If no one else uses your branch, rebase it against the current master (or other target branch in the main project). + +* When writing a commit message, follow [these conventions](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). + If you fix an existing issue, add `Fixes gh-XXXX` (where XXXX is the issue number) at the end of the commit message. + + \== Identity Providers + +This appendix contains information how specific providers can be set up to work +with Data Flow security. + +At this writing, Azure is the only identity provider. + +### D.3. Azure + +Azure AD (Active Directory) is a fully fledged identity provider that provide a wide range of features +around authentication and authorization. As with any other provider, it has its +own nuances, meaning care must be taken to set it up. + +In this section, we go through how OAuth2 setup is done for AD and +Spring Cloud Data Flow. 
+ +| |You need full organization access rights to set up everything correctly.| +|---|------------------------------------------------------------------------| + +#### D.3.1. Creating a new AD Environment + +To get started, create a new Active Directory environment. Choose a +type as Azure Active Directory (not the b2c type) and then pick your organization name and +initial domain. The following image shows the settings: + +Create AD Environment + +#### D.3.2. Creating a New App Registration + +App registration is where OAuth clients are created to be used by OAuth +applications. At minimum, you need to create two clients, one for the +Data Flow and Skipper servers and one for the Data Flow shell, as these two have +slightly different configurations. Server applications can be considered to be +trusted applications while shell is not trusted (because users can see its full +configuration). + +NOTE: +We recommend using the same OAuth client for both the Data Flow and the Skipper servers. While +you can use different clients, it currently would not provide any value, as the +configurations need to be the same. + +The following image shows the settings for creating a new app registration: + +Create App Registration + +| |A client secret, when needed, is created under `Certificates & secrets` in AD.| +|---|------------------------------------------------------------------------------| + +#### D.3.3. Expose Dataflow APIs + +To prepare OAuth scopes, create one for each Data Flow security role.
In this example, those would be + +* `api://dataflow-server/dataflow.create` + +* `api://dataflow-server/dataflow.deploy` + +* `api://dataflow-server/dataflow.destroy` + +* `api://dataflow-server/dataflow.manage` + +* `api://dataflow-server/dataflow.schedule` + +* `api://dataflow-server/dataflow.modify` + +* `api://dataflow-server/dataflow.view` + +The following image shows the APIs to expose: + +Expose APIs + +Previously created scopes need to be added as API Permissions, as the following image shows: + +Api Permissions + +#### D.3.4. Creating a Privileged Client + +For the OAuth client, which is about to use password grants, the same API permissions need +to be created for the OAuth client as were used for the server (described in the previous section). + +| |All these permissions need to be granted with admin privileges.| +|---|---------------------------------------------------------------| + +The following image shows the privileged settings: + +Privileged Client + +| |Privileged client needs a client secret, which needs to be exposed to a client
configuration when used in a shell. If you do not want to expose that secret, use the[Creating a Public Client](#appendix-identity-provider-azure-pubclient) public client.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### D.3.5. Creating a Public Client + +A public client is basically a client without a client secret and with its type set to public. + +The following image shows the configuration of a public client: + +Public Client + +#### D.3.6. Configuration Examples + +This section contains configuration examples for the Data Flow and Skipper servers and the shell. + +To starting a Data Flow server: + +``` +$ java -jar spring-cloud-dataflow-server.jar \ + --spring.config.additional-location=dataflow-azure.yml +``` + +dataflow-azure.yml + +``` +spring: + cloud: + dataflow: + security: + authorization: + provider-role-mappings: + dataflow-server: + map-oauth-scopes: true + role-mappings: + ROLE_VIEW: dataflow.view + ROLE_CREATE: dataflow.create + ROLE_MANAGE: dataflow.manage + ROLE_DEPLOY: dataflow.deploy + ROLE_DESTROY: dataflow.destroy + ROLE_MODIFY: dataflow.modify + ROLE_SCHEDULE: dataflow.schedule + security: + oauth2: + client: + registration: + dataflow-server: + provider: azure + redirect-uri: '{baseUrl}/login/oauth2/code/{registrationId}' + client-id: + client-secret: + scope: + - openid + - profile + - email + - offline_access + - api://dataflow-server/dataflow.view + - api://dataflow-server/dataflow.deploy + - api://dataflow-server/dataflow.destroy + - api://dataflow-server/dataflow.manage + - api://dataflow-server/dataflow.modify + - api://dataflow-server/dataflow.schedule + - api://dataflow-server/dataflow.create + provider: + azure: + issuer-uri: https://login.microsoftonline.com/799dcfde-b9e3-4dfc-ac25-659b326e0bcd/v2.0 + 
user-name-attribute: name + resourceserver: + jwt: + jwk-set-uri: https://login.microsoftonline.com/799dcfde-b9e3-4dfc-ac25-659b326e0bcd/discovery/v2.0/keys +``` + +To start a Skipper server: + +``` +$ java -jar spring-cloud-skipper-server.jar \ + --spring.config.additional-location=skipper-azure.yml +``` + +skipper-azure.yml + +``` +spring: + cloud: + skipper: + security: + authorization: + provider-role-mappings: + skipper-server: + map-oauth-scopes: true + role-mappings: + ROLE_VIEW: dataflow.view + ROLE_CREATE: dataflow.create + ROLE_MANAGE: dataflow.manage + ROLE_DEPLOY: dataflow.deploy + ROLE_DESTROY: dataflow.destroy + ROLE_MODIFY: dataflow.modify + ROLE_SCHEDULE: dataflow.schedule + security: + oauth2: + client: + registration: + skipper-server: + provider: azure + redirect-uri: '{baseUrl}/login/oauth2/code/{registrationId}' + client-id: + client-secret: + scope: + - openid + - profile + - email + - offline_access + - api://dataflow-server/dataflow.view + - api://dataflow-server/dataflow.deploy + - api://dataflow-server/dataflow.destroy + - api://dataflow-server/dataflow.manage + - api://dataflow-server/dataflow.modify + - api://dataflow-server/dataflow.schedule + - api://dataflow-server/dataflow.create + provider: + azure: + issuer-uri: https://login.microsoftonline.com/799dcfde-b9e3-4dfc-ac25-659b326e0bcd/v2.0 + user-name-attribute: name + resourceserver: + jwt: + jwk-set-uri: https://login.microsoftonline.com/799dcfde-b9e3-4dfc-ac25-659b326e0bcd/discovery/v2.0/keys +``` + +To start a shell and (optionally) pass credentials as options: + +``` +$ java -jar spring-cloud-dataflow-shell.jar \ + --spring.config.additional-location=dataflow-azure-shell.yml \ + --dataflow.username= \ + --dataflow.password= +``` + +dataflow-azure-shell.yml + +``` + security: + oauth2: + client: + registration: + dataflow-shell: + provider: azure + client-id: + client-secret: + authorization-grant-type: password + scope: + - offline_access + - api://dataflow-server/dataflow.create 
+ - api://dataflow-server/dataflow.deploy + - api://dataflow-server/dataflow.destroy + - api://dataflow-server/dataflow.manage + - api://dataflow-server/dataflow.modify + - api://dataflow-server/dataflow.schedule + - api://dataflow-server/dataflow.view + provider: + azure: + issuer-uri: https://login.microsoftonline.com/799dcfde-b9e3-4dfc-ac25-659b326e0bcd/v2.0 +``` + +Starting a public shell and (optionally) pass credentials as options: + +``` +$ java -jar spring-cloud-dataflow-shell.jar \ + --spring.config.additional-location=dataflow-azure-shell-public.yml \ + --dataflow.username= \ + --dataflow.password= +``` + +dataflow-azure-shell-public.yml + +``` +spring: + security: + oauth2: + client: + registration: + dataflow-shell: + provider: azure + client-id: + authorization-grant-type: password + client-authentication-method: post + scope: + - offline_access + - api://dataflow-server/dataflow.create + - api://dataflow-server/dataflow.deploy + - api://dataflow-server/dataflow.destroy + - api://dataflow-server/dataflow.manage + - api://dataflow-server/dataflow.modify + - api://dataflow-server/dataflow.schedule + - api://dataflow-server/dataflow.view + provider: + azure: + issuer-uri: https://login.microsoftonline.com/799dcfde-b9e3-4dfc-ac25-659b326e0bcd/v2.0 +``` \ No newline at end of file diff --git a/docs/en/spring-cloud/README.md b/docs/en/spring-cloud/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3459163893b871c32d10340cafed9e6f94fec65d --- /dev/null +++ b/docs/en/spring-cloud/README.md @@ -0,0 +1 @@ +# Spring Cloud \ No newline at end of file diff --git a/docs/en/spring-cloud/documentation-overview.md b/docs/en/spring-cloud/documentation-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..dc0cff19447c54c1121f8ef4130984a76a073411 --- /dev/null +++ b/docs/en/spring-cloud/documentation-overview.md @@ -0,0 +1,30 @@ +# Spring Cloud Documentation + +## 1. 
About the Documentation + +The Spring Cloud reference guide is available as + +* [Multi-page HTML](https://docs.spring.io/spring-cloud/docs/2021.0.1/reference/html) + +* [Single-page HTML](https://docs.spring.io/spring-cloud/docs/2021.0.1/reference/htmlsingle) + +* [PDF](https://docs.spring.io/spring-cloud/docs/2021.0.1/reference/pdf/spring-cloud.pdf) + +Copies of this document may be made for your own use and for distribution to others, +provided that you do not charge any fee for such copies and further provided that each +copy contains this Copyright Notice, whether distributed in print or electronically. + +## 2. Getting Help + +If you have trouble with Spring Cloud, we would like to help. + +* Learn the Spring Cloud basics. If you are + starting out with Spring Cloud, try one of the [guides](https://spring.io/guides). + +* Ask a question. We monitor [stackoverflow.com](https://stackoverflow.com) for questions + tagged with [`spring-cloud`](https://stackoverflow.com/tags/spring-cloud). + +* Chat with us at [Spring Cloud Gitter](https://gitter.im/spring-cloud/spring-cloud) + +| |All of Spring Cloud is open source, including the documentation. If you find
problems with the docs or if you want to improve them, please get involved.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------| \ No newline at end of file diff --git a/docs/en/spring-cloud/spring-cloud-build.md b/docs/en/spring-cloud/spring-cloud-build.md new file mode 100644 index 0000000000000000000000000000000000000000..37e735fd9e2ff5d8c24ddd8f9add6a17b514f47b --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-build.md @@ -0,0 +1,441 @@ +# Spring Cloud Build + +## [Building and Deploying](#_building_and_deploying) + +To install locally: + +``` +$ mvn install -s .settings.xml +``` + +and to deploy snapshots to repo.spring.io: + +``` +$ mvn deploy -DaltSnapshotDeploymentRepository=repo.spring.io::default::https://repo.spring.io/snapshot +``` + +for a RELEASE build use + +``` +$ mvn deploy -DaltReleaseDeploymentRepository=repo.spring.io::default::https://repo.spring.io/release +``` + +and for jcenter use + +``` +$ mvn deploy -DaltReleaseDeploymentRepository=bintray::default::https://api.bintray.com/maven/spring/jars/org.springframework.cloud:build +``` + +and for Maven Central use + +``` +$ mvn deploy -P central -DaltReleaseDeploymentRepository=sonatype-nexus-staging::default::https://oss.sonatype.org/service/local/staging/deploy/maven2 +``` + +(the "central" profile is available for all projects in Spring Cloud and it sets up the gpg jar signing, and the repository has to be specified separately for this project because it is a parent of the starter parent which users in turn have as their own parent). + +## [Contributing](#_contributing) + +Spring Cloud is released under the non-restrictive Apache 2.0 license, +and follows a very standard Github development process, using Github +tracker for issues and merging pull requests into master. If you want +to contribute even something trivial please do not hesitate, but +follow the guidelines below. 
+ +### [Sign the Contributor License Agreement](#_sign_the_contributor_license_agreement) + +Before we accept a non-trivial patch or pull request we will need you to sign the[Contributor License Agreement](https://cla.pivotal.io/sign/spring). +Signing the contributor’s agreement does not grant anyone commit rights to the main +repository, but it does mean that we can accept your contributions, and you will get an +author credit if we do. Active contributors might be asked to join the core team, and +given the ability to merge pull requests. + +### [Code of Conduct](#_code_of_conduct) + +This project adheres to the Contributor Covenant [code of +conduct](https://github.com/spring-cloud/spring-cloud-build/blob/master/docs/src/main/asciidoc/code-of-conduct.adoc). By participating, you are expected to uphold this code. Please report +unacceptable behavior to [[email protected]](/cdn-cgi/l/email-protection#dba8aba9b2b5bcf6b8b4bfbef6b4bdf6b8b4b5bfaeb8af9babb2adb4afbab7f5b2b4). + +### [Code Conventions and Housekeeping](#_code_conventions_and_housekeeping) + +None of these is essential for a pull request, but they will all help. They can also be +added after the original pull request but before a merge. + +* Use the Spring Framework code format conventions. If you use Eclipse + you can import formatter settings using the`eclipse-code-formatter.xml` file from the[Spring + Cloud Build](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-dependencies-parent/eclipse-code-formatter.xml) project. If using IntelliJ, you can use the[Eclipse Code Formatter + Plugin](https://plugins.jetbrains.com/plugin/6546) to import the same file. + +* Make sure all new `.java` files have a simple Javadoc class comment with at least an`@author` tag identifying you, and preferably at least a paragraph on what the class is
+ +* Add the ASF license header comment to all new `.java` files (copy from existing files + in the project) + +* Add yourself as an `@author` to the .java files that you modify substantially (more + than cosmetic changes). + +* Add some Javadocs and, if you change the namespace, some XSD doc elements. + +* A few unit tests would help a lot as well — someone has to do it. + +* If no-one else is using your branch, please rebase it against the current master (or + other target branch in the main project). + +* When writing a commit message please follow [these conventions](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), + if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit + message (where XXXX is the issue number). + +### [Checkstyle](#_checkstyle) + +Spring Cloud Build comes with a set of checkstyle rules. You can find them in the `spring-cloud-build-tools` module. The most notable files under the module are: + +spring-cloud-build-tools/ + +``` +└── src +    ├── checkstyle +    │   └── checkstyle-suppressions.xml (3) +    └── main +    └── resources +    ├── checkstyle-header.txt (2) +    └── checkstyle.xml (1) +``` + +|**1**|Default Checkstyle rules | +|-----|-------------------------| +|**2**| File header setup | +|**3**|Default suppression rules| + +#### [Checkstyle configuration](#_checkstyle_configuration) + +Checkstyle rules are **disabled by default**. To add checkstyle to your project just define the following properties and plugins. 
+ +pom.xml + +``` + +true (1) + true + (2) + true + (3) + + + + + (4) + io.spring.javaformat + spring-javaformat-maven-plugin + + (5) + org.apache.maven.plugins + maven-checkstyle-plugin + + + + + + (5) + org.apache.maven.plugins + maven-checkstyle-plugin + + + + +``` + +|**1**| Fails the build upon Checkstyle errors | +|-----|--------------------------------------------------------------------------------------------------------------| +|**2**| Fails the build upon Checkstyle violations | +|**3**| Checkstyle analyzes also the test sources | +|**4**|Add the Spring Java Format plugin that will reformat your code to pass most of the Checkstyle formatting rules| +|**5**| Add checkstyle plugin to your build and reporting phases | + +If you need to suppress some rules (e.g. line length needs to be longer), then it’s enough for you to define a file under `${project.root}/src/checkstyle/checkstyle-suppressions.xml` with your suppressions. Example: + +projectRoot/src/checkstyle/checkstyle-suppresions.xml + +``` + + + + + + +``` + +It’s advisable to copy the `${spring-cloud-build.rootFolder}/.editorconfig` and `${spring-cloud-build.rootFolder}/.springformat` to your project. That way, some default formatting rules will be applied. You can do so by running this script: + +``` +$ curl https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/.editorconfig -o .editorconfig +$ touch .springformat +``` + +### [IDE setup](#_ide_setup) + +#### [Intellij IDEA](#_intellij_idea) + +In order to setup Intellij you should import our coding conventions, inspection profiles and set up the checkstyle plugin. +The following files can be found in the [Spring Cloud Build](https://github.com/spring-cloud/spring-cloud-build/tree/master/spring-cloud-build-tools) project. 
+ +spring-cloud-build-tools/ + +``` +└── src +    ├── checkstyle +    │   └── checkstyle-suppressions.xml (3) +    └── main +    └── resources +    ├── checkstyle-header.txt (2) +    ├── checkstyle.xml (1) +    └── intellij +       ├── Intellij_Project_Defaults.xml (4) +       └── Intellij_Spring_Boot_Java_Conventions.xml (5) +``` + +|**1**| Default Checkstyle rules | +|-----|--------------------------------------------------------------------------| +|**2**| File header setup | +|**3**| Default suppression rules | +|**4**| Project defaults for Intellij that apply most of Checkstyle rules | +|**5**|Project style conventions for Intellij that apply most of Checkstyle rules| + +![Code style](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/docs/src/main/asciidoc/images/intellij-code-style.png) + +Figure 1. Code style + +Go to `File` → `Settings` → `Editor` → `Code style`. There click on the icon next to the `Scheme` section. There, click on the `Import Scheme` value and pick the `Intellij IDEA code style XML` option. Import the `spring-cloud-build-tools/src/main/resources/intellij/Intellij_Spring_Boot_Java_Conventions.xml` file. + +![Code style](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/docs/src/main/asciidoc/images/intellij-inspections.png) + +Figure 2. Inspection profiles + +Go to `File` → `Settings` → `Editor` → `Inspections`. There click on the icon next to the `Profile` section. There, click on the `Import Profile` and import the `spring-cloud-build-tools/src/main/resources/intellij/Intellij_Project_Defaults.xml` file. + +Checkstyle + +To have Intellij work with Checkstyle, you have to install the `Checkstyle` plugin. 
It’s advisable to also install the `Assertions2Assertj` to automatically convert the JUnit assertions + +![Checkstyle](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/docs/src/main/asciidoc/images/intellij-checkstyle.png) + +Go to `File` → `Settings` → `Other settings` → `Checkstyle`. There click on the `+` icon in the `Configuration file` section. There, you’ll have to define where the checkstyle rules should be picked from. In the image above, we’ve picked the rules from the cloned Spring Cloud Build repository. However, you can point to the Spring Cloud Build’s GitHub repository (e.g. for the `checkstyle.xml` : `[https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle.xml](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle.xml)`). We need to provide the following variables: + +* `checkstyle.header.file` - please point it to the Spring Cloud Build’s, `spring-cloud-build-tools/src/main/resources/checkstyle-header.txt` file either in your cloned repo or via the `[https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle-header.txt](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle-header.txt)` URL. + +* `checkstyle.suppressions.file` - default suppressions. Please point it to the Spring Cloud Build’s, `spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml` file either in your cloned repo or via the `[https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml)` URL. 
+ +* `checkstyle.additional.suppressions.file` - this variable corresponds to suppressions in your local project. E.g. you’re working on `spring-cloud-contract`. Then point to the `project-root/src/checkstyle/checkstyle-suppressions.xml` folder. Example for `spring-cloud-contract` would be: `/home/username/spring-cloud-contract/src/checkstyle/checkstyle-suppressions.xml`. + +| |Remember to set the `Scan Scope` to `All sources` since we apply checkstyle rules for production and test sources.| +|---|------------------------------------------------------------------------------------------------------------------| + +### [Duplicate Finder](#_duplicate_finder) + +Spring Cloud Build brings along the `basepom:duplicate-finder-maven-plugin`, that enables flagging duplicate and conflicting classes and resources on the java classpath. + +#### [Duplicate Finder configuration](#_duplicate_finder_configuration) + +Duplicate finder is **enabled by default** and will run in the `verify` phase of your Maven build, but it will only take effect in your project if you add the `duplicate-finder-maven-plugin` to the `build` section of the project’s `pom.xml`. + +pom.xml + +``` + + + + org.basepom.maven + duplicate-finder-maven-plugin + + + +``` + +For other properties, we have set defaults as listed in the [plugin documentation](https://github.com/basepom/duplicate-finder-maven-plugin/wiki). + +You can easily override them by setting the value of the selected property prefixed with `duplicate-finder-maven-plugin`. For example, set `duplicate-finder-maven-plugin.skip` to `true` in order to skip duplicates check in your build. 
+ +If you need to add `ignoredClassPatterns` or `ignoredResourcePatterns` to your setup, make sure to add them in the plugin configuration section of your project: + +``` + + + + org.basepom.maven + duplicate-finder-maven-plugin + + + org.joda.time.base.BaseDateTime + .*module-info + + + changelog.txt + + + + + +``` + +## [Flattening the POMs](#_flattening_the_poms) + +To avoid propagating build setup that is required to build a Spring Cloud project, we’re using the maven flatten plugin. It has the advantage of letting you use whatever features you need while publishing "clean" pom to the repository. + +In order to add it, add the `org.codehaus.mojo:flatten-maven-plugin` to your `pom.xml`. + +``` + + + + org.codehaus.mojo + flatten-maven-plugin + + + +``` + +## [Reusing the documentation](#_reusing_the_documentation) + +Spring Cloud Build publishes its `spring-cloud-build-docs` module that contains +helpful scripts (e.g. README generation ruby script) and css, xslt and images +for the Spring Cloud documentation. 
If you want to follow the same convention +approach of generating documentation just add these plugins to your `docs` module + +``` + + deploy (8) + + + + docs + + + + pl.project13.maven + git-commit-id-plugin (1) + + + org.apache.maven.plugins + maven-dependency-plugin (2) + + + org.apache.maven.plugins + maven-resources-plugin (3) + + + org.codehaus.mojo + exec-maven-plugin (4) + + + org.asciidoctor + asciidoctor-maven-plugin (5) + + + org.apache.maven.plugins + maven-antrun-plugin (6) + + + maven-deploy-plugin (7) + + + + + +``` + +|**1**| This plugin downloads sets up all the git information of the project | +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| This plugin downloads the resources of the `spring-cloud-build-docs` module | +|**3**| This plugin unpacks the resources of the `spring-cloud-build-docs` module | +|**4**| This plugin generates an `adoc` file with all the configuration properties from the classpath | +|**5**| This plugin is required to parse the Asciidoctor documentation | +|**6**|This plugin is required to copy resources into proper final destinations and to generate main README.adoc and to assert that no files use unresolved links| +|**7**| This plugin ensures that the generated zip docs will get published | +|**8**| This property turns on the "deploy" phase for \<7\> | + +| |The order of plugin declaration is important!| +|---|---------------------------------------------| + +In order for the build to generate the `adoc` file with all your configuration properties, your `docs` module should contain all the dependencies on the classpath, that you would want to scan for configuration properties. The file will be output to `${docsModule}/src/main/asciidoc/_configprops.adoc` file (configurable via the `configprops.path` property). 
+ +If you want to modify which of the configuration properties are put in the table, you can tweak the `configprops.inclusionPattern` pattern to include only a subset of the properties (e.g. `spring.sleuth.*`). + +Spring Cloud Build Docs comes with a set of attributes for asciidoctor that you can reuse. + +``` + + shared + true + + left + 4 + true + ${project.basedir}/[email protected] + ${project.basedir}/src/main/[email protected] + ${project.basedir}/target/[email protected] + + + + ${maven.multiModuleProjectDirectory}@ + + ${docs.main}@ + https://github.com/spring-cloud/${docs.main}@ + + https://raw.githubusercontent.com/spring-cloud/${docs.main}/${github-tag}@ + + https://github.com/spring-cloud/${docs.main}/tree/${github-tag}@ + + https://github.com/spring-cloud/${docs.main}/issues/@ + https://github.com/spring-cloud/${docs.main}/[email protected] + https://github.com/spring-cloud/${docs.main}/tree/[email protected] + + ${index-link}@ + + + + ${project.version}@ + ${project.version}@ + ${github-tag}@ + ${version-type}@ + https://docs.spring.io/${docs.main}/docs/${project.version}@ + ${github-raw}@ + ${project.version}@ + ${docs.main}@ + +``` + +## [Updating the guides](#_updating_the_guides) + +We assume that your project contains guides under the `guides` folder. + +``` +. +└── guides + ├── gs-guide1 + ├── gs-guide2 + └── gs-guide3 +``` + +This means that the project contains 3 guides that would +correspond to the following guides in Spring Guides org. 
+ +* [https://github.com/spring-guides/gs-guide1](https://github.com/spring-guides/gs-guide1) + +* [https://github.com/spring-guides/gs-guide2](https://github.com/spring-guides/gs-guide2) + +* [https://github.com/spring-guides/gs-guide3](https://github.com/spring-guides/gs-guide3) + +If you deploy your project with the `-Pguides` profile like this + +``` +$ ./mvnw clean deploy -Pguides +``` + +what will happen is that for GA project versions, we will clone `gs-guide1`, `gs-guide2` and `gs-guide3` and update their contents with the ones being under your `guides` project. + +You can skip this by either not adding the `guides` profile, or passing the `-DskipGuides` system property when the profile is turned on. + +You can configure the project version passed to guides via the `guides-project.version` (defaults to `${project.version}`). The phase at which guides get updated can be configured by `guides-update.phase` (defaults to `deploy`). + diff --git a/docs/en/spring-cloud/spring-cloud-bus.md b/docs/en/spring-cloud/spring-cloud-bus.md new file mode 100644 index 0000000000000000000000000000000000000000..9d982a9f04c775b0294d4daec32ccbf1b35bbbd3 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-bus.md @@ -0,0 +1,227 @@ +# Spring Cloud Bus + +## 1. Quick Start + +Spring Cloud Bus works by adding Spring Boot autoconfiguration if it detects itself on the +classpath. To enable the bus, add `spring-cloud-starter-bus-amqp` or `spring-cloud-starter-bus-kafka` to your dependency management. Spring Cloud takes care of +the rest. Make sure the broker (RabbitMQ or Kafka) is available and configured. When +running on localhost, you need not do anything. 
If you run remotely, use Spring Cloud +Connectors or Spring Boot conventions to define the broker credentials, as shown in the +following example for Rabbit: + +application.yml + +``` +spring: + rabbitmq: + host: mybroker.com + port: 5672 + username: user + password: secret +``` + +The bus currently supports sending messages to all nodes listening or all nodes for a +particular service (as defined by Eureka). The `/bus/*` actuator namespace has some HTTP +endpoints. Currently, two are implemented. The first, `/bus/env`, sends key/value pairs to +update each node’s Spring Environment. The second, `/bus/refresh`, reloads each +application’s configuration, as though they had all been pinged on their `/refresh`endpoint. + +| |The Spring Cloud Bus starters cover Rabbit and Kafka, because those are the two most
common implementations. However, Spring Cloud Stream is quite flexible, and the binder
works with `spring-cloud-bus`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 2. Bus Endpoints + +Spring Cloud Bus provides two endpoints, `/actuator/busrefresh` and `/actuator/busenv`that correspond to individual actuator endpoints in Spring Cloud Commons,`/actuator/refresh` and `/actuator/env` respectively. + +### 2.1. Bus Refresh Endpoint + +The `/actuator/busrefresh` endpoint clears the `RefreshScope` cache and rebinds`@ConfigurationProperties`. See the [Refresh Scope](#refresh-scope) documentation for +more information. + +To expose the `/actuator/busrefresh` endpoint, you need to add following configuration to your +application: + +``` +management.endpoints.web.exposure.include=busrefresh +``` + +### 2.2. Bus Env Endpoint + +The `/actuator/busenv` endpoint updates each instances environment with the specified +key/value pair across multiple instances. + +To expose the `/actuator/busenv` endpoint, you need to add following configuration to your +application: + +``` +management.endpoints.web.exposure.include=busenv +``` + +The `/actuator/busenv` endpoint accepts `POST` requests with the following shape: + +``` +{ + "name": "key1", + "value": "value1" +} +``` + +## 3. Addressing an Instance + +Each instance of the application has a service ID, whose value can be set with`spring.cloud.bus.id` and whose value is expected to be a colon-separated list of +identifiers, in order from least specific to most specific. The default value is +constructed from the environment as a combination of the `spring.application.name` and`server.port` (or `spring.application.index`, if set). 
The default value of the ID is +constructed in the form of `app:index:id`, where: + +* `app` is the `vcap.application.name`, if it exists, or `spring.application.name` + +* `index` is the `vcap.application.instance_index`, if it exists,`spring.application.index`, `local.server.port`, `server.port`, or `0` (in that order). + +* `id` is the `vcap.application.instance_id`, if it exists, or a random value. + +The HTTP endpoints accept a “destination” path parameter, such as`/busrefresh/customers:9000`, where `destination` is a service ID. If the ID +is owned by an instance on the bus, it processes the message, and all other instances +ignore it. + +## 4. Addressing All Instances of a Service + +The “destination” parameter is used in a Spring `PathMatcher` (with the path separator +as a colon — `:`) to determine if an instance processes the message. Using the example +from earlier, `/busenv/customers:**` targets all instances of the +“customers” service regardless of the rest of the service ID. + +## 5. Service ID Must Be Unique + +The bus tries twice to eliminate processing an event — once from the original`ApplicationEvent` and once from the queue. To do so, it checks the sending service ID +against the current service ID. If multiple instances of a service have the same ID, +events are not processed. When running on a local machine, each service is on a different +port, and that port is part of the ID. Cloud Foundry supplies an index to differentiate. +To ensure that the ID is unique outside Cloud Foundry, set `spring.application.index` to +something unique for each instance of a service. + +## 6. Customizing the Message Broker + +Spring Cloud Bus uses [Spring Cloud Stream](https://cloud.spring.io/spring-cloud-stream) to +broadcast the messages. So, to get messages to flow, you need only include the binder +implementation of your choice in the classpath. 
There are convenient starters for the bus +with AMQP (RabbitMQ) and Kafka (`spring-cloud-starter-bus-[amqp|kafka]`). Generally +speaking, Spring Cloud Stream relies on Spring Boot autoconfiguration conventions for +configuring middleware. For instance, the AMQP broker address can be changed with`spring.rabbitmq.*` configuration properties. Spring Cloud Bus has a handful of +native configuration properties in `spring.cloud.bus.*` (for example,`spring.cloud.bus.destination` is the name of the topic to use as the external +middleware). Normally, the defaults suffice. + +To learn more about how to customize the message broker settings, consult the Spring Cloud +Stream documentation. + +## 7. Tracing Bus Events + +Bus events (subclasses of `RemoteApplicationEvent`) can be traced by setting`spring.cloud.bus.trace.enabled=true`. If you do so, the Spring Boot `TraceRepository`(if it is present) shows each event sent and all the acks from each service instance. The +following example comes from the `/trace` endpoint: + +``` +{ + "timestamp": "2015-11-26T10:24:44.411+0000", + "info": { + "signal": "spring.cloud.bus.ack", + "type": "RefreshRemoteApplicationEvent", + "id": "c4d374b7-58ea-4928-a312-31984def293b", + "origin": "stores:8081", + "destination": "*:**" + } + }, + { + "timestamp": "2015-11-26T10:24:41.864+0000", + "info": { + "signal": "spring.cloud.bus.sent", + "type": "RefreshRemoteApplicationEvent", + "id": "c4d374b7-58ea-4928-a312-31984def293b", + "origin": "customers:9000", + "destination": "*:**" + } + }, + { + "timestamp": "2015-11-26T10:24:41.862+0000", + "info": { + "signal": "spring.cloud.bus.ack", + "type": "RefreshRemoteApplicationEvent", + "id": "c4d374b7-58ea-4928-a312-31984def293b", + "origin": "customers:9000", + "destination": "*:**" + } +} +``` + +The preceding trace shows that a `RefreshRemoteApplicationEvent` was sent from`customers:9000`, broadcast to all services, and received (acked) by `customers:9000` and`stores:8081`. 
+ +To handle the ack signals yourself, you could add an `@EventListener` for the`AckRemoteApplicationEvent` and `SentApplicationEvent` types to your app (and enable +tracing). Alternatively, you could tap into the `TraceRepository` and mine the data from +there. + +| |Any Bus application can trace acks. However, sometimes, it is
useful to do this in a central service that can do more complex
queries on the data or forward it to a specialized tracing service.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 8. Broadcasting Your Own Events + +The Bus can carry any event of type `RemoteApplicationEvent`. The default transport is +JSON, and the deserializer needs to know which types are going to be used ahead of time. +To register a new type, you must put it in a subpackage of`org.springframework.cloud.bus.event`. + +To customise the event name, you can use `@JsonTypeName` on your custom class or rely on +the default strategy, which is to use the simple name of the class. + +| |Both the producer and the consumer need access to the class definition.| +|---|-----------------------------------------------------------------------| + +### 8.1. Registering events in custom packages + +If you cannot or do not want to use a subpackage of `org.springframework.cloud.bus.event`for your custom events, you must specify which packages to scan for events of type`RemoteApplicationEvent` by using the `@RemoteApplicationEventScan` annotation. Packages +specified with `@RemoteApplicationEventScan` include subpackages. + +For example, consider the following custom event, called `MyEvent`: + +``` +package com.acme; + +public class MyEvent extends RemoteApplicationEvent { + ... +} +``` + +You can register that event with the deserializer in the following way: + +``` +package com.acme; + +@Configuration +@RemoteApplicationEventScan +public class BusConfiguration { + ... +} +``` + +Without specifying a value, the package of the class where `@RemoteApplicationEventScan`is used is registered. In this example, `com.acme` is registered by using the package of`BusConfiguration`. 
+ +You can also explicitly specify the packages to scan by using the `value`, `basePackages`or `basePackageClasses` properties on `@RemoteApplicationEventScan`, as shown in the +following example: + +``` +package com.acme; + +@Configuration +//@RemoteApplicationEventScan({"com.acme", "foo.bar"}) +//@RemoteApplicationEventScan(basePackages = {"com.acme", "foo.bar", "fizz.buzz"}) +@RemoteApplicationEventScan(basePackageClasses = BusConfiguration.class) +public class BusConfiguration { + ... +} +``` + +All of the preceding examples of `@RemoteApplicationEventScan` are equivalent, in that the`com.acme` package is registered by explicitly specifying the packages on`@RemoteApplicationEventScan`. + +| |You can specify multiple base packages to scan.| +|---|-----------------------------------------------| + +## 9. Configuration properties + +To see the list of all Bus related configuration properties please check [the Appendix page](appendix.html). + diff --git a/docs/en/spring-cloud/spring-cloud-circuitbreaker.md b/docs/en/spring-cloud/spring-cloud-circuitbreaker.md new file mode 100644 index 0000000000000000000000000000000000000000..8a88d36006e140b82d030070fb2a7a513f2f71ec --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-circuitbreaker.md @@ -0,0 +1,577 @@ +# Spring Cloud Circuit Breaker + +## 1. Usage Documentation + +The Spring Cloud CircuitBreaker project contains implementations for Resilience4J and Spring Retry. +The APIs implemented in Spring Cloud CircuitBreaker live in Spring Cloud Commons. The usage documentation +for these APIs are located in the [Spring Cloud Commons documentation](https://docs.spring.io/spring-cloud-commons/docs/current/reference/html/#spring-cloud-circuit-breaker). + +### 1.1. Configuring Resilience4J Circuit Breakers + +#### 1.1.1. Starters + +There are two starters for the Resilience4J implementations, one for reactive applications and one for non-reactive applications. 
+ +* `org.springframework.cloud:spring-cloud-starter-circuitbreaker-resilience4j` - non-reactive applications + +* `org.springframework.cloud:spring-cloud-starter-circuitbreaker-reactor-resilience4j` - reactive applications + +#### 1.1.2. Auto-Configuration + +You can disable the Resilience4J auto-configuration by setting `spring.cloud.circuitbreaker.resilience4j.enabled` to `false`. + +#### 1.1.3. Default Configuration + +To provide a default configuration for all of your circuit breakers create a `Customizer` bean that is passed a `Resilience4JCircuitBreakerFactory` or `ReactiveResilience4JCircuitBreakerFactory`. +The `configureDefault` method can be used to provide a default configuration. + +``` +@Bean +public Customizer defaultCustomizer() { + return factory -> factory.configureDefault(id -> new Resilience4JConfigBuilder(id) + .timeLimiterConfig(TimeLimiterConfig.custom().timeoutDuration(Duration.ofSeconds(4)).build()) + .circuitBreakerConfig(CircuitBreakerConfig.ofDefaults()) + .build()); +} +``` + +##### Reactive Example + +``` +@Bean +public Customizer defaultCustomizer() { + return factory -> factory.configureDefault(id -> new Resilience4JConfigBuilder(id) + .circuitBreakerConfig(CircuitBreakerConfig.ofDefaults()) + .timeLimiterConfig(TimeLimiterConfig.custom().timeoutDuration(Duration.ofSeconds(4)).build()).build()); +} +``` + +#### 1.1.4. Specific Circuit Breaker Configuration + +Similarly to providing a default configuration, you can create a `Customizer` bean that is passed a `Resilience4JCircuitBreakerFactory` or `ReactiveResilience4JCircuitBreakerFactory`. 
+ +``` +@Bean +public Customizer slowCustomizer() { + return factory -> factory.configure(builder -> builder.circuitBreakerConfig(CircuitBreakerConfig.ofDefaults()) + .timeLimiterConfig(TimeLimiterConfig.custom().timeoutDuration(Duration.ofSeconds(2)).build()), "slow"); +} +``` + +In addition to configuring the circuit breaker that is created you can also customize the circuit breaker after it has been created but before it is returned to the caller. +To do this you can use the `addCircuitBreakerCustomizer`method. +This can be useful for adding event handlers to Resilience4J circuit breakers. + +``` +@Bean +public Customizer slowCustomizer() { + return factory -> factory.addCircuitBreakerCustomizer(circuitBreaker -> circuitBreaker.getEventPublisher() + .onError(normalFluxErrorConsumer).onSuccess(normalFluxSuccessConsumer), "normalflux"); +} +``` + +##### Reactive Example + +``` +@Bean +public Customizer slowCustomizer() { + return factory -> { + factory.configure(builder -> builder + .timeLimiterConfig(TimeLimiterConfig.custom().timeoutDuration(Duration.ofSeconds(2)).build()) + .circuitBreakerConfig(CircuitBreakerConfig.ofDefaults()), "slow", "slowflux"); + factory.addCircuitBreakerCustomizer(circuitBreaker -> circuitBreaker.getEventPublisher() + .onError(normalFluxErrorConsumer).onSuccess(normalFluxSuccessConsumer), "normalflux"); + }; +} +``` + +#### 1.1.5. Circuit Breaker Properties Configuration + +You can configure `CircuitBreaker` and `TimeLimiter` instances in your application’s configuration properties file. +Property configuration has higher priority than Java `Customizer` configuration. 
+ +``` +resilience4j.circuitbreaker: + instances: + backendA: + registerHealthIndicator: true + slidingWindowSize: 100 + backendB: + registerHealthIndicator: true + slidingWindowSize: 10 + permittedNumberOfCallsInHalfOpenState: 3 + slidingWindowType: TIME_BASED + recordFailurePredicate: io.github.robwin.exception.RecordFailurePredicate + +resilience4j.timelimiter: + instances: + backendA: + timeoutDuration: 2s + cancelRunningFuture: true + backendB: + timeoutDuration: 1s + cancelRunningFuture: false +``` + +For more information on Resilience4j property configuration, see [Resilience4J Spring Boot 2 Configuration](https://resilience4j.readme.io/docs/getting-started-3#configuration). + +#### 1.1.6. Bulkhead pattern supporting + +If `resilience4j-bulkhead` is on the classpath, Spring Cloud CircuitBreaker will wrap all methods with a Resilience4j Bulkhead. +You can disable the Resilience4j Bulkhead by setting `spring.cloud.circuitbreaker.bulkhead.resilience4j.enabled` to `false`. + +Spring Cloud CircuitBreaker Resilience4j provides two implementation of bulkhead pattern: + +* a `SemaphoreBulkhead` which uses Semaphores + +* a `FixedThreadPoolBulkhead` which uses a bounded queue and a fixed thread pool. + +By default, Spring Cloud CircuitBreaker Resilience4j uses `FixedThreadPoolBulkhead`. For more information on implementation +of Bulkhead patterns see the [Resilience4j Bulkhead](https://resilience4j.readme.io/docs/bulkhead). + +The `Customizer` can be used to provide a default `Bulkhead` and `ThreadPoolBulkhead` configuration. + +``` +@Bean +public Customizer defaultBulkheadCustomizer() { + return provider -> provider.configureDefault(id -> new Resilience4jBulkheadConfigurationBuilder() + .bulkheadConfig(BulkheadConfig.custom().maxConcurrentCalls(4).build()) + .threadPoolBulkheadConfig(ThreadPoolBulkheadConfig.custom().coreThreadPoolSize(1).maxThreadPoolSize(1).build()) + .build() +); +} +``` + +#### 1.1.7. 
Specific Bulkhead Configuration + +Similarly to providing a default 'Bulkhead' or 'ThreadPoolBulkhead' configuration, you can create a `Customizer` bean that +is passed a `Resilience4jBulkheadProvider`. + +``` +@Bean +public Customizer slowBulkheadProviderCustomizer() { + return provider -> provider.configure(builder -> builder + .bulkheadConfig(BulkheadConfig.custom().maxConcurrentCalls(1).build()) + .threadPoolBulkheadConfig(ThreadPoolBulkheadConfig.ofDefaults()), "slowBulkhead"); +} +``` + +In addition to configuring the Bulkhead that is created you can also customize the bulkhead and thread pool bulkhead after they +have been created but before they are returned to the caller. To do this you can use the `addBulkheadCustomizer` and `addThreadPoolBulkheadCustomizer` methods. + +##### Bulkhead Example + +``` +@Bean +public Customizer customizer() { + return provider -> provider.addBulkheadCustomizer(bulkhead -> bulkhead.getEventPublisher() + .onCallRejected(slowRejectedConsumer) + .onCallFinished(slowFinishedConsumer), "slowBulkhead"); +} +``` + +##### Thread Pool Bulkhead Example + +``` +@Bean +public Customizer slowThreadPoolBulkheadCustomizer() { + return provider -> provider.addThreadPoolBulkheadCustomizer(threadPoolBulkhead -> threadPoolBulkhead.getEventPublisher() + .onCallRejected(slowThreadPoolRejectedConsumer) + .onCallFinished(slowThreadPoolFinishedConsumer), "slowThreadPoolBulkhead"); +} +``` + +#### 1.1.8. Bulkhead Properties Configuration + +You can configure ThreadPoolBulkhead and SemaphoreBulkhead instances in your application’s configuration properties file. +Property configuration has higher priority than Java `Customizer` configuration. 
+ +``` +resilience4j.thread-pool-bulkhead: + instances: + backendA: + maxThreadPoolSize: 1 + coreThreadPoolSize: 1 +resilience4j.bulkhead: + instances: + backendB: + maxConcurrentCalls: 10 +``` + +For more information on the Resilience4j property configuration, see [Resilience4J Spring Boot 2 Configuration](https://resilience4j.readme.io/docs/getting-started-3#configuration). + +#### 1.1.9. Collecting Metrics + +Spring Cloud Circuit Breaker Resilience4j includes auto-configuration to setup metrics collection as long as the right +dependencies are on the classpath. To enable metric collection you must include `org.springframework.boot:spring-boot-starter-actuator`, and `io.github.resilience4j:resilience4j-micrometer`. For more information on the metrics that +get produced when these dependencies are present, see the [Resilience4j documentation](https://resilience4j.readme.io/docs/micrometer). + +| |You don’t have to include `micrometer-core` directly as it is brought in by `spring-boot-starter-actuator`| +|---|----------------------------------------------------------------------------------------------------------| + +### 1.2. Configuring Spring Retry Circuit Breakers + +Spring Retry provides declarative retry support for Spring applications. +A subset of the project includes the ability to implement circuit breaker functionality. +Spring Retry provides a circuit breaker implementation via a combination of its [`CircuitBreakerRetryPolicy`](https://github.com/spring-projects/spring-retry/blob/master/src/main/java/org/springframework/retry/policy/CircuitBreakerRetryPolicy.java) and a [stateful retry](https://github.com/spring-projects/spring-retry#stateful-retry). +All circuit breakers created using Spring Retry will be created using the `CircuitBreakerRetryPolicy` and a [`DefaultRetryState`](https://github.com/spring-projects/spring-retry/blob/master/src/main/java/org/springframework/retry/support/DefaultRetryState.java). 
+Both of these classes can be configured using `SpringRetryConfigBuilder`. + +#### 1.2.1. Default Configuration + +To provide a default configuration for all of your circuit breakers create a `Customizer` bean that is passed a `SpringRetryCircuitBreakerFactory`. +The `configureDefault` method can be used to provide a default configuration. + +``` +@Bean +public Customizer defaultCustomizer() { + return factory -> factory.configureDefault(id -> new SpringRetryConfigBuilder(id) + .retryPolicy(new TimeoutRetryPolicy()).build()); +} +``` + +#### 1.2.2. Specific Circuit Breaker Configuration + +Similarly to providing a default configuration, you can create a `Customizer` bean that is passed a `SpringRetryCircuitBreakerFactory`. + +``` +@Bean +public Customizer slowCustomizer() { + return factory -> factory.configure(builder -> builder.retryPolicy(new SimpleRetryPolicy(1)).build(), "slow"); +} +``` + +In addition to configuring the circuit breaker that is created you can also customize the circuit breaker after it has been created but before it is returned to the caller. +To do this you can use the `addRetryTemplateCustomizers` method. +This can be useful for adding event handlers to the `RetryTemplate`. + +``` +@Bean +public Customizer slowCustomizer() { + return factory -> factory.addRetryTemplateCustomizers(retryTemplate -> retryTemplate.registerListener(new RetryListener() { + + @Override + public boolean open(RetryContext context, RetryCallback callback) { + return false; + } + + @Override + public void close(RetryContext context, RetryCallback callback, Throwable throwable) { + + } + + @Override + public void onError(RetryContext context, RetryCallback callback, Throwable throwable) { + + } + })); +} +``` + +## 2. Building + +### 2.1. Basic Compile and Test + +To build the source you will need to install JDK 17. 
+ +Spring Cloud uses Maven for most build-related activities, and you +should be able to get off the ground quite quickly by cloning the +project you are interested in and typing + +``` +$ ./mvnw install +``` + +| |You can also install Maven (\>=3.3.3) yourself and run the `mvn` command
in place of `./mvnw` in the examples below. If you do that you also
might need to add `-P spring` if your local Maven settings do not
contain repository declarations for spring pre-release artifacts.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Be aware that you might need to increase the amount of memory
available to Maven by setting a `MAVEN_OPTS` environment variable with
a value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in
the `.mvn` configuration, so if you find you have to do it to make a
build succeed, please raise a ticket to get the settings added to
source control.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The projects that require middleware (i.e. Redis) for testing generally +require that a local instance of [Docker]([www.docker.com/get-started](https://www.docker.com/get-started)) is installed and running. + +### 2.2. Documentation + +The spring-cloud-build module has a "docs" profile, and if you switch +that on it will try to build asciidoc sources from`src/main/asciidoc`. As part of that process it will look for a`README.adoc` and process it by loading all the includes, but not +parsing or rendering it, just copying it to `${main.basedir}`(defaults to `$/tmp/releaser-1645116950347-0/spring-cloud-circuitbreaker/docs`, i.e. the root of the project). If there are +any changes in the README it will then show up after a Maven build as +a modified file in the correct place. Just commit it and push the change. + +### 2.3. Working with the code + +If you don’t have an IDE preference we would recommend that you use[Spring Tools Suite](https://www.springsource.com/developer/sts) or[Eclipse](https://eclipse.org) when working with the code. We use the[m2eclipse](https://eclipse.org/m2e/) eclipse plugin for maven support. Other IDEs and tools +should also work without issue as long as they use Maven 3.3.3 or better. + +#### 2.3.1. Activate the Spring Maven profile + +Spring Cloud projects require the 'spring' Maven profile to be activated to resolve +the spring milestone and snapshot repositories. Use your preferred IDE to set this +profile to be active, or you may experience build errors. + +#### 2.3.2. 
Importing into eclipse with m2eclipse + +We recommend the [m2eclipse](https://eclipse.org/m2e/) eclipse plugin when working with +eclipse. If you don’t already have m2eclipse installed it is available from the "eclipse +marketplace". + +| |Older versions of m2e do not support Maven 3.3, so once the
projects are imported into Eclipse you will also need to tell
m2eclipse to use the right profile for the projects. If you
see many different errors related to the POMs in the projects, check
that you have an up to date installation. If you can’t upgrade m2e,
add the "spring" profile to your `settings.xml`. Alternatively you can
copy the repository settings from the "spring" profile of the parent
pom into your `settings.xml`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.3.3. Importing into eclipse without m2eclipse + +If you prefer not to use m2eclipse you can generate eclipse project metadata using the +following command: + +``` +$ ./mvnw eclipse:eclipse +``` + +The generated eclipse projects can be imported by selecting `import existing projects`from the `file` menu. + +## 3. Contributing + +Spring Cloud is released under the non-restrictive Apache 2.0 license, +and follows a very standard Github development process, using Github +tracker for issues and merging pull requests into master. If you want +to contribute even something trivial please do not hesitate, but +follow the guidelines below. + +### 3.1. Sign the Contributor License Agreement + +Before we accept a non-trivial patch or pull request we will need you to sign the[Contributor License Agreement](https://cla.pivotal.io/sign/spring). +Signing the contributor’s agreement does not grant anyone commit rights to the main +repository, but it does mean that we can accept your contributions, and you will get an +author credit if we do. Active contributors might be asked to join the core team, and +given the ability to merge pull requests. + +### 3.2. Code of Conduct + +This project adheres to the Contributor Covenant [code of +conduct](https://github.com/spring-cloud/spring-cloud-build/blob/master/docs/src/main/asciidoc/code-of-conduct.adoc). By participating, you are expected to uphold this code. 
Please report +unacceptable behavior to [[email protected]](/cdn-cgi/l/email-protection#4c3f3c3e25222b612f23282961232a612f232228392f380c3c253a23382d20622523). + +### 3.3. Code Conventions and Housekeeping + +None of these is essential for a pull request, but they will all help. They can also be +added after the original pull request but before a merge. + +* Use the Spring Framework code format conventions. If you use Eclipse + you can import formatter settings using the`eclipse-code-formatter.xml` file from the[Spring + Cloud Build](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-dependencies-parent/eclipse-code-formatter.xml) project. If using IntelliJ, you can use the[Eclipse Code Formatter + Plugin](https://plugins.jetbrains.com/plugin/6546) to import the same file. + +* Make sure all new `.java` files have a simple Javadoc class comment with at least an`@author` tag identifying you, and preferably at least a paragraph on what the class is + for. + +* Add the ASF license header comment to all new `.java` files (copy from existing files + in the project) + +* Add yourself as an `@author` to the .java files that you modify substantially (more + than cosmetic changes). + +* Add some Javadocs and, if you change the namespace, some XSD doc elements. + +* A few unit tests would help a lot as well — someone has to do it. + +* If no-one else is using your branch, please rebase it against the current master (or + other target branch in the main project). + +* When writing a commit message please follow [these conventions](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), + if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit + message (where XXXX is the issue number). + +### 3.4. Checkstyle + +Spring Cloud Build comes with a set of checkstyle rules. You can find them in the `spring-cloud-build-tools` module. 
The most notable files under the module are: + +spring-cloud-build-tools/ + +``` +└── src +    ├── checkstyle +    │   └── checkstyle-suppressions.xml (3) +    └── main +    └── resources +    ├── checkstyle-header.txt (2) +    └── checkstyle.xml (1) +``` + +|**1**|Default Checkstyle rules | +|-----|-------------------------| +|**2**| File header setup | +|**3**|Default suppression rules| + +#### 3.4.1. Checkstyle configuration + +Checkstyle rules are **disabled by default**. To add checkstyle to your project just define the following properties and plugins. + +pom.xml + +``` + +true (1) + true + (2) + true + (3) + + + + + (4) + io.spring.javaformat + spring-javaformat-maven-plugin + + (5) + org.apache.maven.plugins + maven-checkstyle-plugin + + + + + + (5) + org.apache.maven.plugins + maven-checkstyle-plugin + + + + +``` + +|**1**| Fails the build upon Checkstyle errors | +|-----|--------------------------------------------------------------------------------------------------------------| +|**2**| Fails the build upon Checkstyle violations | +|**3**| Checkstyle analyzes also the test sources | +|**4**|Add the Spring Java Format plugin that will reformat your code to pass most of the Checkstyle formatting rules| +|**5**| Add checkstyle plugin to your build and reporting phases | + +If you need to suppress some rules (e.g. line length needs to be longer), then it’s enough for you to define a file under `${project.root}/src/checkstyle/checkstyle-suppressions.xml` with your suppressions. Example: + +projectRoot/src/checkstyle/checkstyle-suppresions.xml + +``` + + + + + + +``` + +It’s advisable to copy the `${spring-cloud-build.rootFolder}/.editorconfig` and `${spring-cloud-build.rootFolder}/.springformat` to your project. That way, some default formatting rules will be applied. 
You can do so by running this script: + +``` +$ curl https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/.editorconfig -o .editorconfig +$ touch .springformat +``` + +### 3.5. IDE setup + +#### 3.5.1. Intellij IDEA + +In order to setup Intellij you should import our coding conventions, inspection profiles and set up the checkstyle plugin. +The following files can be found in the [Spring Cloud Build](https://github.com/spring-cloud/spring-cloud-build/tree/master/spring-cloud-build-tools) project. + +spring-cloud-build-tools/ + +``` +└── src +    ├── checkstyle +    │   └── checkstyle-suppressions.xml (3) +    └── main +    └── resources +    ├── checkstyle-header.txt (2) +    ├── checkstyle.xml (1) +    └── intellij +       ├── Intellij_Project_Defaults.xml (4) +       └── Intellij_Spring_Boot_Java_Conventions.xml (5) +``` + +|**1**| Default Checkstyle rules | +|-----|--------------------------------------------------------------------------| +|**2**| File header setup | +|**3**| Default suppression rules | +|**4**| Project defaults for Intellij that apply most of Checkstyle rules | +|**5**|Project style conventions for Intellij that apply most of Checkstyle rules| + +![Code style](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/docs/src/main/asciidoc/images/intellij-code-style.png) + +Figure 1. Code style + +Go to `File` → `Settings` → `Editor` → `Code style`. There click on the icon next to the `Scheme` section. There, click on the `Import Scheme` value and pick the `Intellij IDEA code style XML` option. Import the `spring-cloud-build-tools/src/main/resources/intellij/Intellij_Spring_Boot_Java_Conventions.xml` file. + +![Code style](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/docs/src/main/asciidoc/images/intellij-inspections.png) + +Figure 2. Inspection profiles + +Go to `File` → `Settings` → `Editor` → `Inspections`. There click on the icon next to the `Profile` section. 
There, click on the `Import Profile` and import the `spring-cloud-build-tools/src/main/resources/intellij/Intellij_Project_Defaults.xml` file. + +Checkstyle + +To have Intellij work with Checkstyle, you have to install the `Checkstyle` plugin. It’s advisable to also install the `Assertions2Assertj` to automatically convert the JUnit assertions + +![Checkstyle](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/docs/src/main/asciidoc/images/intellij-checkstyle.png) + +Go to `File` → `Settings` → `Other settings` → `Checkstyle`. There click on the `+` icon in the `Configuration file` section. There, you’ll have to define where the checkstyle rules should be picked from. In the image above, we’ve picked the rules from the cloned Spring Cloud Build repository. However, you can point to the Spring Cloud Build’s GitHub repository (e.g. for the `checkstyle.xml` : `[raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle.xml](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle.xml)`). We need to provide the following variables: + +* `checkstyle.header.file` - please point it to the Spring Cloud Build’s, `spring-cloud-build-tools/src/main/resources/checkstyle-header.txt` file either in your cloned repo or via the `[raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle-header.txt](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle-header.txt)` URL. + +* `checkstyle.suppressions.file` - default suppressions. 
Please point it to the Spring Cloud Build’s, `spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml` file either in your cloned repo or via the `[raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml)` URL. + +* `checkstyle.additional.suppressions.file` - this variable corresponds to suppressions in your local project. E.g. you’re working on `spring-cloud-contract`. Then point to the `project-root/src/checkstyle/checkstyle-suppressions.xml` folder. Example for `spring-cloud-contract` would be: `/home/username/spring-cloud-contract/src/checkstyle/checkstyle-suppressions.xml`. + +| |Remember to set the `Scan Scope` to `All sources` since we apply checkstyle rules for production and test sources.| +|---|------------------------------------------------------------------------------------------------------------------| + +### 3.6. Duplicate Finder + +Spring Cloud Build brings along the `basepom:duplicate-finder-maven-plugin`, that enables flagging duplicate and conflicting classes and resources on the java classpath. + +#### 3.6.1. Duplicate Finder configuration + +Duplicate finder is **enabled by default** and will run in the `verify` phase of your Maven build, but it will only take effect in your project if you add the `duplicate-finder-maven-plugin` to the `build` section of the projecst’s `pom.xml`. + +pom.xml + +``` + + + + org.basepom.maven + duplicate-finder-maven-plugin + + + +``` + +For other properties, we have set defaults as listed in the [plugin documentation](https://github.com/basepom/duplicate-finder-maven-plugin/wiki). + +You can easily override them but setting the value of the selected property prefixed with `duplicate-finder-maven-plugin`. 
For example, set `duplicate-finder-maven-plugin.skip` to `true` in order to skip duplicates check in your build. + +If you need to add `ignoredClassPatterns` or `ignoredResourcePatterns` to your setup, make sure to add them in the plugin configuration section of your project: + +``` + + + + org.basepom.maven + duplicate-finder-maven-plugin + + + org.joda.time.base.BaseDateTime + .*module-info + + + changelog.txt + + + + + +``` + diff --git a/docs/en/spring-cloud/spring-cloud-cli.md b/docs/en/spring-cloud/spring-cloud-cli.md new file mode 100644 index 0000000000000000000000000000000000000000..ab58a071971e47694e78554f0a1d8096b75c2e62 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-cli.md @@ -0,0 +1,167 @@ +# Spring Boot Cloud CLI + +## [Installation](#_installation) + +To install, make +sure you have[Spring Boot CLI](https://github.com/spring-projects/spring-boot)(2.0.0 or better): + +``` +$ spring version +Spring CLI v2.2.3.RELEASE +``` + +E.g. for SDKMan users + +``` +$ sdk install springboot 2.2.3.RELEASE +$ sdk use springboot 2.2.3.RELEASE +``` + +and install the Spring Cloud plugin + +``` +$ mvn install +$ spring install org.springframework.cloud:spring-cloud-cli:2.2.0.RELEASE +``` + +| |**Prerequisites:** to use the encryption and decryption features
you need the full-strength JCE installed in your JVM (it’s not there by default).
You can download the "Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files"
from Oracle, and follow instructions for installation (essentially replace the 2 policy files
in the JRE lib/security directory with the ones that you downloaded).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## [Running Spring Cloud Services in Development](#_running_spring_cloud_services_in_development) + +The Launcher CLI can be used to run common services like Eureka, +Config Server etc. from the command line. To list the available +services you can do `spring cloud --list`, and to launch a default set +of services just `spring cloud`. To choose the services to deploy, +just list them on the command line, e.g. + +``` +$ spring cloud eureka configserver h2 kafka stubrunner zipkin +``` + +Summary of supported deployables: + +| Service | Name | Address | Description | +|------------|----------------|---------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| eureka | Eureka Server | [http://localhost:8761](http://localhost:8761) | Eureka server for service registration and discovery. All the other services show up in its catalog by default. | +|configserver| Config Server | [http://localhost:8888](http://localhost:8888) | Spring Cloud Config Server running in the "native" profile and serving configuration from the local directory ./launcher | +| h2 | H2 Database |[http://localhost:9095](http://localhost:9095) (console), jdbc:h2:tcp://localhost:9096/{data}| Relation database service. Use a file path for `{data}` (e.g. 
`./target/test`) when you connect. Remember that you can add `;MODE=MYSQL` or `;MODE=POSTGRESQL` to connect with compatibility to other server types. | +| kafka | Kafka Broker | [http://localhost:9091](http://localhost:9091) (actuator endpoints), localhost:9092 | | +| dataflow |Dataflow Server | [http://localhost:9393](http://localhost:9393) | Spring Cloud Dataflow server with UI at /admin-ui. Connect the Dataflow shell to target at root path. | +| zipkin | Zipkin Server | [http://localhost:9411](http://localhost:9411) | Zipkin Server with UI for visualizing traces. Stores span data in memory and accepts them via HTTP POST of JSON data. | +| stubrunner |Stub Runner Boot| [http://localhost:8750](http://localhost:8750) |Downloads WireMock stubs, starts WireMock and feeds the started servers with stored stubs. Pass `stubrunner.ids` to pass stub coordinates and then go to `[http://localhost:8750/stubs](http://localhost:8750/stubs)`.| + +Each of these apps can be configured using a local YAML file with the same name (in the current +working directory or a subdirectory called "config" or in `~/.spring-cloud`). E.g. in `configserver.yml` you might want to +do something like this to locate a local git repository for the backend: + +configserver.yml + +``` +spring: + profiles: + active: git + cloud: + config: + server: + git: + uri: file://${user.home}/dev/demo/config-repo +``` + +E.g. in Stub Runner app you could fetch stubs from your local `.m2` in the following way. + +stubrunner.yml + +``` +stubrunner: + workOffline: true + ids: + - com.example:beer-api-producer:+:9876 +``` + +### [Adding Additional Applications](#_adding_additional_applications) + +Additional applications can be added to `./config/cloud.yml` (not`./config.yml` because that would replace the defaults), e.g. 
with + +config/cloud.yml + +``` +spring: + cloud: + launcher: + deployables: + source: + coordinates: maven://com.example:source:0.0.1-SNAPSHOT + port: 7000 + sink: + coordinates: maven://com.example:sink:0.0.1-SNAPSHOT + port: 7001 +``` + +when you list the apps: + +``` +$ spring cloud --list +source sink configserver dataflow eureka h2 kafka stubrunner zipkin +``` + +(notice the additional apps at the start of the list). + +## [Writing Groovy Scripts and Running Applications](#_writing_groovy_scripts_and_running_applications) + +Spring Cloud CLI has support for most of the Spring Cloud declarative +features, such as the `@Enable*` class of annotations. For example, +here is a fully functional Eureka server + +app.groovy + +``` +@EnableEurekaServer +class Eureka {} +``` + +which you can run from the command line like this + +``` +$ spring run app.groovy +``` + +To include additional dependencies, often it suffices just to add the +appropriate feature-enabling annotation, e.g. `@EnableConfigServer`,`@EnableOAuth2Sso` or `@EnableEurekaClient`. To manually include a +dependency you can use a `@Grab` with the special "Spring Boot" short +style artifact co-ordinates, i.e. with just the artifact ID (no need +for group or version information), e.g. to set up a client app to +listen on AMQP for management events from the Spring Cloud Bus: + +app.groovy + +``` +@Grab('spring-cloud-starter-bus-amqp') +@RestController +class Service { + @RequestMapping('/') + def home() { [message: 'Hello'] } +} +``` + +## [Encryption and Decryption](#_encryption_and_decryption) + +The Spring Cloud CLI comes with an "encrypt" and a "decrypt" +command. Both accept arguments in the same form with a key specified +as a mandatory "--key", e.g. + +``` +$ spring encrypt mysecret --key foo +682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda +$ spring decrypt --key foo 682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda +mysecret +``` + +To use a key in a file (e.g. 
an RSA public key for encryption) prepend +the key value with "@" and provide the file path, e.g. + +``` +$ spring encrypt mysecret --key @${HOME}/.ssh/id_rsa.pub +AQAjPgt3eFZQXwt8tsHAVv/QHiY5sI2dRcR+... +``` + diff --git a/docs/en/spring-cloud/spring-cloud-cloudfoundry.md b/docs/en/spring-cloud/spring-cloud-cloudfoundry.md new file mode 100644 index 0000000000000000000000000000000000000000..648c9981d5c3db9ffef873f6f887f54ec04d2c7d --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-cloudfoundry.md @@ -0,0 +1,55 @@ +# Spring Cloud for Cloud Foundry + +## 1. Discovery + +Here’s a Spring Cloud app with Cloud Foundry discovery: + +app.groovy + +``` +@Grab('org.springframework.cloud:spring-cloud-cloudfoundry') +@RestController +@EnableDiscoveryClient +class Application { + + @Autowired + DiscoveryClient client + + @RequestMapping('/') + String home() { + 'Hello from ' + client.getLocalServiceInstance() + } + +} +``` + +If you run it without any service bindings: + +``` +$ spring jar app.jar app.groovy +$ cf push -p app.jar +``` + +It will show its app name in the home page. + +The `DiscoveryClient` can list all the apps in a space, according to +the credentials it is authenticated with, where the space defaults to +the one the client is running in (if any). If neither org nor space +are configured, they default per the user’s profile in Cloud Foundry. + +## 2. Single Sign On + +| |All of the OAuth2 SSO and resource server features moved to Spring Boot
in version 1.3. You can find documentation in the[Spring Boot user guide](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +This project provides automatic binding from CloudFoundry service +credentials to the Spring Boot features. If you have a CloudFoundry +service called "sso", for instance, with credentials containing +"client\_id", "client\_secret" and "auth\_domain", it will bind +automatically to the Spring OAuth2 client that you enable with`@EnableOAuth2Sso` (from Spring Boot). The name of the service can be +parameterized using `spring.oauth2.sso.serviceId`. + +## 3. Configuration + +To see the list of all Spring Cloud Sloud Foundry related configuration properties please check [the Appendix page](appendix.html). + diff --git a/docs/en/spring-cloud/spring-cloud-commons.md b/docs/en/spring-cloud/spring-cloud-commons.md new file mode 100644 index 0000000000000000000000000000000000000000..5b60f04d6ece6bef44fe7ed2b1a6570b923fe9bb --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-commons.md @@ -0,0 +1,1402 @@ +# Cloud Native Applications + +## 1. Spring Cloud Context: Application Context Services + +Spring Boot has an opinionated view of how to build an application with Spring. +For instance, it has conventional locations for common configuration files and has endpoints for common management and monitoring tasks. +Spring Cloud builds on top of that and adds a few features that many components in a system would use or occasionally need. + +### 1.1. The Bootstrap Application Context + +A Spring Cloud application operates by creating a “bootstrap” context, which is a parent context for the main application. 
+This context is responsible for loading configuration properties from the external sources and for decrypting properties in the local external configuration files. +The two contexts share an `Environment`, which is the source of external properties for any Spring application. +By default, bootstrap properties (not `bootstrap.properties` but properties that are loaded during the bootstrap phase) are added with high precedence, so they cannot be overridden by local configuration. + +The bootstrap context uses a different convention for locating external configuration than the main application context. +Instead of `application.yml` (or `.properties`), you can use `bootstrap.yml`, keeping the external configuration for bootstrap and main context nicely separate. +The following listing shows an example: + +Example 1. bootstrap.yml + +``` +spring: + application: + name: foo + cloud: + config: + uri: ${SPRING_CONFIG_URI:http://localhost:8888} +``` + +If your application needs any application-specific configuration from the server, it is a good idea to set the `spring.application.name` (in `bootstrap.yml` or `application.yml`). +For the property `spring.application.name` to be used as the application’s context ID, you must set it in `bootstrap.[properties | yml]`. + +If you want to retrieve specific profile configuration, you should also set `spring.profiles.active` in `bootstrap.[properties | yml]`. + +You can disable the bootstrap process completely by setting `spring.cloud.bootstrap.enabled=false` (for example, in system properties). + +### 1.2. Application Context Hierarchies + +If you build an application context from `SpringApplication` or `SpringApplicationBuilder`, the Bootstrap context is added as a parent to that context. +It is a feature of Spring that child contexts inherit property sources and profiles from their parent, so the “main” application context contains additional property sources, compared to building the same context without Spring Cloud Config. 
+The additional property sources are: + +* “bootstrap”: If any `PropertySourceLocators` are found in the bootstrap context and if they have non-empty properties, an optional `CompositePropertySource` appears with high priority. + An example would be properties from the Spring Cloud Config Server. + See “[Customizing the Bootstrap Property Sources](#customizing-bootstrap-property-sources)” for how to customize the contents of this property source. + +* “applicationConfig: [classpath:bootstrap.yml]” (and related files if Spring profiles are active): If you have a `bootstrap.yml` (or `.properties`), those properties are used to configure the bootstrap context. + Then they get added to the child context when its parent is set. + They have lower precedence than the `application.yml` (or `.properties`) and any other property sources that are added to the child as a normal part of the process of creating a Spring Boot application. + See “[Changing the Location of Bootstrap Properties](#customizing-bootstrap-properties)” for how to customize the contents of these property sources. + +Because of the ordering rules of property sources, the “bootstrap” entries take precedence. +However, note that these do not contain any data from `bootstrap.yml`, which has very low precedence but can be used to set defaults. + +You can extend the context hierarchy by setting the parent context of any `ApplicationContext` you create — for example, by using its own interface or with the `SpringApplicationBuilder` convenience methods (`parent()`, `child()` and `sibling()`). +The bootstrap context is the parent of the most senior ancestor that you create yourself. +Every context in the hierarchy has its own “bootstrap” (possibly empty) property source to avoid promoting values inadvertently from parents down to their descendants. 
+If there is a config server, every context in the hierarchy can also (in principle) have a different `spring.application.name` and, hence, a different remote property source. +Normal Spring application context behavior rules apply to property resolution: properties from a child context override those in +the parent, by name and also by property source name. +(If the child has a property source with the same name as the parent, the value from the parent is not included in the child). + +Note that the `SpringApplicationBuilder` lets you share an `Environment` amongst the whole hierarchy, but that is not the default. +Thus, sibling contexts (in particular) do not need to have the same profiles or property sources, even though they may share common values with their parent. + +### 1.3. Changing the Location of Bootstrap Properties + +The `bootstrap.yml` (or `.properties`) location can be specified by setting `spring.cloud.bootstrap.name` (default: `bootstrap`), `spring.cloud.bootstrap.location` (default: empty) or `spring.cloud.bootstrap.additional-location` (default: empty) — for example, in System properties. + +Those properties behave like the `spring.config.*` variants with the same name. +With `spring.cloud.bootstrap.location` the default locations are replaced and only the specified ones are used. +To add locations to the list of default ones, `spring.cloud.bootstrap.additional-location` could be used. +In fact, they are used to set up the bootstrap `ApplicationContext` by setting those properties in its `Environment`. +If there is an active profile (from `spring.profiles.active` or through the `Environment` API in the context you are building), properties in that profile get loaded as well, the same as in a regular Spring Boot app — for example, from `bootstrap-development.properties` for a `development` profile. + +### 1.4. 
Overriding the Values of Remote Properties + +The property sources that are added to your application by the bootstrap context are often “remote” (for example, from Spring Cloud Config Server). +By default, they cannot be overridden locally. +If you want to let your applications override the remote properties with their own system properties or config files, the remote property source has to grant it permission by setting `spring.cloud.config.allowOverride=true` (it does not work to set this locally). +Once that flag is set, two finer-grained settings control the location of the remote properties in relation to system properties and the application’s local configuration: + +* `spring.cloud.config.overrideNone=true`: Override from any local property source. + +* `spring.cloud.config.overrideSystemProperties=false`: Only system properties, command line arguments, and environment variables (but not the local config files) should override the remote settings. + +### 1.5. Customizing the Bootstrap Configuration + +The bootstrap context can be set to do anything you like by adding entries to `/META-INF/spring.factories` under a key named `org.springframework.cloud.bootstrap.BootstrapConfiguration`. +This holds a comma-separated list of Spring `@Configuration` classes that are used to create the context. +Any beans that you want to be available to the main application context for autowiring can be created here. +There is a special contract for `@Beans` of type `ApplicationContextInitializer`. +If you want to control the startup sequence, you can mark classes with the `@Order` annotation (the default order is `last`). + +| |When adding custom `BootstrapConfiguration`, be careful that the classes you add are not `@ComponentScanned` by mistake into your “main” application context, where they might not be needed.
Use a separate package name for boot configuration classes and make sure that name is not already covered by your `@ComponentScan` or `@SpringBootApplication` annotated configuration classes.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The bootstrap process ends by injecting initializers into the main `SpringApplication` instance (which is the normal Spring Boot startup sequence, whether it runs as a standalone application or is deployed in an application server). +First, a bootstrap context is created from the classes found in `spring.factories`. +Then, all `@Beans` of type `ApplicationContextInitializer` are added to the main `SpringApplication` before it is started. + +### 1.6. Customizing the Bootstrap Property Sources + +The default property source for external configuration added by the bootstrap process is the Spring Cloud Config Server, but you can add additional sources by adding beans of type `PropertySourceLocator` to the bootstrap context (through `spring.factories`). +For instance, you can insert additional properties from a different server or from a database. + +As an example, consider the following custom locator: + +``` +@Configuration +public class CustomPropertySourceLocator implements PropertySourceLocator { + + @Override + public PropertySource locate(Environment environment) { + return new MapPropertySource("customProperty", + Collections.singletonMap("property.from.sample.custom.source", "worked as intended")); + } + +} +``` + +The `Environment` that is passed in is the one for the `ApplicationContext` about to be created — in other words, the one for which we supply additional property sources. 
+It already has its normal Spring Boot-provided property sources, so you can use those to locate a property source specific to this `Environment` (for example, by keying it on `spring.application.name`, as is done in the default Spring Cloud Config Server property source locator). + +If you create a jar with this class in it and then add a `META-INF/spring.factories` containing the following setting, the `customProperty` `PropertySource` appears in any application that includes that jar on its classpath: + +``` +org.springframework.cloud.bootstrap.BootstrapConfiguration=sample.custom.CustomPropertySourceLocator +``` + +### 1.7. Logging Configuration + +If you use Spring Boot to configure log settings, you should place this configuration in `bootstrap.[yml | properties]` if you would like it to apply to all events. + +| |For Spring Cloud to initialize logging configuration properly, you cannot use a custom prefix.
For example, using `custom.loggin.logpath` is not recognized by Spring Cloud when initializing the logging system.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.8. Environment Changes + +The application listens for an `EnvironmentChangeEvent` and reacts to the change in a couple of standard ways (additional `ApplicationListeners` can be added as `@Beans` in the normal way). +When an `EnvironmentChangeEvent` is observed, it has a list of key values that have changed, and the application uses those to: + +* Re-bind any `@ConfigurationProperties` beans in the context. + +* Set the logger levels for any properties in `logging.level.*`. + +Note that the Spring Cloud Config Client does not, by default, poll for changes in the `Environment`. +Generally, we would not recommend that approach for detecting changes (although you could set it up with a`@Scheduled` annotation). +If you have a scaled-out client application, it is better to broadcast the `EnvironmentChangeEvent` to all the instances instead of having them polling for changes (for example, by using the [Spring Cloud Bus](https://github.com/spring-cloud/spring-cloud-bus)). + +The `EnvironmentChangeEvent` covers a large class of refresh use cases, as long as you can actually make a change to the `Environment` and publish the event. +Note that those APIs are public and part of core Spring). +You can verify that the changes are bound to `@ConfigurationProperties` beans by visiting the `/configprops` endpoint (a standard Spring Boot Actuator feature). +For instance, a `DataSource` can have its `maxPoolSize` changed at runtime (the default `DataSource` created by Spring Boot is a `@ConfigurationProperties` bean) and grow capacity dynamically. 
+Re-binding `@ConfigurationProperties` does not cover another large class of use cases, where you need more control over the refresh and where you need a change to be atomic over the whole `ApplicationContext`.
+To address those concerns, we have `@RefreshScope`.
+
+### 1.9. Refresh Scope
+
+When there is a configuration change, a Spring `@Bean` that is marked as `@RefreshScope` gets special treatment.
+This feature addresses the problem of stateful beans that get their configuration injected only when they are initialized.
+For instance, if a `DataSource` has open connections when the database URL is changed through the `Environment`, you probably want the holders of those connections to be able to complete what they are doing.
+Then, the next time something borrows a connection from the pool, it gets one with the new URL.
+
+Sometimes, it might even be mandatory to apply the `@RefreshScope` annotation on some beans that can be only initialized once.
+If a bean is “immutable”, you have to either annotate the bean with `@RefreshScope` or specify the classname under the property key: `spring.cloud.refresh.extra-refreshable`.
+
+| |If you have a `DataSource` bean that is a `HikariDataSource`, it can not be
refreshed. It is the default value for `spring.cloud.refresh.never-refreshable`. Choose a
different `DataSource` implementation if you need it to be refreshed.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Refresh scope beans are lazy proxies that initialize when they are used (that is, when a method is called), and the scope acts as a cache of initialized values. +To force a bean to re-initialize on the next method call, you must invalidate its cache entry. + +The `RefreshScope` is a bean in the context and has a public `refreshAll()` method to refresh all beans in the scope by clearing the target cache. +The `/refresh` endpoint exposes this functionality (over HTTP or JMX). +To refresh an individual bean by name, there is also a `refresh(String)` method. + +To expose the `/refresh` endpoint, you need to add following configuration to your application: + +``` +management: + endpoints: + web: + exposure: + include: refresh +``` + +| |`@RefreshScope` works (technically) on a `@Configuration` class, but it might lead to surprising behavior.
For example, it does not mean that all the `@Beans` defined in that class are themselves in `@RefreshScope`.
Specifically, anything that depends on those beans cannot rely on them being updated when a refresh is initiated, unless it is itself in `@RefreshScope`.
(In that case, it is rebuilt on a refresh and its dependencies are re-injected.
At that point, they are re-initialized from the refreshed `@Configuration`).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.10. Encryption and Decryption + +Spring Cloud has an `Environment` pre-processor for decrypting property values locally. +It follows the same rules as the Spring Cloud Config Server and has the same external configuration through `encrypt.*`. +Thus, you can use encrypted values in the form of `{cipher}*`, and, as long as there is a valid key, they are decrypted before the main application context gets the `Environment` settings. +To use the encryption features in an application, you need to include Spring Security RSA in your classpath (Maven co-ordinates: `org.springframework.security:spring-security-rsa`), and you also need the full strength JCE extensions in your JVM. + +If you get an exception due to "Illegal key size" and you use Sun’s JDK, you need to install the Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files. +See the following links for more information: + +* [Java 6 JCE](https://www.oracle.com/technetwork/java/javase/downloads/jce-6-download-429243.html) + +* [Java 7 JCE](https://www.oracle.com/technetwork/java/javase/downloads/jce-7-download-432124.html) + +* [Java 8 JCE](https://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) + +Extract the files into the JDK/jre/lib/security folder for whichever version of JRE/JDK x64/x86 you use. + +### 1.11. 
Endpoints
+
+For a Spring Boot Actuator application, some additional management endpoints are available. You can use:
+
+* `POST` to `/actuator/env` to update the `Environment` and rebind `@ConfigurationProperties` and log levels.
+  To enable this endpoint you must set `management.endpoint.env.post.enabled=true`.
+
+* `/actuator/refresh` to re-load the bootstrap context and refresh the `@RefreshScope` beans.
+
+* `/actuator/restart` to close the `ApplicationContext` and restart it (disabled by default).
+
+* `/actuator/pause` and `/actuator/resume` for calling the `Lifecycle` methods (`stop()` and `start()` on the `ApplicationContext`).
+
+| |If you disable the `/actuator/restart` endpoint then the `/actuator/pause` and `/actuator/resume` endpoints
will also be disabled since they are just a special case of `/actuator/restart`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 2. Spring Cloud Commons: Common Abstractions + +Patterns such as service discovery, load balancing, and circuit breakers lend themselves to a common abstraction layer that can be consumed by all Spring Cloud clients, independent of the implementation (for example, discovery with Eureka or Consul). + +### 2.1. The `@EnableDiscoveryClient` Annotation + +Spring Cloud Commons provides the `@EnableDiscoveryClient` annotation. +This looks for implementations of the `DiscoveryClient` and `ReactiveDiscoveryClient` interfaces with `META-INF/spring.factories`. +Implementations of the discovery client add a configuration class to `spring.factories` under the `org.springframework.cloud.client.discovery.EnableDiscoveryClient` key. +Examples of `DiscoveryClient` implementations include [Spring Cloud Netflix Eureka](https://cloud.spring.io/spring-cloud-netflix/), [Spring Cloud Consul Discovery](https://cloud.spring.io/spring-cloud-consul/), and [Spring Cloud Zookeeper Discovery](https://cloud.spring.io/spring-cloud-zookeeper/). + +Spring Cloud will provide both the blocking and reactive service discovery clients by default. +You can disable the blocking and/or reactive clients easily by setting `spring.cloud.discovery.blocking.enabled=false` or `spring.cloud.discovery.reactive.enabled=false`. +To completely disable service discovery you just need to set `spring.cloud.discovery.enabled=false`. + +By default, implementations of `DiscoveryClient` auto-register the local Spring Boot server with the remote discovery server. +This behavior can be disabled by setting `autoRegister=false` in `@EnableDiscoveryClient`. + +| |`@EnableDiscoveryClient` is no longer required.
You can put a `DiscoveryClient` implementation on the classpath to cause the Spring Boot application to register with the service discovery server.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.1. Health Indicators + +Commons auto-configures the following Spring Boot health indicators. + +##### DiscoveryClientHealthIndicator + +This health indicator is based on the currently registered `DiscoveryClient` implementation. + +* To disable entirely, set `spring.cloud.discovery.client.health-indicator.enabled=false`. + +* To disable the description field, set `spring.cloud.discovery.client.health-indicator.include-description=false`. + Otherwise, it can bubble up as the `description` of the rolled up `HealthIndicator`. + +* To disable service retrieval, set `spring.cloud.discovery.client.health-indicator.use-services-query=false`. + By default, the indicator invokes the client’s `getServices` method. In deployments with many registered services it may too + costly to retrieve all services during every check. This will skip the service retrieval and instead use the client’s `probe` method. + +##### DiscoveryCompositeHealthContributor + +This composite health indicator is based on all registered `DiscoveryHealthIndicator` beans. To disable, +set `spring.cloud.discovery.client.composite-indicator.enabled=false`. + +#### 2.1.2. Ordering `DiscoveryClient` instances + +`DiscoveryClient` interface extends `Ordered`. This is useful when using multiple discovery +clients, as it allows you to define the order of the returned discovery clients, similar to +how you can order the beans loaded by a Spring application. By default, the order of any `DiscoveryClient` is set to`0`. 
If you want to set a different order for your custom `DiscoveryClient` implementations, you just need to override
+the `getOrder()` method so that it returns the value that is suitable for your setup. Apart from this, you can use
+properties to set the order of the `DiscoveryClient` implementations provided by Spring Cloud, among others `ConsulDiscoveryClient`, `EurekaDiscoveryClient` and `ZookeeperDiscoveryClient`. In order to do it, you just need to set the `spring.cloud.{clientIdentifier}.discovery.order` (or `eureka.client.order` for Eureka) property to the desired value.
+
+#### 2.1.3. SimpleDiscoveryClient
+
+If there is no Service-Registry-backed `DiscoveryClient` in the classpath, `SimpleDiscoveryClient` instance, that uses properties to get information on service and instances, will be used.
+
+The information about the available instances should be passed via properties in the following format: `spring.cloud.discovery.client.simple.instances.service1[0].uri=http://s11:8080`, where `spring.cloud.discovery.client.simple.instances` is the common prefix, then `service1` stands
+for the ID of the service in question, while `[0]` indicates the index number of the instance
+(as visible in the example, indexes start with `0`), and then the value of `uri` is
+the actual URI under which the instance is available.
+
+### 2.2. ServiceRegistry
+
+Commons now provides a `ServiceRegistry` interface that provides methods such as `register(Registration)` and `deregister(Registration)`, which let you provide custom registered services. `Registration` is a marker interface.
+ +The following example shows the `ServiceRegistry` in use: + +``` +@Configuration +@EnableDiscoveryClient(autoRegister=false) +public class MyConfiguration { + private ServiceRegistry registry; + + public MyConfiguration(ServiceRegistry registry) { + this.registry = registry; + } + + // called through some external process, such as an event or a custom actuator endpoint + public void register() { + Registration registration = constructRegistration(); + this.registry.register(registration); + } +} +``` + +Each `ServiceRegistry` implementation has its own `Registry` implementation. + +* `ZookeeperRegistration` used with `ZookeeperServiceRegistry` + +* `EurekaRegistration` used with `EurekaServiceRegistry` + +* `ConsulRegistration` used with `ConsulServiceRegistry` + +If you are using the `ServiceRegistry` interface, you are going to need to pass the +correct `Registry` implementation for the `ServiceRegistry` implementation you +are using. + +#### 2.2.1. ServiceRegistry Auto-Registration + +By default, the `ServiceRegistry` implementation auto-registers the running service. +To disable that behavior, you can set: +\* `@EnableDiscoveryClient(autoRegister=false)` to permanently disable auto-registration. +\* `spring.cloud.service-registry.auto-registration.enabled=false` to disable the behavior through configuration. + +##### ServiceRegistry Auto-Registration Events + +There are two events that will be fired when a service auto-registers. The first event, called`InstancePreRegisteredEvent`, is fired before the service is registered. The second +event, called `InstanceRegisteredEvent`, is fired after the service is registered. You can register an`ApplicationListener`(s) to listen to and react to these events. 
+ +| |These events will not be fired if the `spring.cloud.service-registry.auto-registration.enabled` property is set to `false`.| +|---|---------------------------------------------------------------------------------------------------------------------------| + +#### 2.2.2. Service Registry Actuator Endpoint + +Spring Cloud Commons provides a `/service-registry` actuator endpoint. +This endpoint relies on a `Registration` bean in the Spring Application Context. +Calling `/service-registry` with GET returns the status of the `Registration`. +Using POST to the same endpoint with a JSON body changes the status of the current `Registration` to the new value. +The JSON body has to include the `status` field with the preferred value. +Please see the documentation of the `ServiceRegistry` implementation you use for the allowed values when updating the status and the values returned for the status. +For instance, Eureka’s supported statuses are `UP`, `DOWN`, `OUT_OF_SERVICE`, and `UNKNOWN`. + +### 2.3. Spring RestTemplate as a Load Balancer Client + +You can configure a `RestTemplate` to use a Load-balancer client. +To create a load-balanced `RestTemplate`, create a `RestTemplate` `@Bean` and use the `@LoadBalanced` qualifier, as the following example shows: + +``` +@Configuration +public class MyConfiguration { + + @LoadBalanced + @Bean + RestTemplate restTemplate() { + return new RestTemplate(); + } +} + +public class MyClass { + @Autowired + private RestTemplate restTemplate; + + public String doOtherStuff() { + String results = restTemplate.getForObject("http://stores/stores", String.class); + return results; + } +} +``` + +| |A `RestTemplate` bean is no longer created through auto-configuration.
Individual applications must create it.| +|---|------------------------------------------------------------------------------------------------------------------| + +The URI needs to use a virtual host name (that is, a service name, not a host name). +The BlockingLoadBalancerClient is used to create a full physical address. + +| |To use a load-balanced `RestTemplate`, you need to have a load-balancer implementation in your classpath.
Add [Spring Cloud LoadBalancer starter](#spring-cloud-loadbalancer-starter) to your project in order to use it.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.4. Spring WebClient as a Load Balancer Client + +You can configure `WebClient` to automatically use a load-balancer client. +To create a load-balanced `WebClient`, create a `WebClient.Builder` `@Bean` and use the `@LoadBalanced` qualifier, as follows: + +``` +@Configuration +public class MyConfiguration { + + @Bean + @LoadBalanced + public WebClient.Builder loadBalancedWebClientBuilder() { + return WebClient.builder(); + } +} + +public class MyClass { + @Autowired + private WebClient.Builder webClientBuilder; + + public Mono doOtherStuff() { + return webClientBuilder.build().get().uri("http://stores/stores") + .retrieve().bodyToMono(String.class); + } +} +``` + +The URI needs to use a virtual host name (that is, a service name, not a host name). +The Spring Cloud LoadBalancer is used to create a full physical address. + +| |If you want to use a `@LoadBalanced WebClient.Builder`, you need to have a load balancer
implementation in the classpath. We recommend that you add the [Spring Cloud LoadBalancer starter](#spring-cloud-loadbalancer-starter) to your project.
Then, `ReactiveLoadBalancer` is used underneath.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.4.1. Retrying Failed Requests + +A load-balanced `RestTemplate` can be configured to retry failed requests. +By default, this logic is disabled. +For the non-reactive version (with `RestTemplate`), you can enable it by adding [Spring Retry](https://github.com/spring-projects/spring-retry) to your application’s classpath. For the reactive version (with `WebTestClient), you need to set `spring.cloud.loadbalancer.retry.enabled=true`. + +If you would like to disable the retry logic with Spring Retry or Reactive Retry on the classpath, you can set `spring.cloud.loadbalancer.retry.enabled=false`. + +For the non-reactive implementation, if you would like to implement a `BackOffPolicy` in your retries, you need to create a bean of type `LoadBalancedRetryFactory` and override the `createBackOffPolicy()` method. + +For the reactive implementation, you just need to enable it by setting `spring.cloud.loadbalancer.retry.backoff.enabled` to `false`. + +You can set: + +* `spring.cloud.loadbalancer.retry.maxRetriesOnSameServiceInstance` - indicates how many times a request should be retried on the same `ServiceInstance` (counted separately for every selected instance) + +* `spring.cloud.loadbalancer.retry.maxRetriesOnNextServiceInstance` - indicates how many times a request should be retried a newly selected `ServiceInstance` + +* `spring.cloud.loadbalancer.retry.retryableStatusCodes` - the status codes on which to always retry a failed request. 
+ +For the reactive implementation, you can additionally set: +- `spring.cloud.loadbalancer.retry.backoff.minBackoff` - Sets the minimum backoff duration (by default, 5 milliseconds) +- `spring.cloud.loadbalancer.retry.backoff.maxBackoff` - Sets the maximum backoff duration (by default, max long value of milliseconds) +- `spring.cloud.loadbalancer.retry.backoff.jitter` - Sets the jitter used for calculationg the actual backoff duration for each call (by default, 0.5). + +For the reactive implementation, you can also implement your own `LoadBalancerRetryPolicy` to have more detailed control over the load-balanced call retries. + +| |Individual Loadbalancer clients may be configured individually with the same properties as above except the prefix is `spring.cloud.loadbalancer.clients..*` where `clientId` is the name of the loadbalancer.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |For load-balanced retries, by default, we wrap the `ServiceInstanceListSupplier` bean with `RetryAwareServiceInstanceListSupplier` to select a different instance from the one previously chosen, if available. 
You can disable this behavior by setting the value of `spring.cloud.loadbalancer.retry.avoidPreviousInstance` to `false`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +@Configuration +public class MyConfiguration { + @Bean + LoadBalancedRetryFactory retryFactory() { + return new LoadBalancedRetryFactory() { + @Override + public BackOffPolicy createBackOffPolicy(String service) { + return new ExponentialBackOffPolicy(); + } + }; + } +} +``` + +If you want to add one or more `RetryListener` implementations to your retry functionality, you need to +create a bean of type `LoadBalancedRetryListenerFactory` and return the `RetryListener` array +you would like to use for a given service, as the following example shows: + +``` +@Configuration +public class MyConfiguration { + @Bean + LoadBalancedRetryListenerFactory retryListenerFactory() { + return new LoadBalancedRetryListenerFactory() { + @Override + public RetryListener[] createRetryListeners(String service) { + return new RetryListener[]{new RetryListener() { + @Override + public boolean open(RetryContext context, RetryCallback callback) { + //TODO Do you business... + return true; + } + + @Override + public void close(RetryContext context, RetryCallback callback, Throwable throwable) { + //TODO Do you business... + } + + @Override + public void onError(RetryContext context, RetryCallback callback, Throwable throwable) { + //TODO Do you business... + } + }}; + } + }; + } +} +``` + +### 2.5. Multiple `RestTemplate` Objects + +If you want a `RestTemplate` that is not load-balanced, create a `RestTemplate` bean and inject it. 
+To access the load-balanced `RestTemplate`, use the `@LoadBalanced` qualifier when you create your `@Bean`, as the following example shows: + +``` +@Configuration +public class MyConfiguration { + + @LoadBalanced + @Bean + RestTemplate loadBalanced() { + return new RestTemplate(); + } + + @Primary + @Bean + RestTemplate restTemplate() { + return new RestTemplate(); + } +} + +public class MyClass { +@Autowired +private RestTemplate restTemplate; + + @Autowired + @LoadBalanced + private RestTemplate loadBalanced; + + public String doOtherStuff() { + return loadBalanced.getForObject("http://stores/stores", String.class); + } + + public String doStuff() { + return restTemplate.getForObject("http://example.com", String.class); + } +} +``` + +| |Notice the use of the `@Primary` annotation on the plain `RestTemplate` declaration in the preceding example to disambiguate the unqualified `@Autowired` injection.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you see errors such as `java.lang.IllegalArgumentException: Can not set org.springframework.web.client.RestTemplate field com.my.app.Foo.restTemplate to com.sun.proxy.$Proxy89`, try injecting `RestOperations` or setting `spring.aop.proxyTargetClass=true`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.6. Multiple WebClient Objects + +If you want a `WebClient` that is not load-balanced, create a `WebClient` bean and inject it. 
+To access the load-balanced `WebClient`, use the `@LoadBalanced` qualifier when you create your `@Bean`, as the following example shows: + +``` +@Configuration +public class MyConfiguration { + + @LoadBalanced + @Bean + WebClient.Builder loadBalanced() { + return WebClient.builder(); + } + + @Primary + @Bean + WebClient.Builder webClient() { + return WebClient.builder(); + } +} + +public class MyClass { + @Autowired + private WebClient.Builder webClientBuilder; + + @Autowired + @LoadBalanced + private WebClient.Builder loadBalanced; + + public Mono doOtherStuff() { + return loadBalanced.build().get().uri("http://stores/stores") + .retrieve().bodyToMono(String.class); + } + + public Mono doStuff() { + return webClientBuilder.build().get().uri("http://example.com") + .retrieve().bodyToMono(String.class); + } +} +``` + +### 2.7. Spring WebFlux `WebClient` as a Load Balancer Client + +The Spring WebFlux can work with both reactive and non-reactive `WebClient` configurations, as the topics describe: + +* [Spring WebFlux `WebClient` with `ReactorLoadBalancerExchangeFilterFunction`](#webflux-with-reactive-loadbalancer) + +* [[load-balancer-exchange-filter-functionload-balancer-exchange-filter-function]](#load-balancer-exchange-filter-functionload-balancer-exchange-filter-function) + +#### 2.7.1. Spring WebFlux `WebClient` with `ReactorLoadBalancerExchangeFilterFunction` + +You can configure `WebClient` to use the `ReactiveLoadBalancer`. +If you add [Spring Cloud LoadBalancer starter](#spring-cloud-loadbalancer-starter) to your project +and if `spring-webflux` is on the classpath, `ReactorLoadBalancerExchangeFilterFunction` is auto-configured. 
+The following example shows how to configure a `WebClient` to use reactive load-balancer: + +``` +public class MyClass { + @Autowired + private ReactorLoadBalancerExchangeFilterFunction lbFunction; + + public Mono doOtherStuff() { + return WebClient.builder().baseUrl("http://stores") + .filter(lbFunction) + .build() + .get() + .uri("/stores") + .retrieve() + .bodyToMono(String.class); + } +} +``` + +The URI needs to use a virtual host name (that is, a service name, not a host name). +The `ReactorLoadBalancer` is used to create a full physical address. + +#### 2.7.2. Spring WebFlux `WebClient` with a Non-reactive Load Balancer Client + +If `spring-webflux` is on the classpath, `LoadBalancerExchangeFilterFunction`is auto-configured. Note, however, that this +uses a non-reactive client under the hood. +The following example shows how to configure a `WebClient` to use load-balancer: + +``` +public class MyClass { + @Autowired + private LoadBalancerExchangeFilterFunction lbFunction; + + public Mono doOtherStuff() { + return WebClient.builder().baseUrl("http://stores") + .filter(lbFunction) + .build() + .get() + .uri("/stores") + .retrieve() + .bodyToMono(String.class); + } +} +``` + +The URI needs to use a virtual host name (that is, a service name, not a host name). +The `LoadBalancerClient` is used to create a full physical address. + +WARN: This approach is now deprecated. +We suggest that you use [WebFlux with reactive Load-Balancer](#webflux-with-reactive-loadbalancer)instead. + +### 2.8. Ignore Network Interfaces + +Sometimes, it is useful to ignore certain named network interfaces so that they can be excluded from Service Discovery registration (for example, when running in a Docker container). +A list of regular expressions can be set to cause the desired network interfaces to be ignored. +The following configuration ignores the `docker0` interface and all interfaces that start with `veth`: + +Example 2. 
application.yml + +``` +spring: + cloud: + inetutils: + ignoredInterfaces: + - docker0 + - veth.* +``` + +You can also force the use of only specified network addresses by using a list of regular expressions, as the following example shows: + +Example 3. bootstrap.yml + +``` +spring: + cloud: + inetutils: + preferredNetworks: + - 192.168 + - 10.0 +``` + +You can also force the use of only site-local addresses, as the following example shows: + +Example 4. application.yml + +``` +spring: + cloud: + inetutils: + useOnlySiteLocalInterfaces: true +``` + +See [Inet4Address.html.isSiteLocalAddress()](https://docs.oracle.com/javase/8/docs/api/java/net/Inet4Address.html#isSiteLocalAddress--) for more details about what constitutes a site-local address. + +### 2.9. HTTP Client Factories + +Spring Cloud Commons provides beans for creating both Apache HTTP clients (`ApacheHttpClientFactory`) and OK HTTP clients (`OkHttpClientFactory`). +The `OkHttpClientFactory` bean is created only if the OK HTTP jar is on the classpath. +In addition, Spring Cloud Commons provides beans for creating the connection managers used by both clients: `ApacheHttpClientConnectionManagerFactory` for the Apache HTTP client and `OkHttpClientConnectionPoolFactory` for the OK HTTP client. +If you would like to customize how the HTTP clients are created in downstream projects, you can provide your own implementation of these beans. +In addition, if you provide a bean of type `HttpClientBuilder` or `OkHttpClient.Builder`, the default factories use these builders as the basis for the builders returned to downstream projects. +You can also disable the creation of these beans by setting `spring.cloud.httpclientfactories.apache.enabled` or `spring.cloud.httpclientfactories.ok.enabled` to `false`. + +### 2.10. Enabled Features + +Spring Cloud Commons provides a `/features` actuator endpoint. +This endpoint returns features available on the classpath and whether they are enabled. 
+The information returned includes the feature type, name, version, and vendor. + +#### 2.10.1. Feature types + +There are two types of 'features': abstract and named. + +Abstract features are features where an interface or abstract class is defined and that an implementation the creates, such as `DiscoveryClient`, `LoadBalancerClient`, or `LockService`. +The abstract class or interface is used to find a bean of that type in the context. +The version displayed is `bean.getClass().getPackage().getImplementationVersion()`. + +Named features are features that do not have a particular class they implement. These features include “Circuit Breaker”, “API Gateway”, “Spring Cloud Bus”, and others. These features require a name and a bean type. + +#### 2.10.2. Declaring features + +Any module can declare any number of `HasFeature` beans, as the following examples show: + +``` +@Bean +public HasFeatures commonsFeatures() { + return HasFeatures.abstractFeatures(DiscoveryClient.class, LoadBalancerClient.class); +} + +@Bean +public HasFeatures consulFeatures() { + return HasFeatures.namedFeatures( + new NamedFeature("Spring Cloud Bus", ConsulBusAutoConfiguration.class), + new NamedFeature("Circuit Breaker", HystrixCommandAspect.class)); +} + +@Bean +HasFeatures localFeatures() { + return HasFeatures.builder() + .abstractFeature(Something.class) + .namedFeature(new NamedFeature("Some Other Feature", Someother.class)) + .abstractFeature(Somethingelse.class) + .build(); +} +``` + +Each of these beans should go in an appropriately guarded `@Configuration`. + +### 2.11. Spring Cloud Compatibility Verification + +Due to the fact that some users have problem with setting up Spring Cloud application, we’ve decided +to add a compatibility verification mechanism. It will break if your current setup is not compatible +with Spring Cloud requirements, together with a report, showing what exactly went wrong. + +At the moment we verify which version of Spring Boot is added to your classpath. 
+ +Example of a report + +``` +*************************** +APPLICATION FAILED TO START +*************************** + +Description: + +Your project setup is incompatible with our requirements due to following reasons: + +- Spring Boot [2.1.0.RELEASE] is not compatible with this Spring Cloud release train + +Action: + +Consider applying the following actions: + +- Change Spring Boot version to one of the following versions [1.2.x, 1.3.x] . +You can find the latest Spring Boot versions here [https://spring.io/projects/spring-boot#learn]. +If you want to learn more about the Spring Cloud Release train compatibility, you can visit this page [https://spring.io/projects/spring-cloud#overview] and check the [Release Trains] section. +``` + +In order to disable this feature, set `spring.cloud.compatibility-verifier.enabled` to `false`. +If you want to override the compatible Spring Boot versions, just set the`spring.cloud.compatibility-verifier.compatible-boot-versions` property with a comma separated list +of compatible Spring Boot versions. + +## 3. Spring Cloud LoadBalancer + +Spring Cloud provides its own client-side load-balancer abstraction and implementation. For the load-balancing +mechanism, `ReactiveLoadBalancer` interface has been added and a **Round-Robin-based** and **Random** implementations +have been provided for it. In order to get instances to select from reactive `ServiceInstanceListSupplier`is used. Currently we support a service-discovery-based implementation of `ServiceInstanceListSupplier`that retrieves available instances from Service Discovery using a [Discovery Client](#discovery-client) available in the classpath. + +| |It is possible to disable Spring Cloud LoadBalancer by setting the value of `spring.cloud.loadbalancer.enabled` to `false`.| +|---|---------------------------------------------------------------------------------------------------------------------------| + +### 3.1. 
Switching between the load-balancing algorithms + +The `ReactiveLoadBalancer` implementation that is used by default is `RoundRobinLoadBalancer`. To switch to a different implementation, either for selected services or all of them, you can use the [custom LoadBalancer configurations mechanism](#custom-loadbalancer-configuration). + +For example, the following configuration can be passed via `@LoadBalancerClient` annotation to switch to using the `RandomLoadBalancer`: + +``` +public class CustomLoadBalancerConfiguration { + + @Bean + ReactorLoadBalancer randomLoadBalancer(Environment environment, + LoadBalancerClientFactory loadBalancerClientFactory) { + String name = environment.getProperty(LoadBalancerClientFactory.PROPERTY_NAME); + return new RandomLoadBalancer(loadBalancerClientFactory + .getLazyProvider(name, ServiceInstanceListSupplier.class), + name); + } +} +``` + +| |The classes you pass as `@LoadBalancerClient` or `@LoadBalancerClients` configuration arguments should either not be annotated with `@Configuration` or be outside component scan scope.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.2. Spring Cloud LoadBalancer integrations + +In order to make it easy to use Spring Cloud LoadBalancer, we provide `ReactorLoadBalancerExchangeFilterFunction` that can be used with `WebClient` and `BlockingLoadBalancerClient` that works with `RestTemplate`. +You can see more information and examples of usage in the following sections: + +* [Spring RestTemplate as a Load Balancer Client](#rest-template-loadbalancer-client) + +* [Spring WebClient as a Load Balancer Client](#webclinet-loadbalancer-client) + +* [Spring WebFlux WebClient with `ReactorLoadBalancerExchangeFilterFunction`](#webflux-with-reactive-loadbalancer) + +### 3.3. 
Spring Cloud LoadBalancer Caching + +Apart from the basic `ServiceInstanceListSupplier` implementation that retrieves instances via `DiscoveryClient` each time it has to choose an instance, we provide two caching implementations. + +#### 3.3.1. ](#caffeine-backed-loadbalancer-cache-implementation)[Caffeine-backed LoadBalancer Cache Implementation + +If you have `com.github.ben-manes.caffeine:caffeine` in the classpath, Caffeine-based implementation will be used. +See the [LoadBalancerCacheConfiguration](#loadbalancer-cache-configuration) section for information on how to configure it. + +If you are using Caffeine, you can also override the default Caffeine Cache setup for the LoadBalancer by passing your own [Caffeine Specification](https://static.javadoc.io/com.github.ben-manes.caffeine/caffeine/2.2.2/com/github/benmanes/caffeine/cache/CaffeineSpec.html)in the `spring.cloud.loadbalancer.cache.caffeine.spec` property. + +WARN: Passing your own Caffeine specification will override any other LoadBalancerCache settings, including [General LoadBalancer Cache Configuration](#loadbalancer-cache-configuration) fields, such as `ttl` and `capacity`. + +#### 3.3.2. Default LoadBalancer Cache Implementation + +If you do not have Caffeine in the classpath, the `DefaultLoadBalancerCache`, which comes automatically with `spring-cloud-starter-loadbalancer`, will be used. +See the [LoadBalancerCacheConfiguration](#loadbalancer-cache-configuration) section for information on how to configure it. + +| |To use Caffeine instead of the default cache, add the `com.github.ben-manes.caffeine:caffeine` dependency to classpath.| +|---|-----------------------------------------------------------------------------------------------------------------------| + +#### 3.3.3. 
LoadBalancer Cache Configuration + +You can set your own `ttl` value (the time after write after which entries should be expired), expressed as `Duration`, by passing a `String` compliant with the [Spring Boot `String` to `Duration` converter syntax](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-external-config-conversion-duration). +as the value of the `spring.cloud.loadbalancer.cache.ttl` property. +You can also set your own LoadBalancer cache initial capacity by setting the value of the `spring.cloud.loadbalancer.cache.capacity` property. + +The default setup includes `ttl` set to 35 seconds and the default `initialCapacity` is `256`. + +You can also altogether disable loadBalancer caching by setting the value of `spring.cloud.loadbalancer.cache.enabled`to `false`. + +| |Although the basic, non-cached, implementation is useful for prototyping and testing, it’s much less efficient than the cached versions, so we recommend always using the cached version in production. If the caching is already done by the `DiscoveryClient` implementation, for example `EurekaDiscoveryClient`, the load-balancer caching should be disabled to prevent double caching.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.4. Zone-Based Load-Balancing + +To enable zone-based load-balancing, we provide the `ZonePreferenceServiceInstanceListSupplier`. +We use `DiscoveryClient`-specific `zone` configuration (for example, `eureka.instance.metadata-map.zone`) to pick the zone that the client tries to filter available service instances for. 
+ +| |You can also override `DiscoveryClient`-specific zone setup by setting the value of `spring.cloud.loadbalancer.zone` property.| +|---|------------------------------------------------------------------------------------------------------------------------------| + +| |For the time being, only Eureka Discovery Client is instrumented to set the LoadBalancer zone. For other discovery client, set the `spring.cloud.loadbalancer.zone` property. More instrumentations coming shortly.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |To determine the zone of a retrieved `ServiceInstance`, we check the value under the `"zone"` key in its metadata map.| +|---|----------------------------------------------------------------------------------------------------------------------| + +The `ZonePreferenceServiceInstanceListSupplier` filters retrieved instances and only returns the ones within the same zone. +If the zone is `null` or there are no instances within the same zone, it returns all the retrieved instances. + +In order to use the zone-based load-balancing approach, you will have to instantiate a `ZonePreferenceServiceInstanceListSupplier` bean in a [custom configuration](#custom-loadbalancer-configuration). + +We use delegates to work with `ServiceInstanceListSupplier` beans. +We suggest passing a `DiscoveryClientServiceInstanceListSupplier` delegate in the constructor of `ZonePreferenceServiceInstanceListSupplier` and, in turn, wrapping the latter with a `CachingServiceInstanceListSupplier` to leverage [LoadBalancer caching mechanism](#loadbalancer-caching). 
+ +You could use this sample configuration to set it up: + +``` +public class CustomLoadBalancerConfiguration { + + @Bean + public ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier( + ConfigurableApplicationContext context) { + return ServiceInstanceListSupplier.builder() + .withDiscoveryClient() + .withZonePreference() + .withCaching() + .build(context); + } +} +``` + +### 3.5. Instance Health-Check for LoadBalancer + +It is possible to enable a scheduled HealthCheck for the LoadBalancer. The `HealthCheckServiceInstanceListSupplier`is provided for that. It regularly verifies if the instances provided by a delegate`ServiceInstanceListSupplier` are still alive and only returns the healthy instances, +unless there are none - then it returns all the retrieved instances. + +| |This mechanism is particularly helpful while using the `SimpleDiscoveryClient`. For the
clients backed by an actual Service Registry, it’s not necessary to use, as we already get
healthy instances after querying the external ServiceDiscovery.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |This supplier is also recommended for setups with a small number of instances per service
in order to avoid retrying calls on a failing instance.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If using any of the Service Discovery-backed suppliers, adding this health-check mechanism is usually not necessary, as we retrieve the health state of the instances directly
from the Service Registry.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The `HealthCheckServiceInstanceListSupplier` relies on having updated instances provided by a delegate flux. In the rare cases when you want to use a delegate that does not refresh the instances, even though the list of instances may change (such as the `DiscoveryClientServiceInstanceListSupplier` provided by us), you can set `spring.cloud.loadbalancer.health-check.refetch-instances` to `true` to have the instance list refreshed by the `HealthCheckServiceInstanceListSupplier`. You can then also adjust the refretch intervals by modifying the value of `spring.cloud.loadbalancer.health-check.refetch-instances-interval` and opt to disable the additional healthcheck repetitions by setting `spring.cloud.loadbalancer.health-check.repeat-health-check` to `false` as every instances refetch
will also trigger a healthcheck.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +`HealthCheckServiceInstanceListSupplier` uses properties prefixed with`spring.cloud.loadbalancer.health-check`. You can set the `initialDelay` and `interval`for the scheduler. You can set the default path for the healthcheck URL by setting +the value of the `spring.cloud.loadbalancer.health-check.path.default` property. You can also set a specific value for any given service by setting the value of the `spring.cloud.loadbalancer.health-check.path.[SERVICE_ID]` property, substituting `[SERVICE_ID]` with the correct ID of your service. If the `[SERVICE_ID]` is not specified, `/actuator/health` is used by default. If the `[SERVICE_ID]` is set to `null` or empty as a value, then the health check will not be executed. You can also set a custom port for health-check requests by setting the value of `spring.cloud.loadbalancer.health-check.port`. If none is set, the port under which the requested service is available at the service instance. 
+ +| |If you rely on the default path (`/actuator/health`), make sure you add `spring-boot-starter-actuator` to your collaborator’s dependencies, unless you are planning to add such an endpoint on your own.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In order to use the health-check scheduler approach, you will have to instantiate a `HealthCheckServiceInstanceListSupplier` bean in a [custom configuration](#custom-loadbalancer-configuration). + +We use delegates to work with `ServiceInstanceListSupplier` beans. +We suggest passing a `DiscoveryClientServiceInstanceListSupplier` delegate in the constructor of `HealthCheckServiceInstanceListSupplier`. + +You could use this sample configuration to set it up: + +``` +public class CustomLoadBalancerConfiguration { + + @Bean + public ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier( + ConfigurableApplicationContext context) { + return ServiceInstanceListSupplier.builder() + .withDiscoveryClient() + .withHealthChecks() + .build(context); + } + } +``` + +| |For the non-reactive stack, create this supplier with the `withBlockingHealthChecks()`.
You can also pass your own `WebClient` or `RestTemplate` instance to be used for the checks.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`HealthCheckServiceInstanceListSupplier` has its own caching mechanism based on Reactor Flux `replay()`. Therefore, if it’s being used, you may want to skip wrapping that supplier with `CachingServiceInstanceListSupplier`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.6. Same instance preference for LoadBalancer + +You can set up the LoadBalancer in such a way that it prefers the instance that was previously selected, if that instance is available. + +For that, you need to use `SameInstancePreferenceServiceInstanceListSupplier`. You can configure it either by setting the value of `spring.cloud.loadbalancer.configurations` to `same-instance-preference` or by providing your own `ServiceInstanceListSupplier` bean — for example: + +``` +public class CustomLoadBalancerConfiguration { + + @Bean + public ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier( + ConfigurableApplicationContext context) { + return ServiceInstanceListSupplier.builder() + .withDiscoveryClient() + .withSameInstancePreference() + .build(context); + } + } +``` + +| |This is also a replacement for Zookeeper `StickyRule`.| +|---|------------------------------------------------------| + +### 3.7. Request-based Sticky Session for LoadBalancer + +You can set up the LoadBalancer in such a way that it prefers the instance with `instanceId` provided in a request cookie. 
We currently support this if the request is being passed to the LoadBalancer through either `ClientRequestContext` or `ServerHttpRequestContext`, which are used by the SC LoadBalancer exchange filter functions and filters. + +For that, you need to use the `RequestBasedStickySessionServiceInstanceListSupplier`. You can configure it either by setting the value of `spring.cloud.loadbalancer.configurations` to `request-based-sticky-session` or by providing your own `ServiceInstanceListSupplier` bean — for example: + +``` +public class CustomLoadBalancerConfiguration { + + @Bean + public ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier( + ConfigurableApplicationContext context) { + return ServiceInstanceListSupplier.builder() + .withDiscoveryClient() + .withRequestBasedStickySession() + .build(context); + } + } +``` + +For that functionality, it is useful to have the selected service instance (which can be different from the one in the original request cookie if that one is not available) to be updated before sending the request forward. To do that, set the value of `spring.cloud.loadbalancer.sticky-session.add-service-instance-cookie` to `true`. + +By default, the name of the cookie is `sc-lb-instance-id`. You can modify it by changing the value of the `spring.cloud.loadbalancer.instance-id-cookie-name` property. + +| |This feature is currently supported for WebClient-backed load-balancing.| +|---|------------------------------------------------------------------------| + +### 3.8. Spring Cloud LoadBalancer Hints + +Spring Cloud LoadBalancer lets you set `String` hints that are passed to the LoadBalancer within the `Request` object and that can later be used in `ReactiveLoadBalancer` implementations that can handle them. + +You can set a default hint for all services by setting the value of the `spring.cloud.loadbalancer.hint.default` property. 
You can also set a specific value +for any given service by setting the value of the `spring.cloud.loadbalancer.hint.[SERVICE_ID]` property, substituting `[SERVICE_ID]` with the correct ID of your service. If the hint is not set by the user, `default` is used. + +### 3.9. Hint-Based Load-Balancing + +We also provide a `HintBasedServiceInstanceListSupplier`, which is a `ServiceInstanceListSupplier` implementation for hint-based instance selection. + +`HintBasedServiceInstanceListSupplier` checks for a hint request header (the default header-name is `X-SC-LB-Hint`, but you can modify it by changing the value of the `spring.cloud.loadbalancer.hint-header-name` property) and, if it finds a hint request header, uses the hint value passed in the header to filter service instances. + +If no hint header has been added, `HintBasedServiceInstanceListSupplier` uses [hint values from properties](#spring-cloud-loadbalancer-hints) to filter service instances. + +If no hint is set, either by the header or by properties, all service instances provided by the delegate are returned. + +While filtering, `HintBasedServiceInstanceListSupplier` looks for service instances that have a matching value set under the `hint` key in their `metadataMap`. If no matching instances are found, all instances provided by the delegate are returned. + +You could use the following sample configuration to set it up: + +``` +public class CustomLoadBalancerConfiguration { + + @Bean + public ServiceInstanceListSupplier discoveryClientServiceInstanceListSupplier( + ConfigurableApplicationContext context) { + return ServiceInstanceListSupplier.builder() + .withDiscoveryClient() + .withHints() + .withCaching() + .build(context); + } +} +``` + +### 3.10. Transform the load-balanced HTTP request + +You can use the selected `ServiceInstance` to transform the load-balanced HTTP Request. 
+ +For `RestTemplate`, you need to implement and define `LoadBalancerRequestTransformer` as follows: + +``` +@Bean +public LoadBalancerRequestTransformer transformer() { + return new LoadBalancerRequestTransformer() { + @Override + public HttpRequest transformRequest(HttpRequest request, ServiceInstance instance) { + return new HttpRequestWrapper(request) { + @Override + public HttpHeaders getHeaders() { + HttpHeaders headers = new HttpHeaders(); + headers.putAll(super.getHeaders()); + headers.add("X-InstanceId", instance.getInstanceId()); + return headers; + } + }; + } + }; +} +``` + +For `WebClient`, you need to implement and define `LoadBalancerClientRequestTransformer` as follows: + +``` +@Bean +public LoadBalancerClientRequestTransformer transformer() { + return new LoadBalancerClientRequestTransformer() { + @Override + public ClientRequest transformRequest(ClientRequest request, ServiceInstance instance) { + return ClientRequest.from(request) + .header("X-InstanceId", instance.getInstanceId()) + .build(); + } + }; +} +``` + +If multiple transformers are defined, they are applied in the order in which Beans are defined. +Alternatively, you can use `LoadBalancerRequestTransformer.DEFAULT_ORDER` or `LoadBalancerClientRequestTransformer.DEFAULT_ORDER` to specify the order. + +### 3.11. Spring Cloud LoadBalancer Starter + +We also provide a starter that allows you to easily add Spring Cloud LoadBalancer in a Spring Boot app. +In order to use it, just add `org.springframework.cloud:spring-cloud-starter-loadbalancer` to your Spring Cloud dependencies in your build file. 
Spring Cloud LoadBalancer starter includes [Spring Boot Caching](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-caching.html) and [Evictor](https://github.com/stoyanr/Evictor).
+ +| |The annotation `value` arguments (`stores` in the example above) specifies the service id of the service that we should send the requests to with the given custom configuration.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also pass multiple configurations (for more than one load-balancer client) through the `@LoadBalancerClients` annotation, as the following example shows: + +``` +@Configuration +@LoadBalancerClients({@LoadBalancerClient(value = "stores", configuration = StoresLoadBalancerClientConfiguration.class), @LoadBalancerClient(value = "customers", configuration = CustomersLoadBalancerClientConfiguration.class)}) +public class MyConfiguration { + + @Bean + @LoadBalanced + public WebClient.Builder loadBalancedWebClientBuilder() { + return WebClient.builder(); + } +} +``` + +| |The classes you pass as `@LoadBalancerClient` or `@LoadBalancerClients` configuration arguments should either not be annotated with `@Configuration` or be outside component scan scope.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.13. Spring Cloud LoadBalancer Lifecycle + +One type of bean that it may be useful to register using [Custom LoadBalancer configuration](#custom-loadbalancer-configuration) is `LoadBalancerLifecycle`. + +The `LoadBalancerLifecycle` beans provide callback methods, named `onStart(Request request)`, `onStartRequest(Request request, Response lbResponse)` and `onComplete(CompletionContext completionContext)`, that you should implement to specify what actions should take place before and after load-balancing. + +`onStart(Request request)` takes a `Request` object as a parameter. 
It contains data that is used to select an appropriate instance, including the downstream client request and [hint](#spring-cloud-loadbalancer-hints). `onStartRequest` also takes the `Request` object and, additionally, the `Response` object as parameters. On the other hand, a `CompletionContext` object is provided to the `onComplete(CompletionContext completionContext)` method. It contains the LoadBalancer `Response`, including the selected service instance, the `Status` of the request executed against that service instance and (if available) the response returned to the downstream client, and (if an exception has occurred) the corresponding `Throwable`. + +The `supports(Class requestContextClass, Class responseClass, +Class serverTypeClass)` method can be used to determine whether the processor in question handles objects of provided types. If not overridden by the user, it returns `true`. + +| |In the preceding method calls, `RC` means `RequestContext` type, `RES` means client response type, and `T` means returned server type.| +|---|--------------------------------------------------------------------------------------------------------------------------------------| + +### 3.14. Spring Cloud LoadBalancer Statistics + +We provide a `LoadBalancerLifecycle` bean called `MicrometerStatsLoadBalancerLifecycle`, which uses Micrometer to provide statistics for load-balanced calls. + +In order to get this bean added to your application context, +set the value of the `spring.cloud.loadbalancer.stats.micrometer.enabled` to `true` and have a `MeterRegistry` available (for example, by adding [Spring Boot Actuator](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html) to your project). 
+ +`MicrometerStatsLoadBalancerLifecycle` registers the following meters in `MeterRegistry`: + +* `loadbalancer.requests.active`: A gauge that allows you to monitor the number of currently active requests for any service instance (service instance data available via tags); + +* `loadbalancer.requests.success`: A timer that measures the time of execution of any load-balanced requests that have ended in passing a response on to the underlying client; + +* `loadbalancer.requests.failed`: A timer that measures the time of execution of any load-balanced requests that have ended with an exception; + +* `loadbalancer.requests.discard`: A counter that measures the number of discarded load-balanced requests, i.e. requests where a service instance to run the request on has not been retrieved by the LoadBalancer. + +Additional information regarding the service instances, request data, and response data is added to metrics via tags whenever available. + +| |For some implementations, such as `BlockingLoadBalancerClient`, request and response data might not be available, as we establish generic types from arguments and might not be able to determine the types and read the data.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The meters are registered in the registry when at least one record is added for a given meter.| +|---|----------------------------------------------------------------------------------------------| + +| |You can further configure the behavior of those metrics (for example, add [publishing percentiles and histograms](https://micrometer.io/docs/concepts#_histograms_and_percentiles)) by [adding `MeterFilters`](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready-metrics-per-meter-properties).| 
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.15. Configuring Individual LoadBalancerClients + +Individual Loadbalancer clients may be configured individually with a different prefix `spring.cloud.loadbalancer.clients..` **where `clientId` is the name of the loadbalancer. Default configuration values may be set in the `spring.cloud.loadbalancer.`** namespace and will be merged with the client specific values taking precedence + +Example 5. application.yml + +``` +spring: + cloud: + loadbalancer: + health-check: + initial-delay: 1s + clients: + myclient: + health-check: + interval: 30s +``` + +The above example will result in a merged health-check `@ConfigurationProperties` object with `initial-delay=1s` and `interval=30s`. + +The per-client configuration properties work for most of the properties, apart from the following global ones: + +* `spring.cloud.loadbalancer.enabled` - globally enables or disables load-balancing + +* `spring.cloud.loadbalancer.retry.enabled` - globally enables or disables load-balanced retries. If you enable it globally, you can still disable retries for specific clients using the `client`-prefixed properties, but not the other way round + +* `spring.cloud.loadbalancer.cache.enabled` - globally enables or disables LoadBalancer caching. If you enable it globally, you can still disable caching for specific clients by creating a [custom configuration](#custom-loadbalancer-configuration) that does not include the `CachingServiceInstanceListSupplier` in the `ServiceInstanceListSupplier` delegates hierarchy, but not the other way round. 
+ +* `spring.cloud.loadbalancer.stats.micrometer.enabled` - globally enables or disables LoadBalancer Micrometer metrics + +| |For the properties where maps where already used, where you could specify a different value per-client without using the `clients` keyword (for example, `hints`, `health-check.path`), we have kept that behaviour in order to keep the library backwards compatible. It will be modified in the next major release.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 4. Spring Cloud Circuit Breaker + +### 4.1. Introduction + +Spring Cloud Circuit breaker provides an abstraction across different circuit breaker implementations. +It provides a consistent API to use in your applications, letting you, the developer, choose the circuit breaker implementation that best fits your needs for your application. + +#### 4.1.1. Supported Implementations + +Spring Cloud supports the following circuit-breaker implementations: + +* [Resilience4J](https://github.com/resilience4j/resilience4j) + +* [Sentinel](https://github.com/alibaba/Sentinel) + +* [Spring Retry](https://github.com/spring-projects/spring-retry) + +### 4.2. Core Concepts + +To create a circuit breaker in your code, you can use the `CircuitBreakerFactory` API. When you include a Spring Cloud Circuit Breaker starter on your classpath, a bean that implements this API is automatically created for you. 
+The following example shows a simple example of how to use this API:
+
+```
+@Service
+public static class DemoControllerService {
+ private RestTemplate rest;
+ private CircuitBreakerFactory cbFactory;
+
+ public DemoControllerService(RestTemplate rest, CircuitBreakerFactory cbFactory) {
+ this.rest = rest;
+ this.cbFactory = cbFactory;
+ }
+
+ public String slow() {
+ return cbFactory.create("slow").run(() -> rest.getForObject("/slow", String.class), throwable -> "fallback");
+ }
+
+}
+```
+
+The `CircuitBreakerFactory.create` API creates an instance of a class called `CircuitBreaker`.
+The `run` method takes a `Supplier` and a `Function`.
+The `Supplier` is the code that you are going to wrap in a circuit breaker.
+The `Function` is the fallback that is run if the circuit breaker is tripped.
+The function is passed the `Throwable` that caused the fallback to be triggered.
+You can optionally exclude the fallback if you do not want to provide one.
+
+#### 4.2.1. Circuit Breakers In Reactive Code
+
+If Project Reactor is on the class path, you can also use `ReactiveCircuitBreakerFactory` for your reactive code.
+The following example shows how to do so:
+
+```
+@Service
+public static class DemoControllerService {
+ private ReactiveCircuitBreakerFactory cbFactory;
+ private WebClient webClient;
+
+ public DemoControllerService(WebClient webClient, ReactiveCircuitBreakerFactory cbFactory) {
+ this.webClient = webClient;
+ this.cbFactory = cbFactory;
+ }
+
+ public Mono slow() {
+ return webClient.get().uri("/slow").retrieve().bodyToMono(String.class).transform(
+ it -> cbFactory.create("slow").run(it, throwable -> Mono.just("fallback")));
+ }
+}
+```
+
+The `ReactiveCircuitBreakerFactory.create` API creates an instance of a class called `ReactiveCircuitBreaker`.
+The `run` method takes a `Mono` or a `Flux` and wraps it in a circuit breaker. 
+You can optionally provide a fallback `Function`, which will be called if the circuit breaker is tripped and is passed the `Throwable` that caused the failure.
+
+### 4.3. Configuration
+
+You can configure your circuit breakers by creating beans of type `Customizer`.
+The `Customizer` interface has a single method (called `customize`) that takes the `Object` to customize.
+
+For detailed information on how to customize a given implementation see
+the following documentation:
+
+* [Resilience4J](../../../../spring-cloud-circuitbreaker/current/reference/html/spring-cloud-circuitbreaker.html#configuring-resilience4j-circuit-breakers)
+
+* [Sentinel](https://github.com/alibaba/spring-cloud-alibaba/blob/master/spring-cloud-alibaba-docs/src/main/asciidoc/circuitbreaker-sentinel.adoc#circuit-breaker-spring-cloud-circuit-breaker-with-sentinel—​configuring-sentinel-circuit-breakers)
+
+* [Spring Retry](../../../../../spring-cloud-circuitbreaker/docs/current/reference/html/spring-cloud-circuitbreaker.html#configuring-spring-retry-circuit-breakers)
+
+Some `CircuitBreaker` implementations such as `Resilience4JCircuitBreaker` call `customize` method every time `CircuitBreaker#run` is called.
+It can be inefficient. In that case, you can use `CircuitBreaker#once` method. It is useful where calling `customize` many times doesn’t make sense,
+for example, in case of [consuming Resilience4j’s events](https://resilience4j.readme.io/docs/circuitbreaker#section-consume-emitted-circuitbreakerevents).
+
+The following example shows the way for each `io.github.resilience4j.circuitbreaker.CircuitBreaker` to consume events.
+
+```
+Customizer.once(circuitBreaker -> {
+ circuitBreaker.getEventPublisher()
+ .onStateTransition(event -> log.info("{}: {}", event.getCircuitBreakerName(), event.getStateTransition()));
+}, CircuitBreaker::getName)
+```
+
+## 5. CachedRandomPropertySource
+
+Spring Cloud Context provides a `PropertySource` that caches random values based on a key. 
Outside of the caching +functionality it works the same as Spring Boot’s [`RandomValuePropertySource`](https://github.com/spring-projects/spring-boot/blob/main/spring-boot-project/spring-boot/src/main/java/org/springframework/boot/env/RandomValuePropertySource.java). +This random value might be useful in the case where you want a random value that is consistent even after the Spring Application +context restarts. The property value takes the form of `cachedrandom.[yourkey].[type]` where `yourkey` is the key in the cache. The `type` value can +be any type supported by Spring Boot’s `RandomValuePropertySource`. + +``` +myrandom=${cachedrandom.appname.value} +``` + +## 6. Security + +### 6.1. Single Sign On + +| |All of the OAuth2 SSO and resource server features moved to Spring Boot
in version 1.3. You can find documentation in the[Spring Boot user guide](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.1.1. Client Token Relay + +If your app is a user facing OAuth2 client (i.e. has declared`@EnableOAuth2Sso` or `@EnableOAuth2Client`) then it has an`OAuth2ClientContext` in request scope from Spring Boot. You can +create your own `OAuth2RestTemplate` from this context and an +autowired `OAuth2ProtectedResourceDetails`, and then the context will +always forward the access token downstream, also refreshing the access +token automatically if it expires. (These are features of Spring +Security and Spring Boot.) + +#### 6.1.2. Resource Server Token Relay + +If your app has `@EnableResourceServer` you might want to relay the +incoming token downstream to other services. If you use a`RestTemplate` to contact the downstream services then this is just a +matter of how to create the template with the right context. + +If your service uses `UserInfoTokenServices` to authenticate incoming +tokens (i.e. it is using the `security.oauth2.user-info-uri`configuration), then you can simply create an `OAuth2RestTemplate`using an autowired `OAuth2ClientContext` (it will be populated by the +authentication process before it hits the backend code). Equivalently +(with Spring Boot 1.4), you could inject a`UserInfoRestTemplateFactory` and grab its `OAuth2RestTemplate` in +your configuration. 
For example: + +MyConfiguration.java + +``` +@Bean +public OAuth2RestTemplate restTemplate(UserInfoRestTemplateFactory factory) { + return factory.getUserInfoRestTemplate(); +} +``` + +This rest template will then have the same `OAuth2ClientContext`(request-scoped) that is used by the authentication filter, so you can +use it to send requests with the same access token. + +If your app is not using `UserInfoTokenServices` but is still a client +(i.e. it declares `@EnableOAuth2Client` or `@EnableOAuth2Sso`), then +with Spring Security Cloud any `OAuth2RestOperations` that the user +creates from an `@Autowired` `OAuth2Context` will also forward +tokens. This feature is implemented by default as an MVC handler +interceptor, so it only works in Spring MVC. If you are not using MVC +you could use a custom filter or AOP interceptor wrapping an`AccessTokenContextRelay` to provide the same feature. + +Here’s a basic +example showing the use of an autowired rest template created +elsewhere ("foo.com" is a Resource Server accepting the same tokens as +the surrounding app): + +MyController.java + +``` +@Autowired +private OAuth2RestOperations restTemplate; + +@RequestMapping("/relay") +public String relay() { + ResponseEntity response = + restTemplate.getForEntity("https://foo.com/bar", String.class); + return "Success! (" + response.getBody() + ")"; +} +``` + +If you don’t want to forward tokens (and that is a valid +choice, since you might want to act as yourself, rather than the +client that sent you the token), then you only need to create your own`OAuth2Context` instead of autowiring the default one. + +Feign clients will also pick up an interceptor that uses the`OAuth2ClientContext` if it is available, so they should also do a +token relay anywhere where a `RestTemplate` would. + +## 7. Configuration Properties + +To see the list of all Spring Cloud Commons related configuration properties please check [the Appendix page](appendix.html). 
+ diff --git a/docs/en/spring-cloud/spring-cloud-config.md b/docs/en/spring-cloud/spring-cloud-config.md new file mode 100644 index 0000000000000000000000000000000000000000..c6663c69bc91364587dd4b117e70502e0c99b294 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-config.md @@ -0,0 +1,1975 @@ +# Spring Cloud Config + +## [Quick Start](#_quick_start) + +This quick start walks through using both the server and the client of Spring Cloud Config Server. + +First, start the server, as follows: + +``` +$ cd spring-cloud-config-server +$ ../mvnw spring-boot:run +``` + +The server is a Spring Boot application, so you can run it from your IDE if you prefer to do so (the main class is `ConfigServerApplication`). + +Next try out a client, as follows: + +``` +$ curl localhost:8888/foo/development +{ + "name": "foo", + "profiles": [ + "development" + ] + .... + "propertySources": [ + { + "name": "https://github.com/spring-cloud-samples/config-repo/foo-development.properties", + "source": { + "bar": "spam", + "foo": "from foo development" + } + }, + { + "name": "https://github.com/spring-cloud-samples/config-repo/foo.properties", + "source": { + "foo": "from foo props", + "democonfigclient.message": "hello spring io" + } + }, + .... +``` + +The default strategy for locating property sources is to clone a git repository (at `spring.cloud.config.server.git.uri`) and use it to initialize a mini `SpringApplication`. +The mini-application’s `Environment` is used to enumerate property sources and publish them at a JSON endpoint. 
+ +The HTTP service has resources in the following form: + +``` +/{application}/{profile}[/{label}] +/{application}-{profile}.yml +/{label}/{application}-{profile}.yml +/{application}-{profile}.properties +/{label}/{application}-{profile}.properties +``` + +For example: + +``` +curl localhost:8888/foo/development +curl localhost:8888/foo/development/master +curl localhost:8888/foo/development,db/master +curl localhost:8888/foo-development.yml +curl localhost:8888/foo-db.properties +curl localhost:8888/master/foo-db.properties +``` + +where `application` is injected as the `spring.config.name` in the `SpringApplication` (what is normally `application` in a regular Spring Boot app), `profile` is an active profile (or comma-separated list of properties), and `label` is an optional git label (defaults to `master`.) + +Spring Cloud Config Server pulls configuration for remote clients from various sources. The following example gets configuration from a git repository (which must be provided), as shown in the following example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/spring-cloud-samples/config-repo +``` + +Other sources are any JDBC compatible database, Subversion, Hashicorp Vault, Credhub and local filesystems. + +### [Client Side Usage](#_client_side_usage) + +To use these features in an application, you can build it as a Spring Boot application that depends on spring-cloud-config-client (for an example, see the test cases for the config-client or the sample application). +The most convenient way to add the dependency is with a Spring Boot starter `org.springframework.cloud:spring-cloud-starter-config`. +There is also a parent pom and BOM (`spring-cloud-starter-parent`) for Maven users and a Spring IO version management properties file for Gradle and Spring CLI users. 
The following example shows a typical Maven configuration: + +pom.xml + +``` + + org.springframework.boot + spring-boot-starter-parent + {spring-boot-docs-version} + + + + + + + org.springframework.cloud + spring-cloud-dependencies + {spring-cloud-version} + pom + import + + + + + + + org.springframework.cloud + spring-cloud-starter-config + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + + +``` + +Now you can create a standard Spring Boot application, such as the following HTTP server: + +``` +@SpringBootApplication +@RestController +public class Application { + + @RequestMapping("/") + public String home() { + return "Hello World!"; + } + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + +} +``` + +When this HTTP server runs, it picks up the external configuration from the default local config server (if it is running) on port 8888. +To modify the startup behavior, you can change the location of the config server by using `application.properties` as shown in the following example: + +``` +spring.config.import=optional:configserver:http://myconfigserver.com +``` + +By default, if no application name is set, `application` will be used. To modify the name, the following property can be added to the `application.properties` file: + +``` +spring.application.name: myapp +``` + +| |When setting the property `${spring.application.name}` do not prefix your app name with the reserved word `application-` to prevent issues resolving the correct property source.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The Config Server properties show up in the `/env` endpoint as a high-priority property source, as shown in the following example. 
+ +``` +$ curl localhost:8080/env +{ + "activeProfiles": [], + { + "name": "servletContextInitParams", + "properties": {} + }, + { + "name": "configserver:https://github.com/spring-cloud-samples/config-repo/foo.properties", + "properties": { + "foo": { + "value": "bar", + "origin": "Config Server https://github.com/spring-cloud-samples/config-repo/foo.properties:2:12" + } + } + }, + ... +} +``` + +A property source called `configserver:/` contains the `foo` property with a value of `bar`. + +| |The URL in the property source name is the git repository, not the config server URL.| +|---|-------------------------------------------------------------------------------------| + +| |If you use Spring Cloud Config Client, you need to set the `spring.config.import` property in order to bind to Config Server. You can read more about it [in the Spring Cloud Config Reference Guide](https://docs.spring.io/spring-cloud-config/docs/current/reference/html/#config-data-import).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## [Spring Cloud Config Server](#_spring_cloud_config_server) + +Spring Cloud Config Server provides an HTTP resource-based API for external configuration (name-value pairs or equivalent YAML content). +The server is embeddable in a Spring Boot application, by using the `@EnableConfigServer` annotation. +Consequently, the following application is a config server: + +ConfigServer.java + +``` +@SpringBootApplication +@EnableConfigServer +public class ConfigServer { + public static void main(String[] args) { + SpringApplication.run(ConfigServer.class, args); + } +} +``` + +Like all Spring Boot applications, it runs on port 8080 by default, but you can switch it to the more conventional port 8888 in various ways. 
+The easiest, which also sets a default configuration repository, is by launching it with `spring.config.name=configserver` (there is a `configserver.yml` in the Config Server jar). +Another is to use your own `application.properties`, as shown in the following example: + +application.properties + +``` +server.port: 8888 +spring.cloud.config.server.git.uri: file://${user.home}/config-repo +``` + +where `${user.home}/config-repo` is a git repository containing YAML and properties files. + +| |On Windows, you need an extra "/" in the file URL if it is absolute with a drive prefix (for example,`[file:///${user.home}/config-repo](file:///${user.home}/config-repo)`).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The following listing shows a recipe for creating the git repository in the preceding example:

```
$ cd $HOME
$ mkdir config-repo
$ cd config-repo
$ git init .
$ echo info.foo: bar > application.properties
$ git add -A .
$ git commit -m "Add application.properties"
```| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Using the local filesystem for your git repository is intended for testing only.
You should use a server to host your configuration repositories in production.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The initial clone of your configuration repository can be quick and efficient if you keep only text files in it.
If you store binary files, especially large ones, you may experience delays on the first request for configuration or encounter out of memory errors in the server.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### [Environment Repository](#_environment_repository) + +Where should you store the configuration data for the Config Server? +The strategy that governs this behaviour is the `EnvironmentRepository`, serving `Environment` objects. +This `Environment` is a shallow copy of the domain from the Spring `Environment` (including `propertySources` as the main feature). +The `Environment` resources are parametrized by three variables: + +* `{application}`, which maps to `spring.application.name` on the client side. + +* `{profile}`, which maps to `spring.profiles.active` on the client (comma-separated list). + +* `{label}`, which is a server side feature labelling a "versioned" set of config files. + +Repository implementations generally behave like a Spring Boot application, loading configuration files from a `spring.config.name` equal to the `{application}` parameter, and `spring.profiles.active` equal to the `{profiles}` parameter. +Precedence rules for profiles are also the same as in a regular Spring Boot application: Active profiles take precedence over defaults, and, if there are multiple profiles, the last one wins (similar to adding entries to a `Map`). + +The following sample client application has this bootstrap configuration: + +``` +spring: + application: + name: foo + profiles: + active: dev,mysql +``` + +(As usual with a Spring Boot application, these properties could also be set by environment variables or command line arguments). 
+
+If the repository is file-based, the server creates an `Environment` from `application.yml` (shared between all clients) and `foo.yml` (with `foo.yml` taking precedence).
+If the YAML files have documents inside them that point to Spring profiles, those are applied with higher precedence (in order of the profiles listed).
+If there are profile-specific YAML (or properties) files, these are also applied with higher precedence than the defaults.
+Higher precedence translates to a `PropertySource` listed earlier in the `Environment`.
+(These same rules apply in a standalone Spring Boot application.)
+
+You can set spring.cloud.config.server.accept-empty to false so that Server would return an HTTP 404 status, if the application is not found. By default, this flag is set to true.
+
+#### [Git Backend](#_git_backend)
+
+The default implementation of `EnvironmentRepository` uses a Git backend, which is very convenient for managing upgrades and physical environments and for auditing changes.
+To change the location of the repository, you can set the `spring.cloud.config.server.git.uri` configuration property in the Config Server (for example in `application.yml`).
+If you set it with a `file:` prefix, it should work from a local repository so that you can get started quickly and easily without a server. However, in that case, the server operates directly on the local repository without cloning it (it does not matter if it is not bare because the Config Server never makes changes to the "remote" repository).
+To scale the Config Server up and make it highly available, you need to have all instances of the server pointing to the same repository, so only a shared file system would work.
+Even in that case, it is better to use the `ssh:` protocol for a shared filesystem repository, so that the server can clone it and use a local working copy as a cache. 
+ +This repository implementation maps the `{label}` parameter of the HTTP resource to a git label (commit id, branch name, or tag). +If the git branch or tag name contains a slash (`/`), then the label in the HTTP URL should instead be specified with the special string `(_)` (to avoid ambiguity with other URL paths). +For example, if the label is `foo/bar`, replacing the slash would result in the following label: `foo(_)bar`. +The inclusion of the special string `(_)` can also be applied to the `{application}` parameter. +If you use a command-line client such as curl, be careful with the brackets in the URL — you should escape them from the shell with single quotes (''). + +##### [Skipping SSL Certificate Validation](#_skipping_ssl_certificate_validation) + +The configuration server’s validation of the Git server’s SSL certificate can be disabled by setting the `git.skipSslValidation` property to `true` (default is `false`). + +``` +spring: + cloud: + config: + server: + git: + uri: https://example.com/my/repo + skipSslValidation: true +``` + +##### [Setting HTTP Connection Timeout](#_setting_http_connection_timeout) + +You can configure the time, in seconds, that the configuration server will wait to acquire an HTTP connection. Use the `git.timeout` property. + +``` +spring: + cloud: + config: + server: + git: + uri: https://example.com/my/repo + timeout: 4 +``` + +##### [Placeholders in Git URI](#_placeholders_in_git_uri) + +Spring Cloud Config Server supports a git repository URL with placeholders for the `{application}` and `{profile}` (and `{label}` if you need it, but remember that the label is applied as a git label anyway). +So you can support a “one repository per application” policy by using a structure similar to the following: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/myorg/{application} +``` + +You can also support a “one repository per profile” policy by using a similar pattern but with`{profile}`. 
+ +Additionally, using the special string "(\_)" within your `{application}` parameters can enable support for multiple +organizations, as shown in the following example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/{application} +``` + +where `{application}` is provided at request time in the following format: `organization(_)application`. + +##### [Pattern Matching and Multiple Repositories](#_pattern_matching_and_multiple_repositories) + +Spring Cloud Config also includes support for more complex requirements with pattern +matching on the application and profile name. +The pattern format is a comma-separated list of `{application}/{profile}` names with wildcards (note that a pattern beginning with a wildcard may need to be quoted), as shown in the following example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/spring-cloud-samples/config-repo + repos: + simple: https://github.com/simple/config-repo + special: + pattern: special*/dev*,*special*/dev* + uri: https://github.com/special/config-repo + local: + pattern: local* + uri: file:/home/configsvc/config-repo +``` + +If `{application}/{profile}` does not match any of the patterns, it uses the default URI defined under `spring.cloud.config.server.git.uri`. +In the above example, for the “simple” repository, the pattern is `simple/*` (it only matches one application named `simple` in all profiles). The “local” repository matches all application names beginning with `local` in all profiles (the `/*` suffix is added automatically to any pattern that does not have a profile matcher). + +| |The “one-liner” short cut used in the “simple” example can be used only if the only property to be set is the URI.
If you need to set anything else (credentials, pattern, and so on) you need to use the full form.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `pattern` property in the repo is actually an array, so you can use a YAML array (or `[0]`, `[1]`, etc. suffixes in properties files) to bind to multiple patterns. +You may need to do so if you are going to run apps with multiple profiles, as shown in the following example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/spring-cloud-samples/config-repo + repos: + development: + pattern: + - '*/development' + - '*/staging' + uri: https://github.com/development/config-repo + staging: + pattern: + - '*/qa' + - '*/production' + uri: https://github.com/staging/config-repo +``` + +| |Spring Cloud guesses that a pattern containing a profile that does not end in `*` implies that you actually want to match a list of profiles starting with this pattern (so `*/staging` is a shortcut for `["*/staging", "*/staging,*"]`, and so on).
This is common where, for instance, you need to run applications in the “development” profile locally but also the “cloud” profile remotely.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Every repository can also optionally store config files in sub-directories, and patterns to search for those directories can be specified as `search-paths`. +The following example shows a config file at the top level: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/spring-cloud-samples/config-repo + search-paths: + - foo + - bar* +``` + +In the preceding example, the server searches for config files in the top level and in the `foo/` sub-directory and also any sub-directory whose name begins with `bar`. + +By default, the server clones remote repositories when configuration +is first requested. +The server can be configured to clone the repositories at startup, as shown in the following top-level example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://git/common/config-repo.git + repos: + team-a: + pattern: team-a-* + cloneOnStart: true + uri: https://git/team-a/config-repo.git + team-b: + pattern: team-b-* + cloneOnStart: false + uri: https://git/team-b/config-repo.git + team-c: + pattern: team-c-* + uri: https://git/team-a/config-repo.git +``` + +In the preceding example, the server clones team-a’s config-repo on startup, before it +accepts any requests. +All other repositories are not cloned until configuration from the repository is requested. 
+ +| |Setting a repository to be cloned when the Config Server starts up can help to identify a misconfigured configuration source (such as an invalid repository URI) quickly, while the Config Server is starting up.
With `cloneOnStart` not enabled for a configuration source, the Config Server may start successfully with a misconfigured or invalid configuration source and not detect an error until an application requests configuration from that configuration source.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### [Authentication](#_authentication) + +To use HTTP basic authentication on the remote repository, add the `username` and `password` properties separately (not in the URL), as shown in the following example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/spring-cloud-samples/config-repo + username: trolley + password: strongpassword +``` + +If you do not use HTTPS and user credentials, SSH should also work out of the box when you store keys in the default directories (`~/.ssh`) and the URI points to an SSH location, such as `[[email protected]](/cdn-cgi/l/email-protection):configuration/cloud-configuration`. +It is important that an entry for the Git server be present in the `~/.ssh/known_hosts` file and that it is in `ssh-rsa` format. +Other formats (such as `ecdsa-sha2-nistp256`) are not supported. +To avoid surprises, you should ensure that only one entry is present in the `known_hosts` file for the Git server and that it matches the URL you provided to the config server. +If you use a hostname in the URL, you want to have exactly that (not the IP) in the `known_hosts` file. +The repository is accessed by using JGit, so any documentation you find on that should be applicable. 
+HTTPS proxy settings can be set in `~/.git/config` or (in the same way as for any other JVM process) with
+system properties (`-Dhttps.proxyHost` and `-Dhttps.proxyPort`).
+
+| |If you do not know where your `~/.git` directory is, use `git config --global` to manipulate the settings (for example, `git config --global http.sslVerify false`).|
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+JGit requires RSA keys in PEM format. Below is an example ssh-keygen (from openssh) command that will generate a key in the correct format:
+
+```
+ssh-keygen -m PEM -t rsa -b 4096 -f ~/config_server_deploy_key.rsa
+```
+
+Warning: When working with SSH keys, the expected ssh private-key must begin with ``-----BEGIN RSA PRIVATE KEY-----``. If the key starts with ``-----BEGIN OPENSSH PRIVATE KEY-----`` then the RSA key will not load when spring-cloud-config server is started. The error looks like:
+
+```
+- Error in object 'spring.cloud.config.server.git': codes [PrivateKeyIsValid.spring.cloud.config.server.git,PrivateKeyIsValid]; arguments [org.springframework.context.support.DefaultMessageSourceResolvable: codes [spring.cloud.config.server.git.,]; arguments []; default message []]; default message [Property 'spring.cloud.config.server.git.privateKey' is not a valid private key]
+```
+
+To correct the above error the RSA key must be converted to PEM format. An example using openssh is provided above for generating a new key in the appropriate format.
+
+##### [Authentication with AWS CodeCommit](#_authentication_with_aws_codecommit)
+
+Spring Cloud Config Server also supports [AWS CodeCommit](https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) authentication.
+AWS CodeCommit uses an authentication helper when using Git from the command line. 
+This helper is not used with the JGit library, so a JGit CredentialProvider for AWS CodeCommit is created if the Git URI matches the AWS CodeCommit pattern. +AWS CodeCommit URIs follow this pattern: + +``` +https://git-codecommit.${AWS_REGION}.amazonaws.com/v1/repos/${repo} +``` + +If you provide a username and password with an AWS CodeCommit URI, they must be the [AWS accessKeyId and secretAccessKey](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html) that provide access to the repository. +If you do not specify a username and password, the accessKeyId and secretAccessKey are retrieved by using the [AWS Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). + +If your Git URI matches the CodeCommit URI pattern (shown earlier), you must provide valid AWS credentials in the username and password or in one of the locations supported by the default credential provider chain. +AWS EC2 instances may use [IAM Roles for EC2 Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html). + +| |The `aws-java-sdk-core` jar is an optional dependency.
If the `aws-java-sdk-core` jar is not on your classpath, the AWS Code Commit credential provider is not created, regardless of the git server URI.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### [Authentication with Google Cloud Source](#_authentication_with_google_cloud_source) + +Spring Cloud Config Server also supports authenticating against [Google Cloud Source](https://cloud.google.com/source-repositories/) repositories. + +If your Git URI uses the `http` or `https` protocol and the domain name is `source.developers.google.com`, the Google Cloud Source credentials provider will be used. A Google Cloud Source repository URI has the format `[https://source.developers.google.com/p/${GCP_PROJECT}/r/${REPO}](https://source.developers.google.com/p/${GCP_PROJECT}/r/${REPO})`. To obtain the URI for your repository, click on "Clone" in the Google Cloud Source UI, and select "Manually generated credentials". Do not generate any credentials, simply copy the displayed URI. + +The Google Cloud Source credentials provider will use Google Cloud Platform application default credentials. See [Google Cloud SDK documentation](https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login) on how to create application default credentials for a system. This approach will work for user accounts in dev environments and for service accounts in production environments. + +| |`com.google.auth:google-auth-library-oauth2-http` is an optional dependency.
If the `google-auth-library-oauth2-http` jar is not on your classpath, the Google Cloud Source credential provider is not created, regardless of the git server URI.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### [Git SSH configuration using properties](#_git_ssh_configuration_using_properties) + +By default, the JGit library used by Spring Cloud Config Server uses SSH configuration files such as `~/.ssh/known_hosts` and `/etc/ssh/ssh_config` when connecting to Git repositories by using an SSH URI. +In cloud environments such as Cloud Foundry, the local filesystem may be ephemeral or not easily accessible. +For those cases, SSH configuration can be set by using Java properties. +In order to activate property-based SSH configuration, the `spring.cloud.config.server.git.ignoreLocalSshSettings` property must be set to `true`, as shown in the following example: + +``` + spring: + cloud: + config: + server: + git: + uri: [email protected]:team/repo1.git + ignoreLocalSshSettings: true + hostKey: someHostKey + hostKeyAlgorithm: ssh-rsa + privateKey: | + -----BEGIN RSA PRIVATE KEY----- + MIIEpgIBAAKCAQEAx4UbaDzY5xjW6hc9jwN0mX33XpTDVW9WqHp5AKaRbtAC3DqX + IXFMPgw3K45jxRb93f8tv9vL3rD9CUG1Gv4FM+o7ds7FRES5RTjv2RT/JVNJCoqF + ol8+ngLqRZCyBtQN7zYByWMRirPGoDUqdPYrj2yq+ObBBNhg5N+hOwKjjpzdj2Ud + 1l7R+wxIqmJo1IYyy16xS8WsjyQuyC0lL456qkd5BDZ0Ag8j2X9H9D5220Ln7s9i + oezTipXipS7p7Jekf3Ywx6abJwOmB0rX79dV4qiNcGgzATnG1PkXxqt76VhcGa0W + DDVHEEYGbSQ6hIGSh0I7BQun0aLRZojfE3gqHQIDAQABAoIBAQCZmGrk8BK6tXCd + fY6yTiKxFzwb38IQP0ojIUWNrq0+9Xt+NsypviLHkXfXXCKKU4zUHeIGVRq5MN9b + BO56/RrcQHHOoJdUWuOV2qMqJvPUtC0CpGkD+valhfD75MxoXU7s3FK7yjxy3rsG + EmfA6tHV8/4a5umo5TqSd2YTm5B19AhRqiuUVI1wTB41DjULUGiMYrnYrhzQlVvj + 5MjnKTlYu3V8PoYDfv1GmxPPh6vlpafXEeEYN8VB97e5x3DGHjZ5UrurAmTLTdO8 + 
+AahyoKsIY612TkkQthJlt7FJAwnCGMgY6podzzvzICLFmmTXYiZ/28I4BX/mOSe + pZVnfRixAoGBAO6Uiwt40/PKs53mCEWngslSCsh9oGAaLTf/XdvMns5VmuyyAyKG + ti8Ol5wqBMi4GIUzjbgUvSUt+IowIrG3f5tN85wpjQ1UGVcpTnl5Qo9xaS1PFScQ + xrtWZ9eNj2TsIAMp/svJsyGG3OibxfnuAIpSXNQiJPwRlW3irzpGgVx/AoGBANYW + dnhshUcEHMJi3aXwR12OTDnaLoanVGLwLnkqLSYUZA7ZegpKq90UAuBdcEfgdpyi + PhKpeaeIiAaNnFo8m9aoTKr+7I6/uMTlwrVnfrsVTZv3orxjwQV20YIBCVRKD1uX + VhE0ozPZxwwKSPAFocpyWpGHGreGF1AIYBE9UBtjAoGBAI8bfPgJpyFyMiGBjO6z + FwlJc/xlFqDusrcHL7abW5qq0L4v3R+FrJw3ZYufzLTVcKfdj6GelwJJO+8wBm+R + gTKYJItEhT48duLIfTDyIpHGVm9+I1MGhh5zKuCqIhxIYr9jHloBB7kRm0rPvYY4 + VAykcNgyDvtAVODP+4m6JvhjAoGBALbtTqErKN47V0+JJpapLnF0KxGrqeGIjIRV + cYA6V4WYGr7NeIfesecfOC356PyhgPfpcVyEztwlvwTKb3RzIT1TZN8fH4YBr6Ee + KTbTjefRFhVUjQqnucAvfGi29f+9oE3Ei9f7wA+H35ocF6JvTYUsHNMIO/3gZ38N + CPjyCMa9AoGBAMhsITNe3QcbsXAbdUR00dDsIFVROzyFJ2m40i4KCRM35bC/BIBs + q0TY3we+ERB40U8Z2BvU61QuwaunJ2+uGadHo58VSVdggqAo0BSkH58innKKt96J + 69pcVH/4rmLbXdcmNYGm6iu+MlPQk4BUZknHSmVHIFdJ0EPupVaQ8RHT + -----END RSA PRIVATE KEY----- +``` + +The following table describes the SSH configuration properties. + +| Property Name | Remarks | +|----------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **ignoreLocalSshSettings** |If `true`, use property-based instead of file-based SSH config. Must be set at as `spring.cloud.config.server.git.ignoreLocalSshSettings`, **not** inside a repository definition.| +| **privateKey** | Valid SSH private key. Must be set if `ignoreLocalSshSettings` is true and Git URI is SSH format. | +| **hostKey** | Valid SSH host key. Must be set if `hostKeyAlgorithm` is also set. | +| **hostKeyAlgorithm** | One of `ssh-dss, ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521`. Must be set if `hostKey` is also set. | +| **strictHostKeyChecking** | `true` or `false`. 
If false, ignore errors with host key. | +| **knownHostsFile** | Location of custom `.known_hosts` file. | +|**preferredAuthentications**| Override server authentication method order. This should allow for evading login prompts if server has keyboard-interactive authentication before the `publickey` method. | + +##### [Placeholders in Git Search Paths](#_placeholders_in_git_search_paths) + +Spring Cloud Config Server also supports a search path with placeholders for the `{application}` and `{profile}` (and `{label}` if +you need it), as shown in the following example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/spring-cloud-samples/config-repo + search-paths: '{application}' +``` + +The preceding listing causes a search of the repository for files in the same name as the directory (as well as the top level). +Wildcards are also valid in a search path with placeholders (any matching directory is included in the search). + +##### [Force pull in Git Repositories](#_force_pull_in_git_repositories) + +As mentioned earlier, Spring Cloud Config Server makes a clone of the remote git repository in case the local copy gets dirty (for example, +folder content changes by an OS process) such that Spring Cloud Config Server cannot update the local copy from remote repository. 
+ +To solve this issue, there is a `force-pull` property that makes Spring Cloud Config Server force pull from the remote repository if the local copy is dirty, as shown in the following example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/spring-cloud-samples/config-repo + force-pull: true +``` + +If you have a multiple-repositories configuration, you can configure the `force-pull` property per repository, as shown in the following example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://git/common/config-repo.git + force-pull: true + repos: + team-a: + pattern: team-a-* + uri: https://git/team-a/config-repo.git + force-pull: true + team-b: + pattern: team-b-* + uri: https://git/team-b/config-repo.git + force-pull: true + team-c: + pattern: team-c-* + uri: https://git/team-a/config-repo.git +``` + +| |The default value for `force-pull` property is `false`.| +|---|-------------------------------------------------------| + +##### [Deleting untracked branches in Git Repositories](#_deleting_untracked_branches_in_git_repositories) + +As Spring Cloud Config Server has a clone of the remote git repository, +after checking out a branch to the local repo (e.g. fetching properties by label) it will keep this branch +forever or till the next server restart (which creates a new local repo). +So there could be a case when a remote branch is deleted but a local copy of it is still available for fetching. +And if a Spring Cloud Config Server client service starts with `--spring.cloud.config.label=deletedRemoteBranch,master` it will fetch properties from the `deletedRemoteBranch` local branch, but not from `master`. + +In order to keep local repository branches clean and up to date with the remote, the `deleteUntrackedBranches` property can be set. +It will make Spring Cloud Config Server **force** delete untracked branches from the local repository. 
+Example: + +``` +spring: + cloud: + config: + server: + git: + uri: https://github.com/spring-cloud-samples/config-repo + deleteUntrackedBranches: true +``` + +| |The default value for `deleteUntrackedBranches` property is `false`.| +|---|--------------------------------------------------------------------| + +##### [Git Refresh Rate](#_git_refresh_rate) + +You can control how often the config server will fetch updated configuration data +from your Git backend by using `spring.cloud.config.server.git.refreshRate`. The +value of this property is specified in seconds. By default the value is 0, meaning +the config server will fetch updated configuration from the Git repo every time it +is requested. + +##### [Default Label](#_default_label) + +The default label used for Git is `main`. If you do not set `spring.cloud.config.server.git.defaultLabel` and a branch named `main` does not exist, the config server will by default also try to checkout a branch named `master`. If +you would like to disable the fallback branch behavior you can set `spring.cloud.config.server.git.tryMasterBranch` to `false`. + +#### [Version Control Backend Filesystem Use](#_version_control_backend_filesystem_use) + +| |With VCS-based backends (git, svn), files are checked out or cloned to the local filesystem.
By default, they are put in the system temporary directory with a prefix of `config-repo-`.
On linux, for example, it could be `/tmp/config-repo-`.
Some operating systems [routinely clean out](https://serverfault.com/questions/377348/when-does-tmp-get-cleared/377349#377349) temporary directories.
This can lead to unexpected behavior, such as missing properties.
To avoid this problem, change the directory that Config Server uses by setting `spring.cloud.config.server.git.basedir` or `spring.cloud.config.server.svn.basedir` to a directory that does not reside in the system temp structure.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### [File System Backend](#_file_system_backend) + +There is also a “native” profile in the Config Server that does not use Git but loads the config files from the local classpath or file system (any static URL you want to point to with `spring.cloud.config.server.native.searchLocations`). +To use the native profile, launch the Config Server with `spring.profiles.active=native`. + +| |Remember to use the `file:` prefix for file resources (the default without a prefix is usually the classpath).
As with any Spring Boot configuration, you can embed `${}`-style environment placeholders, but remember that absolute paths in Windows require an extra `/` (for example, `[file:///${user.home}/config-repo](file:///${user.home}/config-repo)`).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The default value of the `searchLocations` is identical to a local Spring Boot application (that is, `[classpath:/, classpath:/config,
file:./, file:./config]`).
This does not expose the `application.properties` from the server to all clients, because any property sources present in the server are removed before being sent to the client.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |A filesystem backend is great for getting started quickly and for testing.
To use it in production, you need to be sure that the file system is reliable and shared across all instances of the Config Server.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The search locations can contain placeholders for `{application}`, `{profile}`, and `{label}`. +In this way, you can segregate the directories in the path and choose a strategy that makes sense for you (such as subdirectory per application or subdirectory per profile). + +If you do not use placeholders in the search locations, this repository also appends the `{label}` parameter of the HTTP resource to a suffix on the search path, so properties files are loaded from each search location **and** a subdirectory with the same name as the label (the labelled properties take precedence in the Spring Environment). +Thus, the default behaviour with no placeholders is the same as adding a search location ending with `/{label}/`. +For example, `file:/tmp/config` is the same as `file:/tmp/config,file:/tmp/config/{label}`. +This behavior can be disabled by setting `spring.cloud.config.server.native.addLabelLocations=false`. + +#### [Vault Backend](#vault-backend) + +Spring Cloud Config Server also supports [Vault](https://www.vaultproject.io) as a backend. + +Vault is a tool for securely accessing secrets. +A secret is anything that to which you want to tightly control access, such as API keys, passwords, certificates, and other sensitive information. Vault provides a unified interface to any secret while providing tight access control and recording a detailed audit log. + +For more information on Vault, see the [Vault quick start guide](https://learn.hashicorp.com/vault/?track=getting-started#getting-started). + +To enable the config server to use a Vault backend, you can run your config server with the `vault` profile. 
+For example, in your config server’s `application.properties`, you can add `spring.profiles.active=vault`. + +By default, Spring Cloud Config Server uses Token based Authentication to fetch config from Vault. +Vault also supports additional authentication methods like AppRole, LDAP, JWT, CloudFoundry, Kubernetes Auth. +In order to use any authentication method other than TOKEN or the X-Config-Token header, we need to have Spring Vault Core on the classpath so that Config Server can delegate authentication to that library. Please add the below dependencies to your Config Server App. + +`Maven (pom.xml)` + +``` + + + org.springframework.vault + spring-vault-core + + +``` + +`Gradle (build.gradle)` + +``` +dependencies { + implementation "org.springframework.vault:spring-vault-core" +} +``` + +By default, the config server assumes that your Vault server runs at `[http://127.0.0.1:8200](http://127.0.0.1:8200)`. +It also assumes that the name of backend is `secret` and the key is `application`. +All of these defaults can be configured in your config server’s `application.properties`. +The following table describes configurable Vault properties: + +| Name |Default Value| +|-----------------|-------------| +| host | 127.0.0.1 | +| port | 8200 | +| scheme | http | +| backend | secret | +| defaultKey | application | +|profileSeparator | , | +| kvVersion | 1 | +|skipSslValidation| false | +| timeout | 5 | +| namespace | null | + +| |All of the properties in the preceding table must be prefixed with `spring.cloud.config.server.vault` or placed in the correct Vault section of a composite configuration.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +All configurable properties can be found in `org.springframework.cloud.config.server.environment.VaultEnvironmentProperties`. 
+ +| |Vault 0.10.0 introduced a versioned key-value backend (k/v backend version 2) that exposes a different API than earlier versions, it now requires a `data/` between the mount path and the actual context path and wraps secrets in a `data` object. Setting `spring.cloud.config.server.vault.kv-version=2` will take this into account.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Optionally, there is support for the Vault Enterprise `X-Vault-Namespace` header. To have it sent to Vault set the `namespace` property. + +With your config server running, you can make HTTP requests to the server to retrieve +values from the Vault backend. +To do so, you need a token for your Vault server. + +First, place some data in you Vault, as shown in the following example: + +``` +$ vault kv put secret/application foo=bar baz=bam +$ vault kv put secret/myapp foo=myappsbar +``` + +Second, make an HTTP request to your config server to retrieve the values, as shown in the following example: + +`$ curl -X "GET" "http://localhost:8888/myapp/default" -H "X-Config-Token: yourtoken"` + +You should see a response similar to the following: + +``` +{ + "name":"myapp", + "profiles":[ + "default" + ], + "label":null, + "version":null, + "state":null, + "propertySources":[ + { + "name":"vault:myapp", + "source":{ + "foo":"myappsbar" + } + }, + { + "name":"vault:application", + "source":{ + "baz":"bam", + "foo":"bar" + } + } + ] +} +``` + +The default way for a client to provide the necessary authentication to let Config Server talk to Vault is to set the X-Config-Token header. 
+However, you can instead omit the header and configure the authentication in the server, by setting the same configuration properties as Spring Cloud Vault. +The property to set is `spring.cloud.config.server.vault.authentication`. +It should be set to one of the supported authentication methods. +You may also need to set other properties specific to the authentication method you use, by using the same property names as documented for `spring.cloud.vault` but instead using the `spring.cloud.config.server.vault` prefix. +See the [Spring Cloud Vault Reference Guide](https://cloud.spring.io/spring-cloud-vault/reference/html/#vault.config.authentication) for more detail. + +| |If you omit the X-Config-Token header and use a server property to set the authentication, the Config Server application needs an additional dependency on Spring Vault to enable the additional authentication options.
See the [Spring Vault Reference Guide](https://docs.spring.io/spring-vault/docs/current/reference/html/#dependencies) for how to add that dependency.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### [Multiple Properties Sources](#_multiple_properties_sources) + +When using Vault, you can provide your applications with multiple properties sources. +For example, assume you have written data to the following paths in Vault: + +``` +secret/myApp,dev +secret/myApp +secret/application,dev +secret/application +``` + +Properties written to `secret/application` are available to [all applications using the Config Server](#_vault_server). +An application with the name, `myApp`, would have any properties written to `secret/myApp` and `secret/application` available to it. +When `myApp` has the `dev` profile enabled, properties written to all of the above paths would be available to it, with properties in the first path in the list taking priority over the others. + +#### [Accessing Backends Through a Proxy](#_accessing_backends_through_a_proxy) + +The configuration server can access a Git or Vault backend through an HTTP or HTTPS proxy. This behavior is controlled for either Git or Vault by settings under `proxy.http` and `proxy.https`. These settings are per repository, so if you are using a [composite environment repository](#composite-environment-repositories) you must configure proxy settings for each backend in the composite individually. If using a network which requires separate proxy servers for HTTP and HTTPS URLs, you can configure both the HTTP and the HTTPS proxy settings for a single backend. 
+ +The following table describes the proxy configuration properties for both HTTP and HTTPS proxies. All of these properties must be prefixed by `proxy.http` or `proxy.https`. + +| Property Name | Remarks | +|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **host** | The host of the proxy. | +| **port** | The port with which to access the proxy. | +|**nonProxyHosts**|Any hosts which the configuration server should access outside the proxy. If values are provided for both `proxy.http.nonProxyHosts` and `proxy.https.nonProxyHosts`, the `proxy.http` value will be used.| +| **username** | The username with which to authenticate to the proxy. If values are provided for both `proxy.http.username` and `proxy.https.username`, the `proxy.http` value will be used. | +| **password** | The password with which to authenticate to the proxy. If values are provided for both `proxy.http.password` and `proxy.https.password`, the `proxy.http` value will be used. | + +The following configuration uses an HTTPS proxy to access a Git repository. 
+ +``` +spring: + profiles: + active: git + cloud: + config: + server: + git: + uri: https://github.com/spring-cloud-samples/config-repo + proxy: + https: + host: my-proxy.host.io + password: myproxypassword + port: '3128' + username: myproxyusername + nonProxyHosts: example.com +``` + +#### [Sharing Configuration With All Applications](#_sharing_configuration_with_all_applications) + +Sharing configuration between all applications varies according to which approach you take, as described in the following topics: + +* [File Based Repositories](#spring-cloud-config-server-file-based-repositories) + +* [Vault Server](#spring-cloud-config-server-vault-server) + +##### [File Based Repositories](#spring-cloud-config-server-file-based-repositories) + +With file-based (git, svn, and native) repositories, resources with file names in `application*` (`application.properties`, `application.yml`, `application-*.properties`, and so on) are shared between all client applications. +You can use resources with these file names to configure global defaults and have them be overridden by application-specific files as necessary. + +The [property overrides](#property-overrides) feature can also be used for setting global defaults, with placeholders applications +allowed to override them locally. + +| |With the “native” profile (a local file system backend) , you should use an explicit search location that is not part of the server’s own configuration.
Otherwise, the `application*` resources in the default search locations get removed because they are part of the server.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### [Vault Server](#spring-cloud-config-server-vault-server) + +When using Vault as a backend, you can share configuration with all applications by placing configuration in `secret/application`. +For example, if you run the following Vault command, all applications using the config server will have the properties `foo` and `baz` available to them: + +``` +$ vault write secret/application foo=bar baz=bam +``` + +##### [CredHub Server](#_credhub_server) + +When using CredHub as a backend, you can share configuration with all applications by placing configuration in `/application/` or by placing it in the `default` profile for the application. +For example, if you run the following CredHub command, all applications using the config server will have the properties `shared.color1` and `shared.color2` available to them: + +``` +credhub set --name "/application/profile/master/shared" --type=json +value: {"shared.color1": "blue", "shared.color": "red"} +``` + +``` +credhub set --name "/my-app/default/master/more-shared" --type=json +value: {"shared.word1": "hello", "shared.word2": "world"} +``` + +#### [AWS Secrets Manager](#_aws_secrets_manager) + +When using AWS Secrets Manager as a backend, you can share configuration with all applications by placing configuration in `/application/` or by placing it in the `default` profile for the application. 
+For example, if you add secrets with the following keys, all applications using the config server will have the properties `shared.foo` and `shared.bar` available to them: + +``` +secret name = /secret/application-default/ +``` + +``` +secret value = +{ + shared.foo: foo, + shared.bar: bar +} +``` + +or + +``` +secret name = /secret/application/ +``` + +``` +secret value = +{ + shared.foo: foo, + shared.bar: bar +} +``` + +##### [AWS Parameter Store](#_aws_parameter_store) + +When using AWS Parameter Store as a backend, you can share configuration with all applications by placing properties within the `/application` hierarchy. + +For example, if you add parameters with the following names, all applications using the config server will have the properties `foo.bar` and `fred.baz` available to them: + +``` +/config/application/foo.bar +/config/application-default/fred.baz +``` + +#### [JDBC Backend](#_jdbc_backend) + +Spring Cloud Config Server supports JDBC (relational database) as a backend for configuration properties. +You can enable this feature by adding `spring-jdbc` to the classpath and using the `jdbc` profile or by adding a bean of type `JdbcEnvironmentRepository`. +If you include the right dependencies on the classpath (see the user guide for more details on that), Spring Boot configures a data source. + +You can disable autoconfiguration for `JdbcEnvironmentRepository` by setting the `spring.cloud.config.server.jdbc.enabled` property to `false`. + +The database needs to have a table called `PROPERTIES` with columns called `APPLICATION`, `PROFILE`, and `LABEL` (with the usual `Environment` meaning), plus `KEY` and `VALUE` for the key and value pairs in `Properties` style. +All fields are of type String in Java, so you can make them `VARCHAR` of whatever length you need. 
+Property values behave in the same way as they would if they came from Spring Boot properties files named `{application}-{profile}.properties`, including all the encryption and decryption, which will be applied as post-processing steps (that is, not in the repository implementation directly). + +#### [Redis Backend](#_redis_backend) + +Spring Cloud Config Server supports Redis as a backend for configuration properties. +You can enable this feature by adding a dependency to [Spring Data Redis](https://spring.io/projects/spring-data-redis). + +pom.xml + +``` + + + org.springframework.boot + spring-boot-starter-data-redis + + +``` + +The following configuration uses Spring Data `RedisTemplate` to access a Redis. We can use `spring.redis.*` properties to override default connection settings. + +``` +spring: + profiles: + active: redis + redis: + host: redis + port: 16379 +``` + +The properties should be stored as fields in a hash. The name of hash should be the same as `spring.application.name` property or conjunction of `spring.application.name` and `spring.profiles.active[n]`. + +``` +HMSET sample-app server.port "8100" sample.topic.name "test" test.property1 "property1" +``` + +After running the command visible above a hash should contain the following keys with values: + +``` +HGETALL sample-app +{ + "server.port": "8100", + "sample.topic.name": "test", + "test.property1": "property1" +} +``` + +| |When no profile is specified `default` will be used.| +|---|----------------------------------------------------| + +#### [AWS S3 Backend](#_aws_s3_backend) + +Spring Cloud Config Server supports AWS S3 as a backend for configuration properties. +You can enable this feature by adding a dependency to the [AWS Java SDK For Amazon S3](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/examples-s3.html). + +pom.xml + +``` + + + com.amazonaws + aws-java-sdk-s3 + + +``` + +The following configuration uses the AWS S3 client to access configuration files. 
We can use `spring.cloud.config.server.awss3.*` properties to select the bucket where your configuration is stored. + +``` +spring: + profiles: + active: awss3 + cloud: + config: + server: + awss3: + region: us-east-1 + bucket: bucket1 +``` + +It is also possible to specify an AWS URL to [override the standard endpoint](https://aws.amazon.com/blogs/developer/using-new-regions-and-endpoints/) of your S3 service with `spring.cloud.config.server.awss3.endpoint`. This allows support for beta regions of S3, and other S3 compatible storage APIs. + +Credentials are found using the [Default AWS Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Versioned and encrypted buckets are supported without further configuration. + +Configuration files are stored in your bucket as `{application}-{profile}.properties`, `{application}-{profile}.yml` or `{application}-{profile}.json`. An optional label can be provided to specify a directory path to the file. + +| |When no profile is specified `default` will be used.| +|---|----------------------------------------------------| + +#### [AWS Parameter Store Backend](#_aws_parameter_store_backend) + +Spring Cloud Config Server supports AWS Parameter Store as a backend for configuration properties. You can enable this feature by adding a dependency to the [AWS Java SDK for SSM](https://github.com/aws/aws-sdk-java/tree/master/aws-java-sdk-ssm). + +pom.xml + +``` + + com.amazonaws + aws-java-sdk-ssm + +``` + +The following configuration uses the AWS SSM client to access parameters. + +``` +spring: + profiles: + active: awsparamstore + cloud: + config: + server: + awsparamstore: + region: eu-west-2 + endpoint: https://ssm.eu-west-2.amazonaws.com + origin: aws:parameter: + prefix: /config/service + profile-separator: _ + recursive: true + decrypt-values: true + max-results: 5 +``` + +The following table describes the AWS Parameter Store configuration properties. 
+ +| Property Name |Required| Default Value | Remarks | +|---------------------|--------|--------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **region** | no | |The region to be used by the AWS Parameter Store client. If it’s not explicitly set, the SDK tries to determine the region to use by using the [Default Region Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/java-dg-region-selection.html#default-region-provider-chain).| +| **endpoint** | no | | The URL of the entry point for the AWS SSM client. This can be used to specify an alternate endpoint for the API requests. | +| **origin** | no |`aws:ssm:parameter:`| The prefix that is added to the property source’s name to show their provenance. | +| **prefix** | no | `/config` | Prefix indicating L1 level in the parameter hierarchy for every property loaded from the AWS Parameter Store. | +|**profile-separator**| no | `-` | String that separates an appended profile from the context name. | +| **recursive** | no | `true` | Flag to indicate the retrieval of all AWS parameters within a hierarchy. | +| **decrypt-values** | no | `true` | Flag to indicate the retrieval of all AWS parameters with their value decrypted. | +| **max-results** | no | `10` | The maximum number of items to return for an AWS Parameter Store API call. | + +AWS Parameter Store API credentials are determined using the [Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default). +Versioned parameters are already supported with the default behaviour of returning the latest version. 
+ +| |* When no application is specified, `application` is the default, and when no profile is specified, `default` is used.

* Valid values for `awsparamstore.prefix` must start with a forward slash followed by one or more valid path segments or be empty.

* Valid values for `awsparamstore.profile-separator` can only contain dots, dashes and underscores.

* Valid values for `awsparamstore.max-results` must be within the **[1, 10]** range.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### [AWS Secrets Manager Backend](#_aws_secrets_manager_backend) + +Spring Cloud Config Server supports [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) as a backend for configuration properties. +You can enable this feature by adding a dependency to [AWS Java SDK for Secrets Manager](https://github.com/aws/aws-sdk-java/tree/master/aws-java-sdk-secretsmanager). + +pom.xml + +``` + + com.amazonaws + aws-java-sdk-secretsmanager + +``` + +The following configuration uses the AWS Secrets Manager client to access secrets. + +``` +spring: + profiles: + active: awssecretsmanager + cloud: + config: + server: + aws-secretsmanager: + region: us-east-1 + endpoint: https://us-east-1.console.aws.amazon.com/ + origin: aws:secrets: + prefix: /secret/foo + profileSeparator: _ +``` + +AWS Secrets Manager API credentials are determined using [Default Credential Provider Chain](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default). + +| |* When no application is specified `application` is the default, and when no profile is specified `default` is used.| +|---|--------------------------------------------------------------------------------------------------------------------| + +#### [CredHub Backend](#_credhub_backend) + +Spring Cloud Config Server supports [CredHub](https://docs.cloudfoundry.org/credhub) as a backend for configuration properties. 
+You can enable this feature by adding a dependency to [Spring CredHub](https://spring.io/projects/spring-credhub). + +pom.xml + +``` + + + org.springframework.credhub + spring-credhub-starter + + +``` + +The following configuration uses mutual TLS to access a CredHub: + +``` +spring: + profiles: + active: credhub + cloud: + config: + server: + credhub: + url: https://credhub:8844 +``` + +The properties should be stored as JSON, such as: + +``` +credhub set --name "/demo-app/default/master/toggles" --type=json +value: {"toggle.button": "blue", "toggle.link": "red"} +``` + +``` +credhub set --name "/demo-app/default/master/abs" --type=json +value: {"marketing.enabled": true, "external.enabled": false} +``` + +All client applications with the name `spring.cloud.config.name=demo-app` will have the following properties available to them: + +``` +{ + toggle.button: "blue", + toggle.link: "red", + marketing.enabled: true, + external.enabled: false +} +``` + +| |When no profile is specified `default` will be used and when no label is specified `master` will be used as a default value.
NOTE: Values added to `application` will be shared by all the applications.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### [OAuth 2.0](#_oauth_2_0) + +You can authenticate with [OAuth 2.0](https://oauth.net/2/) using [UAA](https://docs.cloudfoundry.org/concepts/architecture/uaa.html) as a provider. + +pom.xml + +``` + + + org.springframework.security + spring-security-config + + + org.springframework.security + spring-security-oauth2-client + + +``` + +The following configuration uses OAuth 2.0 and UAA to access a CredHub: + +``` +spring: + profiles: + active: credhub + cloud: + config: + server: + credhub: + url: https://credhub:8844 + oauth2: + registration-id: credhub-client + security: + oauth2: + client: + registration: + credhub-client: + provider: uaa + client-id: credhub_config_server + client-secret: asecret + authorization-grant-type: client_credentials + provider: + uaa: + token-uri: https://uaa:8443/oauth/token +``` + +| |The used UAA client-id should have `credhub.read` as scope.| +|---|-----------------------------------------------------------| + +#### [Composite Environment Repositories](#composite-environment-repositories) + +In some scenarios, you may wish to pull configuration data from multiple environment repositories. +To do so, you can enable the `composite` profile in your configuration server’s application properties or YAML file. 
+If, for example, you want to pull configuration data from a Subversion repository as well as two Git repositories, you can set the following properties for your configuration server: + +``` +spring: + profiles: + active: composite + cloud: + config: + server: + composite: + - + type: svn + uri: file:///path/to/svn/repo + - + type: git + uri: file:///path/to/rex/git/repo + - + type: git + uri: file:///path/to/walter/git/repo +``` + +Using this configuration, precedence is determined by the order in which repositories are listed under the `composite` key. +In the above example, the Subversion repository is listed first, so a value found in the Subversion repository will override values found for the same property in one of the Git repositories. +A value found in the `rex` Git repository will be used before a value found for the same property in the `walter` Git repository. + +If you want to pull configuration data only from repositories that are each of distinct types, you can enable the corresponding profiles, rather than the `composite` profile, in your configuration server’s application properties or YAML file. +If, for example, you want to pull configuration data from a single Git repository and a single HashiCorp Vault server, you can set the following properties for your configuration server: + +``` +spring: + profiles: + active: git, vault + cloud: + config: + server: + git: + uri: file:///path/to/git/repo + order: 2 + vault: + host: 127.0.0.1 + port: 8200 + order: 1 +``` + +Using this configuration, precedence can be determined by an `order` property. +You can use the `order` property to specify the priority order for all your repositories. +The lower the numerical value of the `order` property, the higher priority it has. +The priority order of a repository helps resolve any potential conflicts between repositories that contain values for the same properties. 
+ +| |If your composite environment includes a Vault server as in the previous example, you must include a Vault token in every request made to the configuration server. See [Vault Backend](#vault-backend).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Any type of failure when retrieving values from an environment repository results in a failure for the entire composite environment.
If you would like the composite to continue even when a repository fails, you can set `spring.cloud.config.server.failOnCompositeError` to `false`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When using a composite environment, it is important that all repositories contain the same labels.
If you have an environment similar to those in the preceding examples and you request configuration data with the `master` label but the Subversion repository does not contain a branch called `master`, the entire request fails.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### [Custom Composite Environment Repositories](#_custom_composite_environment_repositories) + +In addition to using one of the environment repositories from Spring Cloud, you can also provide your own `EnvironmentRepository` bean to be included as part of a composite environment. +To do so, your bean must implement the `EnvironmentRepository` interface. +If you want to control the priority of your custom `EnvironmentRepository` within the composite environment, you should also implement the `Ordered` interface and override the `getOrdered` method. +If you do not implement the `Ordered` interface, your `EnvironmentRepository` is given the lowest priority. + +#### [Property Overrides](#property-overrides) + +The Config Server has an “overrides” feature that lets the operator provide configuration properties to all applications. +The overridden properties cannot be accidentally changed by the application with the normal Spring Boot hooks. +To declare overrides, add a map of name-value pairs to `spring.cloud.config.server.overrides`, as shown in the following example: + +``` +spring: + cloud: + config: + server: + overrides: + foo: bar +``` + +The preceding examples causes all applications that are config clients to read `foo=bar`, independent of their own configuration. + +| |A configuration system cannot force an application to use configuration data in any particular way.
Consequently, overrides are not enforceable.
However, they do provide useful default behavior for Spring Cloud Config clients.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Normally, Spring environment placeholders with `${}` can be escaped (and resolved on the client) by using backslash (`\`) to escape the `$` or the `{`.
For example, `\${app.foo:bar}` resolves to `bar`, unless the app provides its own `app.foo`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |In YAML, you do not need to escape the backslash itself.
However, in properties files, you do need to escape the backslash, when you configure the overrides on the server.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can change the priority of all overrides in the client to be more like default values, letting applications supply their own values in environment variables or System properties, by setting the `spring.cloud.config.overrideNone=true` flag (the default is false) in the remote repository. + +### [Health Indicator](#_health_indicator) + +Config Server comes with a Health Indicator that checks whether the configured `EnvironmentRepository` is working. +By default, it asks the `EnvironmentRepository` for an application named `app`, the `default` profile, and the default label provided by the `EnvironmentRepository` implementation. + +You can configure the Health Indicator to check more applications along with custom profiles and custom labels, as shown in the following example: + +``` +spring: + cloud: + config: + server: + health: + repositories: + myservice: + label: mylabel + myservice-dev: + name: myservice + profiles: development +``` + +You can disable the Health Indicator by setting `management.health.config.enabled=false`. + +### [Security](#_security) + +You can secure your Config Server in any way that makes sense to you (from physical network security to OAuth2 bearer tokens), because Spring Security and Spring Boot offer support for many security arrangements. + +To use the default Spring Boot-configured HTTP Basic security, include Spring Security on the classpath (for example, through `spring-boot-starter-security`). +The default is a username of `user` and a randomly generated password. 
A random password is not useful in practice, so we recommend you configure the password (by setting `spring.security.user.password`) and encrypt it (see below for instructions on how to do that). + +### [Actuator and Security](#_actuator_and_security) + +| |Some platforms configure health checks or something similar and point to `/actuator/health` or other actuator endpoints. If actuator is not a dependency of config server, requests to `/actuator/**` would match the config server API `/{application}/{label}`, possibly leaking secure information. Remember to add the `spring-boot-starter-actuator` dependency in this case and configure the users such that the user that makes calls to `/actuator/**` does not have access to the config server API at `/{application}/{label}`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### [Encryption and Decryption](#_encryption_and_decryption) + +| |To use the encryption and decryption features you need the full-strength JCE installed in your JVM (it is not included by default).
You can download the “Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files” from Oracle and follow the installation instructions (essentially, you need to replace the two policy files in the JRE lib/security directory with the ones that you downloaded).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If the remote property sources contain encrypted content (values starting with `{cipher}`), they are decrypted before sending to clients over HTTP. +The main advantage of this setup is that the property values need not be in plain text when they are “at rest” (for example, in a git repository). +If a value cannot be decrypted, it is removed from the property source and an additional property is added with the same key but prefixed with `invalid` and a value that means “not applicable” (usually ``). +This is largely to prevent cipher text being used as a password and accidentally leaking. + +If you set up a remote config repository for config client applications, it might contain an `application.yml` similar to the following: + +application.yml + +``` +spring: + datasource: + username: dbuser + password: '{cipher}FKSAJDFGYOS8F7GLHAKERGFHLSAJ' +``` + +Encrypted values in `application.properties` file must not be wrapped in quotes. Otherwise, the value is not decrypted. The following example shows values that would work: + +application.properties + +``` +spring.datasource.username: dbuser +spring.datasource.password: {cipher}FKSAJDFGYOS8F7GLHAKERGFHLSAJ +``` + +You can safely push this plain text to a shared git repository, and the secret password remains protected. 
+ +The server also exposes `/encrypt` and `/decrypt` endpoints (on the assumption that these are secured and only accessed by authorized agents). +If you edit a remote config file, you can use the Config Server to encrypt values by POSTing to the `/encrypt` endpoint, as shown in the following example: + +``` +$ curl localhost:8888/encrypt -s -d mysecret +682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda +``` + +| |If you are testing with curl, then use `--data-urlencode` (instead of `-d`) and prefix the value to encrypt with `=` (curl requires this) or set an explicit `Content-Type: text/plain` to make sure curl encodes the data correctly when there are special characters ('+' is particularly tricky).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Be sure not to include any of the curl command statistics in the encrypted value, this is why the examples use the `-s` option to silence them. Outputting the value to a file can help avoid this problem.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The inverse operation is also available through `/decrypt` (provided the server is +configured with a symmetric key or a full key pair), as shown in the following example: + +``` +$ curl localhost:8888/decrypt -s -d 682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda +mysecret +``` + +Take the encrypted value and add the `{cipher}` prefix before you put it in the YAML or properties file and before you commit and push it to a remote (potentially insecure) store. 
+ +The `/encrypt` and `/decrypt` endpoints also both accept paths in the form of `/*/{application}/{profiles}`, which can be used to control cryptography on a per-application (name) and per-profile basis when clients call into the main environment resource. + +| |To control the cryptography in this granular way, you must also provide a `@Bean` of type `TextEncryptorLocator` that creates a different encryptor per name and profiles.
The one that is provided by default does not do so (all encryptions use the same key).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `spring` command line client (with Spring Cloud CLI extensions +installed) can also be used to encrypt and decrypt, as shown in the following example: + +``` +$ spring encrypt mysecret --key foo +682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda +$ spring decrypt --key foo 682bc583f4641835fa2db009355293665d2647dade3375c0ee201de2a49f7bda +mysecret +``` + +To use a key in a file (such as an RSA public key for encryption), prepend +the key value with "@" and provide the file path, as shown in the following example: + +``` +$ spring encrypt mysecret --key @${HOME}/.ssh/id_rsa.pub +AQAjPgt3eFZQXwt8tsHAVv/QHiY5sI2dRcR+... +``` + +| |The `--key` argument is mandatory (despite having a `--` prefix).| +|---|-----------------------------------------------------------------| + +### [Key Management](#_key_management) + +The Config Server can use a symmetric (shared) key or an asymmetric one (RSA key pair). +The asymmetric choice is superior in terms of security, but it is often more convenient to use a symmetric key since it is a single property value to configure in the `bootstrap.properties`. + +To configure a symmetric key, you need to set `encrypt.key` to a secret String (or use the `ENCRYPT_KEY` environment variable to keep it out of plain-text configuration files). + +| |You cannot configure an asymmetric key using `encrypt.key`.| +|---|-----------------------------------------------------------| + +To configure an asymmetric key use a keystore (e.g. as +created by the `keytool` utility that comes with the JDK). 
The +keystore properties are `encrypt.keyStore.*` with `*` equal to + +| Property | Description | +|---------------------------|--------------------------------------------------| +|`encrypt.keyStore.location`| Contains a `Resource` location | +|`encrypt.keyStore.password`| Holds the password that unlocks the keystore | +| `encrypt.keyStore.alias` | Identifies which key in the store to use | +| `encrypt.keyStore.type` |The type of KeyStore to create. Defaults to `jks`.| + +The encryption is done with the public key, and a private key is +needed for decryption. +Thus, in principle, you can configure only the public key in the server if you want to only encrypt (and are prepared to decrypt the values yourself locally with the private key). +In practice, you might not want to do decrypt locally, because it spreads the key management process around all the clients, instead of +concentrating it in the server. +On the other hand, it can be a useful option if your config server is relatively insecure and only a handful of clients need the encrypted properties. + +### [Creating a Key Store for Testing](#_creating_a_key_store_for_testing) + +To create a keystore for testing, you can use a command resembling the following: + +``` +$ keytool -genkeypair -alias mytestkey -keyalg RSA \ + -dname "CN=Web Server,OU=Unit,O=Organization,L=City,S=State,C=US" \ + -keypass changeme -keystore server.jks -storepass letmein +``` + +| |When using JDK 11 or above you may get the following warning when using the command above. In this case
you probably want to make sure the `keypass` and `storepass` values match.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +Warning: Different store and key passwords not supported for PKCS12 KeyStores. Ignoring user-specified -keypass value. +``` + +Put the `server.jks` file in the classpath (for instance) and then, in +your `bootstrap.yml`, for the Config Server, create the following settings: + +``` +encrypt: + keyStore: + location: classpath:/server.jks + password: letmein + alias: mytestkey + secret: changeme +``` + +### [Using Multiple Keys and Key Rotation](#_using_multiple_keys_and_key_rotation) + +In addition to the `{cipher}` prefix in encrypted property values, the Config Server looks for zero or more `{name:value}` prefixes before the start of the (Base64 encoded) cipher text. +The keys are passed to a `TextEncryptorLocator`, which can do whatever logic it needs to locate a `TextEncryptor` for the cipher. +If you have configured a keystore (`encrypt.keystore.location`), the default locator looks for keys with aliases supplied by the `key` prefix, with a cipher text like resembling the following: + +``` +foo: + bar: `{cipher}{key:testkey}...` +``` + +The locator looks for a key named "testkey". +A secret can also be supplied by using a `{secret:…​}` value in the prefix. +However, if it is not supplied, the default is to use the keystore password (which is what you get when you build a keystore and do not specify a secret). +If you do supply a secret, you should also encrypt the secret using a custom `SecretLocator`. + +When the keys are being used only to encrypt a few bytes of configuration data (that is, they are not being used elsewhere), key rotation is hardly ever necessary on cryptographic grounds. 
+However, you might occasionally need to change the keys (for example, in the event of a security breach). +In that case, all the clients would need to change their source config files (for example, in git) and use a new `{key:…​}` prefix in all the ciphers. +Note that the clients need to first check that the key alias is available in the Config Server keystore. + +| |If you want to let the Config Server handle all encryption as well as decryption, the `{name:value}` prefixes can also be added as plain text posted to the `/encrypt` endpoint.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### [Serving Encrypted Properties](#_serving_encrypted_properties) + +Sometimes you want the clients to decrypt the configuration locally, instead of doing it in the server. +In that case, if you provide the `encrypt.*` configuration to locate a key, you can still have `/encrypt` and `/decrypt` endpoints, but you need to explicitly switch off the decryption of outgoing properties by placing `spring.cloud.config.server.encrypt.enabled=false` in `bootstrap.[yml|properties]`. +If you do not care about the endpoints, it should work if you do not configure either the key or the enabled flag. + +## [Serving Alternative Formats](#_serving_alternative_formats) + +The default JSON format from the environment endpoints is perfect for consumption by Spring applications, because it maps directly onto the `Environment` abstraction. +If you prefer, you can consume the same data as YAML or Java properties by adding a suffix (".yml", ".yaml" or ".properties") to the resource path. +This can be useful for consumption by applications that do not care about the structure of the JSON endpoints or the extra metadata they provide (for example, an application that is not using Spring might benefit from the simplicity of this approach).
+ +The YAML and properties representations have an additional flag (provided as a boolean query parameter called `resolvePlaceholders`) to signal that placeholders in the source documents (in the standard Spring `${…​}` form) should be resolved in the output before rendering, where possible. +This is a useful feature for consumers that do not know about the Spring placeholder conventions. + +| |There are limitations in using the YAML or properties formats, mainly in relation to the loss of metadata.
For example, the JSON is structured as an ordered list of property sources, with names that correlate with the source.
The YAML and properties forms are coalesced into a single map, even if the origin of the values has multiple sources, and the names of the original source files are lost.
Also, the YAML representation is not necessarily a faithful representation of the YAML source in a backing repository either. It is constructed from a list of flat property sources, and assumptions have to be made about the form of the keys.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## [Serving Plain Text](#_serving_plain_text) + +Instead of using the `Environment` abstraction (or one of the alternative representations of it in YAML or properties format), your applications might need generic plain-text configuration files that are tailored to their environment. +The Config Server provides these through an additional endpoint at `/{application}/{profile}/{label}/{path}`, where `application`, `profile`, and `label` have the same meaning as the regular environment endpoint, but `path` is a path to a file name (such as `log.xml`). +The source files for this endpoint are located in the same way as for the environment endpoints. +The same search path is used for properties and YAML files. +However, instead of aggregating all matching resources, only the first one to match is returned. + +After a resource is located, placeholders in the normal format (`${…​}`) are resolved by using the effective `Environment` for the supplied application name, profile, and label. +In this way, the resource endpoint is tightly integrated with the environment endpoints. 
+ +| |As with the source files for environment configuration, the `profile` is used to resolve the file name.
So, if you want a profile-specific file, `/*/development/*/logback.xml` can be resolved by a file called `logback-development.xml` (in preference to `logback.xml`).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you do not want to supply the `label` and let the server use the default label, you can supply a `useDefaultLabel` request parameter.
Consequently, the preceding example for the `default` profile could be `/sample/default/nginx.conf?useDefaultLabel`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +At present, Spring Cloud Config can serve plaintext for git, SVN, native backends, and AWS S3. +The support for git, SVN, and native backends is identical. AWS S3 works a bit differently. +The following sections show how each one works: + +* [Git, SVN, and Native Backends](#spring-cloud-config-serving-plain-text-git-svn-native-backends) + +* [AWS S3](#spring-cloud-config-serving-plain-text-aws-s3) + +### [Git, SVN, and Native Backends](#spring-cloud-config-serving-plain-text-git-svn-native-backends) + +Consider the following example for a GIT or SVN repository or a native backend: + +``` +application.yml +nginx.conf +``` + +The `nginx.conf` might resemble the following listing: + +``` +server { + listen 80; + server_name ${nginx.server.name}; +} +``` + +`application.yml` might resemble the following listing: + +``` +nginx: + server: + name: example.com +--- +spring: + profiles: development +nginx: + server: + name: develop.com +``` + +The `/sample/default/master/nginx.conf` resource might be as follows: + +``` +server { + listen 80; + server_name example.com; +} +``` + +`/sample/development/master/nginx.conf` might be as follows: + +``` +server { + listen 80; + server_name develop.com; +} +``` + +### [AWS S3](#spring-cloud-config-serving-plain-text-aws-s3) + +To enable serving plain text for AWS s3, the Config Server application needs to include a dependency on Spring Cloud AWS. 
+For details on how to set up that dependency, see the [Spring Cloud AWS Reference Guide](https://cloud.spring.io/spring-cloud-static/spring-cloud-aws/2.1.3.RELEASE/single/spring-cloud-aws.html#_spring_cloud_aws_maven_dependency_management).
+Then you need to configure Spring Cloud AWS, as described in the [Spring Cloud AWS Reference Guide](https://cloud.spring.io/spring-cloud-static/spring-cloud-aws/2.1.3.RELEASE/single/spring-cloud-aws.html#_configuring_credentials).
+For example + +``` +spring: + application: + name: configserver + profiles: + active: composite + cloud: + config: + server: + composite: + - type: native + search-locations: ${HOME}/Desktop/config + bootstrap: true +``` + +| |If you use the bootstrap flag, the config server needs to have its name and repository URI configured in `bootstrap.yml`.| +|---|-------------------------------------------------------------------------------------------------------------------------| + +To change the location of the server endpoints, you can (optionally) set `spring.cloud.config.server.prefix` (for example, `/config`), to serve the resources under a prefix. +The prefix should start but not end with a `/`. +It is applied to the `@RequestMappings` in the Config Server (that is, underneath the Spring Boot `server.servletPath` and `server.contextPath` prefixes). + +If you want to read the configuration for an application directly from the backend repository (instead of from the config server), you +basically want an embedded config server with no endpoints. +You can switch off the endpoints entirely by not using the `@EnableConfigServer` annotation (set `spring.cloud.config.server.bootstrap=true`). + +## [Push Notifications and Spring Cloud Bus](#_push_notifications_and_spring_cloud_bus) + +Many source code repository providers (such as Github, Gitlab, Gitea, Gitee, Gogs, or Bitbucket) notify you of changes in a repository through a webhook. +You can configure the webhook through the provider’s user interface as a URL and a set of events in which you are interested. +For instance, [Github](https://developer.github.com/v3/activity/events/types/#pushevent) uses a POST to the webhook with a JSON body containing a list of commits and a header (`X-Github-Event`) set to `push`. +If you add a dependency on the `spring-cloud-config-monitor` library and activate the Spring Cloud Bus in your Config Server, then a `/monitor` endpoint is enabled. 
+ +When the webhook is activated, the Config Server sends a `RefreshRemoteApplicationEvent` targeted at the applications it thinks might have changed. +The change detection can be strategized. +However, by default, it looks for changes in files that match the application name (for example, `foo.properties` is targeted at the `foo` application, while `application.properties` is targeted at all applications). +The strategy to use when you want to override the behavior is `PropertyPathNotificationExtractor`, which accepts the request headers and body as parameters and returns a list of file paths that changed. + +The default configuration works out of the box with Github, Gitlab, Gitea, Gitee, Gogs or Bitbucket. +In addition to the JSON notifications from Github, Gitlab, Gitee, or Bitbucket, you can trigger a change notification by POSTing to `/monitor` with form-encoded body parameters in the pattern of `path={application}`. +Doing so broadcasts to applications matching the `{application}` pattern (which can contain wildcards). + +| |The `RefreshRemoteApplicationEvent` is transmitted only if the `spring-cloud-bus` is activated in both the Config Server and in the client application.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The default configuration also detects filesystem changes in local git repositories. In that case, the webhook is not used. However, as soon as you edit a config file, a refresh is broadcast.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## [Spring Cloud Config Client](#_spring_cloud_config_client) + +A Spring Boot application can take immediate advantage of the Spring Config Server (or other external property sources provided by the application developer). 
+It also picks up some additional useful features related to `Environment` change events. + +### [Spring Boot Config Data Import](#config-data-import) + +Spring Boot 2.4 introduced a new way to import configuration data via the `spring.config.import` property. This is now the default way to bind to Config Server. + +To optionally connect to config server set the following in application.properties: + +application.properties + +``` +spring.config.import=optional:configserver: +``` + +This will connect to the Config Server at the default location of "http://localhost:8888". Removing the `optional:` prefix will cause the Config Client to fail if it is unable to connect to Config Server. To change the location of Config Server either set `spring.cloud.config.uri` or add the url to the `spring.config.import` statement such as, `spring.config.import=optional:configserver:http://myhost:8888`. The location in the import property has precedence over the uri property. + +| |A `bootstrap` file (properties or yaml) is **not** needed for the Spring Boot Config Data method of import via `spring.config.import`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------| + +### [Config First Bootstrap](#config-first-bootstrap) + +To use the legacy bootstrap way of connecting to Config Server, bootstrap must be enabled via a property or the `spring-cloud-starter-bootstrap` starter. The property is `spring.cloud.bootstrap.enabled=true`. It must be set as a System Property or environment variable. +Once bootstrap has been enabled any application with Spring Cloud Config Client on the classpath will connect to Config Server as follows: +When a config client starts, it binds to the Config Server (through the `spring.cloud.config.uri` bootstrap configuration property) and initializes Spring `Environment` with remote property sources. 
+ +The net result of this behavior is that all client applications that want to consume the Config Server need a `bootstrap.yml` (or an environment variable) with the server address set in `spring.cloud.config.uri` (it defaults to "http://localhost:8888"). + +#### [Discovery First Lookup](#discovery-first-bootstrap) + +| |Unless you are using [config first bootstrap](#config-first-bootstrap), you will need to have a `spring.config.import` property in your configuration properties with an `optional:` prefix.
For example, `spring.config.import=optional:configserver:`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you use a `DiscoveryClient` implementation, such as Spring Cloud Netflix and Eureka Service Discovery or Spring Cloud Consul, you can have the Config Server register with the Discovery Service. + +If you prefer to use `DiscoveryClient` to locate the Config Server, you can do so by setting `spring.cloud.config.discovery.enabled=true` (the default is `false`). +For example, with Spring Cloud Netflix, you need to define the Eureka server address (for example, in `eureka.client.serviceUrl.defaultZone`). +The price for using this option is an extra network round trip on startup, to locate the service registration. +The benefit is that, as long as the Discovery Service is a fixed point, the Config Server can change its coordinates. +The default service ID is `configserver`, but you can change that on the client by setting `spring.cloud.config.discovery.serviceId` (and on the server, in the usual way for a service, such as by setting `spring.application.name`). + +The discovery client implementations all support some kind of metadata map (for example, we have `eureka.instance.metadataMap` for Eureka). +Some additional properties of the Config Server may need to be configured in its service registration metadata so that clients can connect correctly. +If the Config Server is secured with HTTP Basic, you can configure the credentials as `user` and `password`. +Also, if the Config Server has a context path, you can set `configPath`. +For example, the following YAML file is for a Config Server that is a Eureka client: + +``` +eureka: + instance: + ... 
+ metadataMap: + user: osufhalskjrtl + password: lviuhlszvaorhvlo5847 + configPath: /config +``` + +#### [Discovery First Bootstrap Using Eureka And WebClient](#_discovery_first_bootstrap_using_eureka_and_webclient) + +If you use the Eureka `DiscoveryClient` from Spring Cloud Netflix and also want to use `WebClient` instead of Jersey or `RestTemplate`, +you need to include `WebClient` on your classpath as well as set `eureka.client.webclient.enabled=true`. + +### [Config Client Fail Fast](#config-client-fail-fast) + +In some cases, you may want to fail startup of a service if it cannot connect to the Config Server. +If this is the desired behavior, set the bootstrap configuration property `spring.cloud.config.fail-fast=true` to make the client halt with an Exception. + +| |To get similar functionality using `spring.config.import`, simply omit the `optional:` prefix.| +|---|----------------------------------------------------------------------------------------------| + +### [Config Client Retry](#config-client-retry) + +If you expect that the config server may occasionally be unavailable when your application starts, you can make it keep trying after a failure. +First, you need to set `spring.cloud.config.fail-fast=true`. +Then you need to add `spring-retry` and `spring-boot-starter-aop` to your classpath. +The default behavior is to retry six times with an initial backoff interval of 1000ms and an exponential multiplier of 1.1 for subsequent backoffs. +You can configure these properties (and others) by setting the `spring.cloud.config.retry.*` configuration properties. + +| |To take full control of the retry behavior and are using legacy bootstrap, add a `@Bean` of type `RetryOperationsInterceptor` with an ID of `configServerRetryInterceptor`.
Spring Retry has a `RetryInterceptorBuilder` that supports creating one.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### [Config Client Retry with spring.config.import](#_config_client_retry_with_spring_config_import) + +Retry works with the Spring Boot `spring.config.import` statement and the normal properties work. However, if the import statement is in a profile, such as `application-prod.properties`, then you need a different way to configure retry. Configuration needs to be placed as url parameters on the import statement. + +application-prod.properties + +``` +spring.config.import=configserver:http://configserver.example.com?fail-fast=true&max-attempts=10&max-interval=1500&multiplier=1.2&initial-interval=1100" +``` + +This sets `spring.cloud.config.fail-fast=true` (notice the missing prefix above) and all the available `spring.cloud.config.retry.*` configuration properties. + +### [Locating Remote Configuration Resources](#_locating_remote_configuration_resources) + +The Config Service serves property sources from `/{application}/{profile}/{label}`, where the default bindings in the client app are as follows: + +* "application" = `${spring.application.name}` + +* "profile" = `${spring.profiles.active}` (actually `Environment.getActiveProfiles()`) + +* "label" = "master" + +| |When setting the property `${spring.application.name}` do not prefix your app name with the reserved word `application-` to prevent issues resolving the correct property source.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can override all of them by setting `spring.cloud.config.*` (where `*` is `name`, `profile` or `label`). 
+The `label` is useful for rolling back to previous versions of configuration. +With the default Config Server implementation, it can be a git label, branch name, or commit ID. +Label can also be provided as a comma-separated list. +In that case, the items in the list are tried one by one until one succeeds. +This behavior can be useful when working on a feature branch. +For instance, you might want to align the config label with your branch but make it optional (in that case, use `spring.cloud.config.label=myfeature,develop`). + +### [Specifying Multiple Urls for the Config Server](#_specifying_multiple_urls_for_the_config_server) + +To ensure high availability when you have multiple instances of Config Server deployed and expect one or more instances to be unavailable from time to time, you can either specify multiple URLs (as a comma-separated list under the `spring.cloud.config.uri` property) or have all your instances register in a Service Registry like Eureka ( if using Discovery-First Bootstrap mode ). Note that doing so ensures high availability only when the Config Server is not running (that is, when the application has exited) or when a connection timeout has occurred. For example, if the Config Server returns a 500 (Internal Server Error) response or the Config Client receives a 401 from the Config Server (due to bad credentials or other causes), the Config Client does not try to fetch properties from other URLs. An error of that kind indicates a user issue rather than an availability problem. + +If you use HTTP basic security on your Config Server, it is currently possible to support per-Config Server auth credentials only if you embed the credentials in each URL you specify under the `spring.cloud.config.uri` property. If you use any other kind of security mechanism, you cannot (currently) support per-Config Server authentication and authorization. 
+ +### [Configuring Timeouts](#_configuring_timeouts) + +If you want to configure timeout thresholds: + +* Read timeouts can be configured by using the property `spring.cloud.config.request-read-timeout`. + +* Connection timeouts can be configured by using the property `spring.cloud.config.request-connect-timeout`. + +### [Security](#_security_2) + +If you use HTTP Basic security on the server, clients need to know the password (and username if it is not the default). +You can specify the username and password through the config server URI or via separate username and password properties, as shown in the following example: + +``` +spring: + cloud: + config: + uri: https://user:[email protected] +``` + +The following example shows an alternate way to pass the same information: + +``` +spring: + cloud: + config: + uri: https://myconfig.mycompany.com + username: user + password: secret +``` + +The `spring.cloud.config.password` and `spring.cloud.config.username` values override anything that is provided in the URI. + +If you deploy your apps on Cloud Foundry, the best way to provide the password is through service credentials (such as in the URI, since it does not need to be in a config file). +The following example works locally and for a user-provided service on Cloud Foundry named `configserver`: + +``` +spring: + cloud: + config: + uri: ${vcap.services.configserver.credentials.uri:http://user:[email protected]:8888} +``` + +If config server requires client side TLS certificate, you can configure client side TLS certificate and trust store via properties, as shown in following example: + +``` +spring: + cloud: + config: + uri: https://myconfig.myconfig.com + tls: + enabled: true + key-store: + key-store-type: PKCS12 + key-store-password: + key-password: + trust-store: + trust-store-type: PKCS12 + trust-store-password: +``` + +The `spring.cloud.config.tls.enabled` needs to be true to enable config client side TLS. 
When `spring.cloud.config.tls.trust-store` is omitted, a JVM default trust store is used. The default value for `spring.cloud.config.tls.key-store-type` and `spring.cloud.config.tls.trust-store-type` is PKCS12. When password properties are omitted, empty password is assumed. + +If you use another form of security, you might need to [provide a `RestTemplate`](#custom-rest-template) to the `ConfigServicePropertySourceLocator` (for example, by grabbing it in the bootstrap context and injecting it). + +#### [Health Indicator](#_health_indicator_2) + +The Config Client supplies a Spring Boot Health Indicator that attempts to load configuration from the Config Server. +The health indicator can be disabled by setting `health.config.enabled=false`. +The response is also cached for performance reasons. +The default cache time to live is 5 minutes. +To change that value, set the `health.config.time-to-live` property (in milliseconds). + +#### [Providing A Custom RestTemplate](#custom-rest-template) + +In some cases, you might need to customize the requests made to the config server from the client. +Typically, doing so involves passing special `Authorization` headers to authenticate requests to the server. +To provide a custom `RestTemplate`: + +1. 
Create a new configuration bean with an implementation of `PropertySourceLocator`, as shown in the following example: + +CustomConfigServiceBootstrapConfiguration.java + +``` +@Configuration +public class CustomConfigServiceBootstrapConfiguration { + @Bean + public ConfigServicePropertySourceLocator configServicePropertySourceLocator() { + ConfigClientProperties clientProperties = configClientProperties(); + ConfigServicePropertySourceLocator configServicePropertySourceLocator = new ConfigServicePropertySourceLocator(clientProperties); + configServicePropertySourceLocator.setRestTemplate(customRestTemplate(clientProperties)); + return configServicePropertySourceLocator; + } +} +``` + +| |For a simplified approach to adding `Authorization` headers, the `spring.cloud.config.headers.*` property can be used instead.| +|---|------------------------------------------------------------------------------------------------------------------------------| + +1. In `resources/META-INF`, create a file called`spring.factories` and specify your custom configuration, as shown in the following example: + +spring.factories + +``` +org.springframework.cloud.bootstrap.BootstrapConfiguration = com.my.config.client.CustomConfigServiceBootstrapConfiguration +``` + +#### [Vault](#_vault) + +When using Vault as a backend to your config server, the client needs to supply a token for the server to retrieve values from Vault. +This token can be provided within the client by setting `spring.cloud.config.token`in `bootstrap.yml`, as shown in the following example: + +``` +spring: + cloud: + config: + token: YourVaultToken +``` + +### [Nested Keys In Vault](#_nested_keys_in_vault) + +Vault supports the ability to nest keys in a value stored in Vault, as shown in the following example: + +`echo -n '{"appA": {"secret": "appAsecret"}, "bar": "baz"}' | vault write secret/myapp -` + +This command writes a JSON object to your Vault. 
+To access these values in Spring, you would use the traditional dot (`.`) notation, as shown in the following example:
+The following example shows a typical Maven configuration: + +pom.xml + +``` + + + org.springframework.boot + spring-boot-starter-parent + {spring-boot-version} + + + + + + org.springframework.cloud + spring-cloud-starter-consul-discovery + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + org.springframework.cloud + spring-cloud-dependencies + ${spring-cloud.version} + pom + import + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + +``` + +The following example shows a typical Gradle setup: + +build.gradle + +``` +plugins { + id 'org.springframework.boot' version ${spring-boot-version} + id 'io.spring.dependency-management' version ${spring-dependency-management-version} + id 'java' +} + +repositories { + mavenCentral() +} + +dependencies { + implementation 'org.springframework.cloud:spring-cloud-starter-consul-discovery' + testImplementation 'org.springframework.boot:spring-boot-starter-test' +} +dependencyManagement { + imports { + mavenBom "org.springframework.cloud:spring-cloud-dependencies:${springCloudVersion}" + } +} +``` + +Now you can create a standard Spring Boot application, such as the following HTTP server: + +``` +@SpringBootApplication +@RestController +public class Application { + + @GetMapping("/") + public String home() { + return "Hello World!"; + } + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + +} +``` + +When this HTTP server runs, it connects to Consul Agent running at the default local 8500 port. 
+To modify the startup behavior, you can change the location of Consul Agent by using `application.properties`, as shown in the following example: + +``` +spring: + cloud: + consul: + host: localhost + port: 8500 +``` + +You can now use `DiscoveryClient`, `@LoadBalanced RestTemplate`, or `@LoadBalanced WebClient.Builder` to retrieve services and instances data from Consul, as shown in the following example: + +``` +@Autowired +private DiscoveryClient discoveryClient; + +public String serviceUrl() { + List list = discoveryClient.getInstances("STORES"); + if (list != null && list.size() > 0 ) { + return list.get(0).getUri().toString(); + } + return null; +} +``` + +### 1.2. Distributed Configuration Usage + +To use these features in an application, you can build it as a Spring Boot application that depends on `spring-cloud-consul-core` and `spring-cloud-consul-config`. +The most convenient way to add the dependency is with a Spring Boot starter: `org.springframework.cloud:spring-cloud-starter-consul-config`. +We recommend using dependency management and `spring-boot-starter-parent`. 
+The following example shows a typical Maven configuration: + +pom.xml + +``` + + + org.springframework.boot + spring-boot-starter-parent + {spring-boot-version} + + + + + + org.springframework.cloud + spring-cloud-starter-consul-config + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + org.springframework.cloud + spring-cloud-dependencies + ${spring-cloud.version} + pom + import + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + +``` + +The following example shows a typical Gradle setup: + +build.gradle + +``` +plugins { + id 'org.springframework.boot' version ${spring-boot-version} + id 'io.spring.dependency-management' version ${spring-dependency-management-version} + id 'java' +} + +repositories { + mavenCentral() +} + +dependencies { + implementation 'org.springframework.cloud:spring-cloud-starter-consul-config' + testImplementation 'org.springframework.boot:spring-boot-starter-test' +} +dependencyManagement { + imports { + mavenBom "org.springframework.cloud:spring-cloud-dependencies:${springCloudVersion}" + } +} +``` + +Now you can create a standard Spring Boot application, such as the following HTTP server: + +``` +@SpringBootApplication +@RestController +public class Application { + + @GetMapping("/") + public String home() { + return "Hello World!"; + } + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + +} +``` + +The application retrieves configuration data from Consul. + +| |If you use Spring Cloud Consul Config, you need to set the `spring.config.import` property in order to bind to Consul.
You can read more about it in the [Spring Boot Config Data Import section](#config-data-import).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 2. Install Consul + +Please see the [installation documentation](https://www.consul.io/intro/getting-started/install.html) for instructions on how to install Consul. + +## 3. Consul Agent + +A Consul Agent client must be available to all Spring Cloud Consul applications. By default, the Agent client is expected to be at `localhost:8500`. See the [Agent documentation](https://consul.io/docs/agent/basics.html) for specifics on how to start an Agent client and how to connect to a cluster of Consul Agent Servers. For development, after you have installed consul, you may start a Consul Agent using the following command: + +``` +./src/main/bash/local_run_consul.sh +``` + +This will start an agent in server mode on port 8500, with the ui available at [localhost:8500](http://localhost:8500) + +## 4. Service Discovery with Consul + +Service Discovery is one of the key tenets of a microservice based architecture. Trying to hand configure each client or some form of convention can be very difficult to do and can be very brittle. Consul provides Service Discovery services via an [HTTP API](https://www.consul.io/docs/agent/http.html) and [DNS](https://www.consul.io/docs/agent/dns.html). Spring Cloud Consul leverages the HTTP API for service registration and discovery. This does not prevent non-Spring Cloud applications from leveraging the DNS interface. Consul Agents servers are run in a [cluster](https://www.consul.io/docs/internals/architecture.html) that communicates via a [gossip protocol](https://www.consul.io/docs/internals/gossip.html) and uses the [Raft consensus protocol](https://www.consul.io/docs/internals/consensus.html). + +### 4.1. 
How to activate + +To activate Consul Service Discovery use the starter with group `org.springframework.cloud` and artifact id `spring-cloud-starter-consul-discovery`. See the [Spring Cloud Project page](https://projects.spring.io/spring-cloud/) for details on setting up your build system with the current Spring Cloud Release Train. + +### 4.2. Registering with Consul + +When a client registers with Consul, it provides meta-data about itself such as host and port, id, name and tags. An HTTP [Check](https://www.consul.io/docs/agent/checks.html) is created by default that Consul hits the `/actuator/health` endpoint every 10 seconds. If the health check fails, the service instance is marked as critical. + +Example Consul client: + +``` +@SpringBootApplication +@RestController +public class Application { + + @RequestMapping("/") + public String home() { + return "Hello world"; + } + + public static void main(String[] args) { + new SpringApplicationBuilder(Application.class).web(true).run(args); + } + +} +``` + +(i.e. utterly normal Spring Boot app). If the Consul client is located somewhere other than `localhost:8500`, the configuration is required to locate the client. 
Example: + +application.yml + +``` +spring: + cloud: + consul: + host: localhost + port: 8500 +``` + +| |If you use [Spring Cloud Consul Config](#spring-cloud-consul-config), and you have set `spring.cloud.bootstrap.enabled=true` or `spring.config.use-legacy-processing=true` or use `spring-cloud-starter-bootstrap`, then the above values will need to be placed in `bootstrap.yml` instead of `application.yml`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The default service name, instance id and port, taken from the `Environment`, are `${spring.application.name}`, the Spring Context ID and `${server.port}` respectively. + +To disable the Consul Discovery Client you can set `spring.cloud.consul.discovery.enabled` to `false`. Consul Discovery Client will also be disabled when `spring.cloud.discovery.enabled` is set to `false`. + +To disable the service registration you can set `spring.cloud.consul.discovery.register` to `false`. + +#### 4.2.1. Registering Management as a Separate Service + +When management server port is set to something different than the application port, by setting `management.server.port` property, management service will be registered as a separate service than the application service. For example: + +application.yml + +``` +spring: + application: + name: myApp +management: + server: + port: 4452 +``` + +Above configuration will register following 2 services: + +* Application Service: + +``` +ID: myApp +Name: myApp +``` + +* Management Service: + +``` +ID: myApp-management +Name: myApp-management +``` + +Management service will inherit its `instanceId` and `serviceName` from the application service. 
For example: + +application.yml + +``` +spring: + application: + name: myApp +management: + server: + port: 4452 +spring: + cloud: + consul: + discovery: + instance-id: custom-service-id + serviceName: myprefix-${spring.application.name} +``` + +Above configuration will register following 2 services: + +* Application Service: + +``` +ID: custom-service-id +Name: myprefix-myApp +``` + +* Management Service: + +``` +ID: custom-service-id-management +Name: myprefix-myApp-management +``` + +Further customization is possible via following properties: + +``` +/** Port to register the management service under (defaults to management port) */ +spring.cloud.consul.discovery.management-port + +/** Suffix to use when registering management service (defaults to "management" */ +spring.cloud.consul.discovery.management-suffix + +/** Tags to use when registering management service (defaults to "management" */ +spring.cloud.consul.discovery.management-tags +``` + +#### 4.2.2. HTTP Health Check + +The health check for a Consul instance defaults to "/actuator/health", which is the default location of the health endpoint in a Spring Boot Actuator application. You need to change this, even for an Actuator application, if you use a non-default context path or servlet path (e.g. `server.servletPath=/foo`) or management endpoint path (e.g. `management.server.servlet.context-path=/admin`). + +The interval that Consul uses to check the health endpoint may also be configured. "10s" and "1m" represent 10 seconds and 1 minute respectively. + +This example illustrates the above (see the `spring.cloud.consul.discovery.health-check-*` properties in [the appendix page](appendix.html) for more options). + +application.yml + +``` +spring: + cloud: + consul: + discovery: + healthCheckPath: ${management.server.servlet.context-path}/actuator/health + healthCheckInterval: 15s +``` + +You can disable the HTTP health check entirely by setting `spring.cloud.consul.discovery.register-health-check=false`. 
+ +##### Applying Headers + +Headers can be applied to health check requests. For example, if you’re trying to register a [Spring Cloud Config](https://cloud.spring.io/spring-cloud-config/) server that uses [Vault Backend](https://github.com/spring-cloud/spring-cloud-config/blob/master/docs/src/main/asciidoc/spring-cloud-config.adoc#vault-backend): + +application.yml + +``` +spring: + cloud: + consul: + discovery: + health-check-headers: + X-Config-Token: 6442e58b-d1ea-182e-cfa5-cf9cddef0722 +``` + +According to the HTTP standard, each header can have more than one values, in which case, an array can be supplied: + +application.yml + +``` +spring: + cloud: + consul: + discovery: + health-check-headers: + X-Config-Token: + - "6442e58b-d1ea-182e-cfa5-cf9cddef0722" + - "Some other value" +``` + +#### 4.2.3. Actuator Health Indicator(s) + +If the service instance is a Spring Boot Actuator application, it may be provided the following Actuator health indicators. + +##### DiscoveryClientHealthIndicator + +When Consul Service Discovery is active, a [DiscoverClientHealthIndicator](https://cloud.spring.io/spring-cloud-commons/2.2.x/reference/html/#health-indicator) is configured and made available to the Actuator health endpoint. +See [here](https://cloud.spring.io/spring-cloud-commons/2.2.x/reference/html/#health-indicator) for configuration options. + +##### ConsulHealthIndicator + +An indicator is configured that verifies the health of the `ConsulClient`. + +By default, it retrieves the Consul leader node status and all registered services. +In deployments that have many registered services it may be costly to retrieve all services on every health check. +To skip the service retrieval and only check the leader node status set `spring.cloud.consul.health-indicator.include-services-query=false`. + +To disable the indicator set `management.health.consul.enabled=false`. 
+ +| |When the application runs in [bootstrap context mode](https://cloud.spring.io/spring-cloud-commons/2.2.x/reference/html/#the-bootstrap-application-context) (the default),
this indicator is loaded into the bootstrap context and is not made available to the Actuator health endpoint.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.2.4. Metadata + +Consul supports metadata on services. Spring Cloud’s `ServiceInstance` has a `Map metadata` field which is populated from a service’s `meta` field. To populate the `meta` field set values on `spring.cloud.consul.discovery.metadata` or `spring.cloud.consul.discovery.management-metadata` properties. + +application.yml + +``` +spring: + cloud: + consul: + discovery: + metadata: + myfield: myvalue + anotherfield: anothervalue +``` + +The above configuration will result in a service whose meta field contains `myfield→myvalue` and `anotherfield→anothervalue`. + +##### Generated Metadata + +The Consul Auto Registration will generate a few entries automatically. + +| Key | Value | +|---------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------| +| 'group' |Property `spring.cloud.consul.discovery.instance-group`. This value is only generated if `instance-group` is not empty.| +| 'secure' | True if property `spring.cloud.consul.discovery.scheme` equals 'https', otherwise false. | +|Property `spring.cloud.consul.discovery.default-zone-metadata-name`, defaults to 'zone'| Property `spring.cloud.consul.discovery.instance-zone`. This value is only generated if `instance-zone` is not empty. | +| |Older versions of Spring Cloud Consul populated the `ServiceInstance.getMetadata()` method from Spring Cloud Commons by parsing the `spring.cloud.consul.discovery.tags` property. 
This is no longer supported, please migrate to using the `spring.cloud.consul.discovery.metadata` map.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.2.5. Making the Consul Instance ID Unique + +By default a consul instance is registered with an ID that is equal to its Spring Application Context ID. By default, the Spring Application Context ID is `${spring.application.name}:comma,separated,profiles:${server.port}`. For most cases, this will allow multiple instances of one service to run on one machine. If further uniqueness is required, Using Spring Cloud you can override this by providing a unique identifier in `spring.cloud.consul.discovery.instanceId`. For example: + +application.yml + +``` +spring: + cloud: + consul: + discovery: + instanceId: ${spring.application.name}:${vcap.application.instance_id:${spring.application.instance_id:${random.value}}} +``` + +With this metadata, and multiple service instances deployed on localhost, the random value will kick in there to make the instance unique. In Cloudfoundry the `vcap.application.instance_id` will be populated automatically in a Spring Boot application, so the random value will not be needed. + +### 4.3. Looking up services + +#### 4.3.1. Using Load-balancer + +Spring Cloud has support for [Feign](https://github.com/spring-cloud/spring-cloud-netflix/blob/master/docs/src/main/asciidoc/spring-cloud-netflix.adoc#spring-cloud-feign) (a REST client builder) and also [Spring `RestTemplate`](https://docs.spring.io/spring-cloud-commons/docs/current/reference/html/#rest-template-loadbalancer-client)for looking up services using the logical service names/ids instead of physical URLs. 
Both Feign and the discovery-aware RestTemplate utilize [Spring Cloud LoadBalancer](https://docs.spring.io/spring-cloud-commons/docs/current/reference/html/#spring-cloud-loadbalancer) for client-side load balancing. + +If you want to access service STORES using the RestTemplate simply declare: + +``` +@LoadBalanced +@Bean +public RestTemplate loadbalancedRestTemplate() { + return new RestTemplate(); +} +``` + +and use it like this (notice how we use the STORES service name/id from Consul instead of a fully qualified domainname): + +``` +@Autowired +RestTemplate restTemplate; + +public String getFirstProduct() { + return this.restTemplate.getForObject("https://STORES/products/1", String.class); +} +``` + +If you have Consul clusters in multiple datacenters and you want to access a service in another datacenter a service name/id alone is not enough. In that case +you use property `spring.cloud.consul.discovery.datacenters.STORES=dc-west` where `STORES` is the service name/id and `dc-west` is the datacenter +where the STORES service lives. + +| |Spring Cloud now also offers support for[Spring Cloud LoadBalancer](https://cloud.spring.io/spring-cloud-commons/reference/html/#_spring_resttemplate_as_a_load_balancer_client).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.3.2. Using the DiscoveryClient + +You can also use the `org.springframework.cloud.client.discovery.DiscoveryClient` which provides a simple API for discovery clients that is not specific to Netflix, e.g. + +``` +@Autowired +private DiscoveryClient discoveryClient; + +public String serviceUrl() { + List list = discoveryClient.getInstances("STORES"); + if (list != null && list.size() > 0 ) { + return list.get(0).getUri(); + } + return null; +} +``` + +### 4.4. 
Consul Catalog Watch + +The Consul Catalog Watch takes advantage of the ability of consul to [watch services](https://www.consul.io/docs/agent/watches.html#services). The Catalog Watch makes a blocking Consul HTTP API call to determine if any services have changed. If there is new service data a Heartbeat Event is published. + +To change the frequency of when the Config Watch is called change `spring.cloud.consul.config.discovery.catalog-services-watch-delay`. The default value is 1000, which is in milliseconds. The delay is the amount of time after the end of the previous invocation and the start of the next. + +To disable the Catalog Watch set `spring.cloud.consul.discovery.catalogServicesWatch.enabled=false`. + +The watch uses a Spring `TaskScheduler` to schedule the call to consul. By default it is a `ThreadPoolTaskScheduler` with a `poolSize` of 1. To change the `TaskScheduler`, create a bean of type `TaskScheduler` named with the `ConsulDiscoveryClientConfiguration.CATALOG_WATCH_TASK_SCHEDULER_NAME` constant. + +## 5. Distributed Configuration with Consul + +Consul provides a [Key/Value Store](https://consul.io/docs/agent/http/kv.html) for storing configuration and other metadata. Spring Cloud Consul Config is an alternative to the [Config Server and Client](https://github.com/spring-cloud/spring-cloud-config). Configuration is loaded into the Spring Environment during the special "bootstrap" phase. Configuration is stored in the `/config` folder by default. Multiple `PropertySource` instances are created based on the application’s name and the active profiles that mimics the Spring Cloud Config order of resolving properties. For example, an application with the name "testApp" and with the "dev" profile will have the following property sources created: + +``` +config/testApp,dev/ +config/testApp/ +config/application,dev/ +config/application/ +``` + +The most specific property source is at the top, with the least specific at the bottom. 
Properties in the `config/application` folder are applicable to all applications using consul for configuration. Properties in the `config/testApp` folder are only available to the instances of the service named "testApp". + +Configuration is currently read on startup of the application. Sending an HTTP POST to `/refresh` will cause the configuration to be reloaded. [Config Watch](#spring-cloud-consul-config-watch) will also automatically detect changes and reload the application context. + +### 5.1. How to activate + +To get started with Consul Configuration use the starter with group `org.springframework.cloud` and artifact id `spring-cloud-starter-consul-config`. See the [Spring Cloud Project page](https://projects.spring.io/spring-cloud/) for details on setting up your build system with the current Spring Cloud Release Train. + +### 5.2. Spring Boot Config Data Import + +Spring Boot 2.4 introduced a new way to import configuration data via the `spring.config.import` property. This is now the default way to get configuration from Consul. + +To optionally connect to Consul set the following in application.properties: + +application.properties + +``` +spring.config.import=optional:consul: +``` + +This will connect to the Consul Agent at the default location of "http://localhost:8500". Removing the `optional:` prefix will cause Consul Config to fail if it is unable to connect to Consul. To change the connection properties of Consul Config either set `spring.cloud.consul.host` and `spring.cloud.consul.port` or add the host/port pair to the `spring.config.import` statement such as, `spring.config.import=optional:consul:myhost:8500`. The location in the import property has precedence over the host and port properties. 
+ +Consul Config will try to load values from four automatic contexts based on `spring.cloud.consul.config.name` (which defaults to the value of the `spring.application.name` property) and `spring.cloud.consul.config.default-context` (which defaults to `application`). If you want to specify the contexts rather than using the computed ones, you can add that information to the `spring.config.import` statement. + +application.properties + +``` +spring.config.import=optional:consul:myhost:8500/contextone;/context/two +``` + +This will optionally load configuration only from `/contextone` and `/context/two`. + +| |A `bootstrap` file (properties or yaml) is **not** needed for the Spring Boot Config Data method of import via `spring.config.import`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------| + +### 5.3. Customizing + +Consul Config may be customized using the following properties: + +``` +spring: + cloud: + consul: + config: + enabled: true + prefix: configuration + defaultContext: apps + profileSeparator: '::' +``` + +| |If you have set `spring.cloud.bootstrap.enabled=true` or `spring.config.use-legacy-processing=true`, or included `spring-cloud-starter-bootstrap`, then the above values will need to be placed in `bootstrap.yml` instead of `application.yml`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +* `enabled` setting this value to "false" disables Consul Config + +* `prefix` sets the base folder for configuration values + +* `defaultContext` sets the folder name used by all applications + +* `profileSeparator` sets the value of the separator used to separate the profile name in property sources with profiles + +### 5.4. 
Config Watch + +The Consul Config Watch takes advantage of the ability of consul to [watch a key prefix](https://www.consul.io/docs/agent/watches.html#keyprefix). The Config Watch makes a blocking Consul HTTP API call to determine if any relevant configuration data has changed for the current application. If there is new configuration data a Refresh Event is published. This is equivalent to calling the `/refresh` actuator endpoint. + +To change the frequency of when the Config Watch is called change `spring.cloud.consul.config.watch.delay`. The default value is 1000, which is in milliseconds. The delay is the amount of time after the end of the previous invocation and the start of the next. + +To disable the Config Watch set `spring.cloud.consul.config.watch.enabled=false`. + +The watch uses a Spring `TaskScheduler` to schedule the call to consul. By default it is a `ThreadPoolTaskScheduler` with a `poolSize` of 1. To change the `TaskScheduler`, create a bean of type `TaskScheduler` named with the `ConsulConfigAutoConfiguration.CONFIG_WATCH_TASK_SCHEDULER_NAME` constant. + +### 5.5. YAML or Properties with Config + +It may be more convenient to store a blob of properties in YAML or Properties format as opposed to individual key/value pairs. Set the `spring.cloud.consul.config.format` property to `YAML` or `PROPERTIES`. For example to use YAML: + +``` +spring: + cloud: + consul: + config: + format: YAML +``` + +| |If you have set `spring.cloud.bootstrap.enabled=true` or `spring.config.use-legacy-processing=true`, or included `spring-cloud-starter-bootstrap`, then the above values will need to be placed in `bootstrap.yml` instead of `application.yml`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +YAML must be set in the appropriate `data` key in consul. 
Using the defaults above the keys would look like: + +``` +config/testApp,dev/data +config/testApp/data +config/application,dev/data +config/application/data +``` + +You could store a YAML document in any of the keys listed above. + +You can change the data key using `spring.cloud.consul.config.data-key`. + +### 5.6. git2consul with Config + +git2consul is a Consul community project that loads files from a git repository to individual keys into Consul. By default the names of the keys are names of the files. YAML and Properties files are supported with file extensions of `.yml` and `.properties` respectively. Set the `spring.cloud.consul.config.format` property to `FILES`. For example: + +bootstrap.yml + +``` +spring: + cloud: + consul: + config: + format: FILES +``` + +Given the following keys in `/config`, the `development` profile and an application name of `foo`: + +``` +.gitignore +application.yml +bar.properties +foo-development.properties +foo-production.yml +foo.properties +master.ref +``` + +the following property sources would be created: + +``` +config/foo-development.properties +config/foo.properties +config/application.yml +``` + +The value of each key needs to be a properly formatted YAML or Properties file. + +### 5.7. Fail Fast + +It may be convenient in certain circumstances (like local development or certain test scenarios) to not fail if consul isn’t available for configuration. Setting `spring.cloud.consul.config.fail-fast=false` will cause the configuration module to log a warning rather than throw an exception. This will allow the application to continue startup normally. 
 + +| |If you have set `spring.cloud.bootstrap.enabled=true` or `spring.config.use-legacy-processing=true`, or included `spring-cloud-starter-bootstrap`, then the above values will need to be placed in `bootstrap.yml` instead of `application.yml`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 6. Consul Retry + +If you expect that the consul agent may occasionally be unavailable when +your app starts, you can ask it to keep trying after a failure. You need to add `spring-retry` and `spring-boot-starter-aop` to your classpath. The default +behaviour is to retry 6 times with an initial backoff interval of 1000ms and an +exponential multiplier of 1.1 for subsequent backoffs. You can configure these +properties (and others) using `spring.cloud.consul.retry.*` configuration properties. +This works with both Spring Cloud Consul Config and Discovery registration. + +| |To take full control of the retry add a `@Bean` of type `RetryOperationsInterceptor` with id "consulRetryInterceptor". Spring
Retry has a `RetryInterceptorBuilder` that makes it easy to create one.| +|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 7. Spring Cloud Bus with Consul + +### 7.1. How to activate + +To get started with the Consul Bus use the starter with group `org.springframework.cloud` and artifact id `spring-cloud-starter-consul-bus`. See the [Spring Cloud Project page](https://projects.spring.io/spring-cloud/) for details on setting up your build system with the current Spring Cloud Release Train. + +See the [Spring Cloud Bus](https://cloud.spring.io/spring-cloud-bus/) documentation for the available actuator endpoints and how to send custom messages. + +## 8. Circuit Breaker with Hystrix + +Applications can use the Hystrix Circuit Breaker provided by the Spring Cloud Netflix project by including this starter in the project's pom.xml: `spring-cloud-starter-hystrix`. Hystrix doesn’t depend on the Netflix Discovery Client. The `@EnableHystrix` annotation should be placed on a configuration class (usually the main class). Then methods can be annotated with `@HystrixCommand` to be protected by a circuit breaker. See [the documentation](https://projects.spring.io/spring-cloud/spring-cloud.html#_circuit_breaker_hystrix_clients) for more details. + +## 9. Hystrix metrics aggregation with Turbine and Consul + +Turbine (provided by the Spring Cloud Netflix project), aggregates multiple instances Hystrix metrics streams, so the dashboard can display an aggregate view. Turbine uses the `DiscoveryClient` interface to look up relevant instances. 
To use Turbine with Spring Cloud Consul, configure the Turbine application in a manner similar to the following examples: + +pom.xml + +``` + + org.springframework.cloud + spring-cloud-netflix-turbine + + + org.springframework.cloud + spring-cloud-starter-consul-discovery + +``` + +Notice that the Turbine dependency is not a starter. The turbine starter includes support for Netflix Eureka. + +application.yml + +``` +spring.application.name: turbine +applications: consulhystrixclient +turbine: + aggregator: + clusterConfig: ${applications} + appConfig: ${applications} +``` + +The `clusterConfig` and `appConfig` sections must match, so it’s useful to put the comma-separated list of service ID’s into a separate configuration property. + +Turbine.java + +``` +@EnableTurbine +@SpringBootApplication +public class Turbine { + public static void main(String[] args) { + SpringApplication.run(DemoturbinecommonsApplication.class, args); + } +} +``` + +## 10. Configuration Properties + +To see the list of all Consul related configuration properties please check [the Appendix page](appendix.html). + diff --git a/docs/en/spring-cloud/spring-cloud-contract.md b/docs/en/spring-cloud/spring-cloud-contract.md new file mode 100644 index 0000000000000000000000000000000000000000..fb18cc9abedfbe7dd2bbdef633af04ab29489e15 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-contract.md @@ -0,0 +1,16 @@ +# Spring Cloud Contract Reference Documentation + +Adam Dudczak, Mathias Düsterhöft, Marcin Grzejszczak, Dennis Kieselhorst, Jakub Kubryński, Karol Lassak, Olga Maciaszek-Sharma, Mariusz Smykuła, Dave Syer, Jay Bryant + +The reference documentation consists of the following sections: + +| [Legal](legal.html#legal-information) | Legal information. 
| +|----------------------------------------------------------------------------|------------------------------------------------------------------------------------------------| +|[Documentation Overview](documentation-overview.html#contract-documentation)| About the Documentation, Getting Help, First Steps, and more. | +| [Getting Started](getting-started.html#getting-started) |Introducing Spring Cloud Contract, Developing Your First Spring Cloud Contract-based Application| +| [Using Spring Cloud Contract](using.html#using) | Spring Cloud Contract usage examples and workflows. | +| [Spring Cloud Contract Features](project-features.html#features) |Contract DSL, Messaging, Spring Cloud Contract Stub Runner, and Spring Cloud Contract WireMock. | +| [Build Tools](project-features.html#features-build-tools) | Maven Plugin, Gradle Plugin, and Docker. | +| [“How-to” Guides](howto.html#howto) | Stubs versioning, Pact integration, Debugging, and more. | +| [Appendices](appendix.html#appendix) | Properties, Metadata, Configuration, Dependencies, and more. 
| + diff --git a/docs/en/spring-cloud/spring-cloud-function.md b/docs/en/spring-cloud/spring-cloud-function.md new file mode 100644 index 0000000000000000000000000000000000000000..5dbc4147d195284f14e8c8ba803c2e2d89f9124f --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-function.md @@ -0,0 +1,21 @@ +# Spring Cloud Function Reference Documentation + +Mark Fisher, Dave Syer, Oleg Zhurakousky, Anshul Mehra + +**3.2.2** + +The reference documentation consists of the following sections: + +| [Reference Guide](spring-cloud-function.html) |Spring Cloud Function Reference| +|------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------| +|[Cloud Events](https://github.com/spring-cloud/spring-cloud-function/tree/master/spring-cloud-function-samples/function-sample-cloudevent)| Cloud Events | +| [RSocket](https://github.com/spring-cloud/spring-cloud-function/tree/master/spring-cloud-function-rsocket) | RSocket | +| [AWS Adapter](aws.html) | AWS Adapter Reference | +| [Azure Adapter](azure.html) | Azure Adapter Reference | +| [GCP Adapter](gcp.html) | GCP Adapter Reference | + +Relevant Links: + +|[Reactor](https://projectreactor.io/)|Project Reactor| +|-------------------------------------|---------------| + diff --git a/docs/en/spring-cloud/spring-cloud-gateway.md b/docs/en/spring-cloud/spring-cloud-gateway.md new file mode 100644 index 0000000000000000000000000000000000000000..974a208c74013d2d52db31012639c9502788b730 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-gateway.md @@ -0,0 +1,2688 @@ +# Spring Cloud Gateway + +## 1. How to Include Spring Cloud Gateway + +To include Spring Cloud Gateway in your project, use the starter with a group ID of `org.springframework.cloud` and an artifact ID of `spring-cloud-starter-gateway`. 
+See the [Spring Cloud Project page](https://projects.spring.io/spring-cloud/) for details on setting up your build system with the current Spring Cloud Release Train. + +If you include the starter, but you do not want the gateway to be enabled, set `spring.cloud.gateway.enabled=false`. + +| |Spring Cloud Gateway is built on [Spring Boot 2.x](https://spring.io/projects/spring-boot#learn), [Spring WebFlux](https://docs.spring.io/spring/docs/current/spring-framework-reference/web-reactive.html), and [Project Reactor](https://projectreactor.io/docs).
As a consequence, many of the familiar synchronous libraries (Spring Data and Spring Security, for example) and patterns you know may not apply when you use Spring Cloud Gateway.
If you are unfamiliar with these projects, we suggest you begin by reading their documentation to familiarize yourself with some of the new concepts before working with Spring Cloud Gateway.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Spring Cloud Gateway requires the Netty runtime provided by Spring Boot and Spring Webflux.
It does not work in a traditional Servlet Container or when built as a WAR.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 2. Glossary + +* **Route**: The basic building block of the gateway. + It is defined by an ID, a destination URI, a collection of predicates, and a collection of filters. A route is matched if the aggregate predicate is true. + +* **Predicate**: This is a [Java 8 Function Predicate](https://docs.oracle.com/javase/8/docs/api/java/util/function/Predicate.html). The input type is a [Spring Framework `ServerWebExchange`](https://docs.spring.io/spring/docs/5.0.x/javadoc-api/org/springframework/web/server/ServerWebExchange.html). + This lets you match on anything from the HTTP request, such as headers or parameters. + +* **Filter**: These are instances of [`GatewayFilter`](https://github.com/spring-cloud/spring-cloud-gateway/tree/main/spring-cloud-gateway-server/src/main/java/org/springframework/cloud/gateway/filter/GatewayFilter.java) that have been constructed with a specific factory. + Here, you can modify requests and responses before or after sending the downstream request. + +## 3. How It Works + +The following diagram provides a high-level overview of how Spring Cloud Gateway works: + +![Spring Cloud Gateway Diagram](https://docs.spring.io/spring-cloud-gateway/docs/3.1.1/reference/html/images/spring_cloud_gateway_diagram.png) + +Clients make requests to Spring Cloud Gateway. If the Gateway Handler Mapping determines that a request matches a route, it is sent to the Gateway Web Handler. +This handler runs the request through a filter chain that is specific to the request. +The reason the filters are divided by the dotted line is that filters can run logic both before and after the proxy request is sent. +All “pre” filter logic is executed. Then the proxy request is made. 
After the proxy request is made, the “post” filter logic is run. + +| |URIs defined in routes without a port get default port values of 80 and 443 for the HTTP and HTTPS URIs, respectively.| +|---|----------------------------------------------------------------------------------------------------------------------| + +## 4. Configuring Route Predicate Factories and Gateway Filter Factories + +There are two ways to configure predicates and filters: shortcuts and fully expanded arguments. Most examples below use the shortcut way. + +The name and argument names will be listed as `code` in the first sentence or two of each section. The arguments are typically listed in the order that would be needed for the shortcut configuration. + +### 4.1. Shortcut Configuration + +Shortcut configuration is recognized by the filter name, followed by an equals sign (`=`), followed by argument values separated by commas (`,`). + +application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: after_route + uri: https://example.org + predicates: + - Cookie=mycookie,mycookievalue +``` + +The previous sample defines the `Cookie` Route Predicate Factory with two arguments, the cookie name, `mycookie` and the value to match `mycookievalue`. + +### 4.2. Fully Expanded Arguments + +Fully expanded arguments appear more like standard yaml configuration with name/value pairs. Typically, there will be a `name` key and an `args` key. The `args` key is a map of key value pairs to configure the predicate or filter. + +application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: after_route + uri: https://example.org + predicates: + - name: Cookie + args: + name: mycookie + regexp: mycookievalue +``` + +This is the full configuration of the shortcut configuration of the `Cookie` predicate shown above. + +## 5. Route Predicate Factories + +Spring Cloud Gateway matches routes as part of the Spring WebFlux `HandlerMapping` infrastructure. 
+Spring Cloud Gateway includes many built-in route predicate factories. +All of these predicates match on different attributes of the HTTP request. +You can combine multiple route predicate factories with logical `and` statements. + +### 5.1. The After Route Predicate Factory + +The `After` route predicate factory takes one parameter, a `datetime` (which is a java `ZonedDateTime`). +This predicate matches requests that happen after the specified datetime. +The following example configures an after route predicate: + +Example 1. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: after_route + uri: https://example.org + predicates: + - After=2017-01-20T17:42:47.789-07:00[America/Denver] +``` + +This route matches any request made after Jan 20, 2017 17:42 Mountain Time (Denver). + +### 5.2. The Before Route Predicate Factory + +The `Before` route predicate factory takes one parameter, a `datetime` (which is a java `ZonedDateTime`). +This predicate matches requests that happen before the specified `datetime`. +The following example configures a before route predicate: + +Example 2. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: before_route + uri: https://example.org + predicates: + - Before=2017-01-20T17:42:47.789-07:00[America/Denver] +``` + +This route matches any request made before Jan 20, 2017 17:42 Mountain Time (Denver). + +### 5.3. The Between Route Predicate Factory + +The `Between` route predicate factory takes two parameters, `datetime1` and `datetime2`which are java `ZonedDateTime` objects. +This predicate matches requests that happen after `datetime1` and before `datetime2`. +The `datetime2` parameter must be after `datetime1`. +The following example configures a between route predicate: + +Example 3. 
application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: between_route + uri: https://example.org + predicates: + - Between=2017-01-20T17:42:47.789-07:00[America/Denver], 2017-01-21T17:42:47.789-07:00[America/Denver] +``` + +This route matches any request made after Jan 20, 2017 17:42 Mountain Time (Denver) and before Jan 21, 2017 17:42 Mountain Time (Denver). +This could be useful for maintenance windows. + +### 5.4. The Cookie Route Predicate Factory + +The `Cookie` route predicate factory takes two parameters, the cookie `name` and a `regexp` (which is a Java regular expression). +This predicate matches cookies that have the given name and whose values match the regular expression. +The following example configures a cookie route predicate factory: + +Example 4. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: cookie_route + uri: https://example.org + predicates: + - Cookie=chocolate, ch.p +``` + +This route matches requests that have a cookie named `chocolate` whose value matches the `ch.p` regular expression. + +### 5.5. The Header Route Predicate Factory + +The `Header` route predicate factory takes two parameters, the `header` and a `regexp` (which is a Java regular expression). +This predicate matches with a header that has the given name whose value matches the regular expression. +The following example configures a header route predicate: + +Example 5. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: header_route + uri: https://example.org + predicates: + - Header=X-Request-Id, \d+ +``` + +This route matches if the request has a header named `X-Request-Id` whose value matches the `\d+` regular expression (that is, it has a value of one or more digits). + +### 5.6. The Host Route Predicate Factory + +The `Host` route predicate factory takes one parameter: a list of host name `patterns`. +The pattern is an Ant-style pattern with `.` as the separator. 
+This predicate matches the `Host` header that matches the pattern. +The following example configures a host route predicate: + +Example 6. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: host_route + uri: https://example.org + predicates: + - Host=**.somehost.org,**.anotherhost.org +``` + +URI template variables (such as `{sub}.myhost.org`) are supported as well. + +This route matches if the request has a `Host` header with a value of `www.somehost.org` or `beta.somehost.org` or `www.anotherhost.org`. + +This predicate extracts the URI template variables (such as `sub`, defined in the preceding example) as a map of names and values and places it in the `ServerWebExchange.getAttributes()` with a key defined in `ServerWebExchangeUtils.URI_TEMPLATE_VARIABLES_ATTRIBUTE`. +Those values are then available for use by [`GatewayFilter` factories](#gateway-route-filters) + +### 5.7. The Method Route Predicate Factory + +The `Method` Route Predicate Factory takes a `methods` argument which is one or more parameters: the HTTP methods to match. +The following example configures a method route predicate: + +Example 7. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: method_route + uri: https://example.org + predicates: + - Method=GET,POST +``` + +This route matches if the request method was a `GET` or a `POST`. + +### 5.8. The Path Route Predicate Factory + +The `Path` Route Predicate Factory takes two parameters: a list of Spring `PathMatcher` `patterns` and an optional flag called `matchTrailingSlash` (defaults to `true`). +The following example configures a path route predicate: + +Example 8. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: path_route + uri: https://example.org + predicates: + - Path=/red/{segment},/blue/{segment} +``` + +This route matches if the request path was, for example: `/red/1` or `/red/1/` or `/red/blue` or `/blue/green`. 
+ +If `matchTrailingSlash` is set to `false`, then request path `/red/1/` will not be matched. + +This predicate extracts the URI template variables (such as `segment`, defined in the preceding example) as a map of names and values and places it in the `ServerWebExchange.getAttributes()` with a key defined in `ServerWebExchangeUtils.URI_TEMPLATE_VARIABLES_ATTRIBUTE`. +Those values are then available for use by [`GatewayFilter` factories](#gateway-route-filters) + +A utility method (called `get`) is available to make access to these variables easier. +The following example shows how to use the `get` method: + +``` +Map uriVariables = ServerWebExchangeUtils.getPathPredicateVariables(exchange); + +String segment = uriVariables.get("segment"); +``` + +### 5.9. The Query Route Predicate Factory + +The `Query` route predicate factory takes two parameters: a required `param` and an optional `regexp` (which is a Java regular expression). +The following example configures a query route predicate: + +Example 9. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: query_route + uri: https://example.org + predicates: + - Query=green +``` + +The preceding route matches if the request contained a `green` query parameter. + +application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: query_route + uri: https://example.org + predicates: + - Query=red, gree. +``` + +The preceding route matches if the request contained a `red` query parameter whose value matched the `gree.` regexp, so `green` and `greet` would match. + +### 5.10. The RemoteAddr Route Predicate Factory + +The `RemoteAddr` route predicate factory takes a list (min size 1) of `sources`, which are CIDR-notation (IPv4 or IPv6) strings, such as `192.168.0.1/16` (where `192.168.0.1` is an IP address and `16` is a subnet mask). +The following example configures a RemoteAddr route predicate: + +Example 10. 
application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: remoteaddr_route + uri: https://example.org + predicates: + - RemoteAddr=192.168.1.1/24 +``` + +This route matches if the remote address of the request was, for example, `192.168.1.10`. + +#### 5.10.1. Modifying the Way Remote Addresses Are Resolved + +By default, the RemoteAddr route predicate factory uses the remote address from the incoming request. +This may not match the actual client IP address if Spring Cloud Gateway sits behind a proxy layer. + +You can customize the way that the remote address is resolved by setting a custom `RemoteAddressResolver`. +Spring Cloud Gateway comes with one non-default remote address resolver that is based off of the [X-Forwarded-For header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For), `XForwardedRemoteAddressResolver`. + +`XForwardedRemoteAddressResolver` has two static constructor methods, which take different approaches to security: + +* `XForwardedRemoteAddressResolver::trustAll` returns a `RemoteAddressResolver` that always takes the first IP address found in the `X-Forwarded-For` header. + This approach is vulnerable to spoofing, as a malicious client could set an initial value for the `X-Forwarded-For`, which would be accepted by the resolver. + +* `XForwardedRemoteAddressResolver::maxTrustedIndex` takes an index that correlates to the number of trusted infrastructure running in front of Spring Cloud Gateway. + If Spring Cloud Gateway is, for example only accessible through HAProxy, then a value of 1 should be used. + If two hops of trusted infrastructure are required before Spring Cloud Gateway is accessible, then a value of 2 should be used. 
+ +Consider the following header value: + +``` +X-Forwarded-For: 0.0.0.1, 0.0.0.2, 0.0.0.3 +``` + +The following `maxTrustedIndex` values yield the following remote addresses: + +| `maxTrustedIndex` | result | +|------------------------|-----------------------------------------------------------| +|[`Integer.MIN_VALUE`,0] |(invalid, `IllegalArgumentException` during initialization)| +| 1 | 0.0.0.3 | +| 2 | 0.0.0.2 | +| 3 | 0.0.0.1 | +|[4, `Integer.MAX_VALUE`]| 0.0.0.1 | + +The following example shows how to achieve the same configuration with Java: + +Example 11. GatewayConfig.java + +``` +RemoteAddressResolver resolver = XForwardedRemoteAddressResolver + .maxTrustedIndex(1); + +... + +.route("direct-route", + r -> r.remoteAddr("10.1.1.1", "10.10.1.1/24") + .uri("https://downstream1") +.route("proxied-route", + r -> r.remoteAddr(resolver, "10.10.1.1", "10.10.1.1/24") + .uri("https://downstream2") +) +``` + +### 5.11. The Weight Route Predicate Factory + +The `Weight` route predicate factory takes two arguments: `group` and `weight` (an int). The weights are calculated per group. +The following example configures a weight route predicate: + +Example 12. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: weight_high + uri: https://weighthigh.org + predicates: + - Weight=group1, 8 + - id: weight_low + uri: https://weightlow.org + predicates: + - Weight=group1, 2 +``` + +This route would forward \~80% of traffic to [weighthigh.org](https://weighthigh.org) and \~20% of traffic to [weighlow.org](https://weighlow.org) + +### 5.12. The XForwarded Remote Addr Route Predicate Factory + +The `XForwarded Remote Addr` route predicate factory takes a list (min size 1) of `sources`, which are CIDR-notation (IPv4 or IPv6) strings, such as `192.168.0.1/16` (where `192.168.0.1` is an IP address and `16` is a subnet mask). + +This route predicate allows requests to be filtered based on the `X-Forwarded-For` HTTP header. 
+ +This can be used with reverse proxies such as load balancers or web application firewalls where +the request should only be allowed if it comes from a trusted list of IP addresses used by those +reverse proxies. + +The following example configures a XForwardedRemoteAddr route predicate: + +Example 13. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: xforwarded_remoteaddr_route + uri: https://example.org + predicates: + - XForwardedRemoteAddr=192.168.1.1/24 +``` + +This route matches if the `X-Forwarded-For` header contains, for example, `192.168.1.10`. + +## 6. `GatewayFilter` Factories + +Route filters allow the modification of the incoming HTTP request or outgoing HTTP response in some manner. +Route filters are scoped to a particular route. +Spring Cloud Gateway includes many built-in GatewayFilter Factories. + +| |For more detailed examples of how to use any of the following filters, take a look at the [unit tests](https://github.com/spring-cloud/spring-cloud-gateway/tree/master/spring-cloud-gateway-server/src/test/java/org/springframework/cloud/gateway/filter/factory).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 6.1. The `AddRequestHeader` `GatewayFilter` Factory + +The `AddRequestHeader` `GatewayFilter` factory takes a `name` and `value` parameter. +The following example configures an `AddRequestHeader` `GatewayFilter`: + +Example 14. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: add_request_header_route + uri: https://example.org + filters: + - AddRequestHeader=X-Request-red, blue +``` + +This listing adds `X-Request-red:blue` header to the downstream request’s headers for all matching requests. + +`AddRequestHeader` is aware of the URI variables used to match a path or host. 
+URI variables may be used in the value and are expanded at runtime. +The following example configures an `AddRequestHeader` `GatewayFilter` that uses a variable: + +Example 15. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: add_request_header_route + uri: https://example.org + predicates: + - Path=/red/{segment} + filters: + - AddRequestHeader=X-Request-Red, Blue-{segment} +``` + +### 6.2. The `AddRequestParameter` `GatewayFilter` Factory + +The `AddRequestParameter` `GatewayFilter` Factory takes a `name` and `value` parameter. +The following example configures an `AddRequestParameter` `GatewayFilter`: + +Example 16. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: add_request_parameter_route + uri: https://example.org + filters: + - AddRequestParameter=red, blue +``` + +This will add `red=blue` to the downstream request’s query string for all matching requests. + +`AddRequestParameter` is aware of the URI variables used to match a path or host. +URI variables may be used in the value and are expanded at runtime. +The following example configures an `AddRequestParameter` `GatewayFilter` that uses a variable: + +Example 17. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: add_request_parameter_route + uri: https://example.org + predicates: + - Host: {segment}.myhost.org + filters: + - AddRequestParameter=foo, bar-{segment} +``` + +### 6.3. The `AddResponseHeader` `GatewayFilter` Factory + +The `AddResponseHeader` `GatewayFilter` Factory takes a `name` and `value` parameter. +The following example configures an `AddResponseHeader` `GatewayFilter`: + +Example 18. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: add_response_header_route + uri: https://example.org + filters: + - AddResponseHeader=X-Response-Red, Blue +``` + +This adds `X-Response-Red:Blue` header to the downstream response’s headers for all matching requests. 
+ +`AddResponseHeader` is aware of URI variables used to match a path or host. +URI variables may be used in the value and are expanded at runtime. +The following example configures an `AddResponseHeader` `GatewayFilter` that uses a variable: + +Example 19. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: add_response_header_route + uri: https://example.org + predicates: + - Host: {segment}.myhost.org + filters: + - AddResponseHeader=foo, bar-{segment} +``` + +### 6.4. The `DedupeResponseHeader` `GatewayFilter` Factory + +The DedupeResponseHeader GatewayFilter factory takes a `name` parameter and an optional `strategy` parameter. `name` can contain a space-separated list of header names. +The following example configures a `DedupeResponseHeader` `GatewayFilter`: + +Example 20. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: dedupe_response_header_route + uri: https://example.org + filters: + - DedupeResponseHeader=Access-Control-Allow-Credentials Access-Control-Allow-Origin +``` + +This removes duplicate values of `Access-Control-Allow-Credentials` and `Access-Control-Allow-Origin` response headers in cases when both the gateway CORS logic and the downstream logic add them. + +The `DedupeResponseHeader` filter also accepts an optional `strategy` parameter. +The accepted values are `RETAIN_FIRST` (default), `RETAIN_LAST`, and `RETAIN_UNIQUE`. + +### 6.5. Spring Cloud CircuitBreaker GatewayFilter Factory + +The Spring Cloud CircuitBreaker GatewayFilter factory uses the Spring Cloud CircuitBreaker APIs to wrap Gateway routes in +a circuit breaker. Spring Cloud CircuitBreaker supports multiple libraries that can be used with Spring Cloud Gateway. Spring Cloud supports Resilience4J out of the box. + +To enable the Spring Cloud CircuitBreaker filter, you need to place `spring-cloud-starter-circuitbreaker-reactor-resilience4j` on the classpath. 
+The following example configures a Spring Cloud CircuitBreaker `GatewayFilter`: + +Example 21. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: circuitbreaker_route + uri: https://example.org + filters: + - CircuitBreaker=myCircuitBreaker +``` + +To configure the circuit breaker, see the configuration for the underlying circuit breaker implementation you are using. + +* [Resilience4J Documentation](https://cloud.spring.io/spring-cloud-circuitbreaker/reference/html/spring-cloud-circuitbreaker.html) + +The Spring Cloud CircuitBreaker filter can also accept an optional `fallbackUri` parameter. +Currently, only `forward:` schemed URIs are supported. +If the fallback is called, the request is forwarded to the controller matched by the URI. +The following example configures such a fallback: + +Example 22. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: circuitbreaker_route + uri: lb://backing-service:8088 + predicates: + - Path=/consumingServiceEndpoint + filters: + - name: CircuitBreaker + args: + name: myCircuitBreaker + fallbackUri: forward:/inCaseOfFailureUseThis + - RewritePath=/consumingServiceEndpoint, /backingServiceEndpoint +``` + +The following listing does the same thing in Java: + +Example 23. Application.java + +``` +@Bean +public RouteLocator routes(RouteLocatorBuilder builder) { + return builder.routes() + .route("circuitbreaker_route", r -> r.path("/consumingServiceEndpoint") + .filters(f -> f.circuitBreaker(c -> c.name("myCircuitBreaker").fallbackUri("forward:/inCaseOfFailureUseThis")) + .rewritePath("/consumingServiceEndpoint", "/backingServiceEndpoint")).uri("lb://backing-service:8088") + .build(); +} +``` + +This example forwards to the `/inCaseofFailureUseThis` URI when the circuit breaker fallback is called. +Note that this example also demonstrates the (optional) Spring Cloud LoadBalancer load-balancing (defined by the `lb` prefix on the destination URI). 
+ +The primary scenario is to use the `fallbackUri` to define an internal controller or handler within the gateway application. +However, you can also reroute the request to a controller or handler in an external application, as follows: + +Example 24. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: ingredients + uri: lb://ingredients + predicates: + - Path=/ingredients/** + filters: + - name: CircuitBreaker + args: + name: fetchIngredients + fallbackUri: forward:/fallback + - id: ingredients-fallback + uri: http://localhost:9994 + predicates: + - Path=/fallback +``` + +In this example, there is no `fallback` endpoint or handler in the gateway application. +However, there is one in another application, registered under `[localhost:9994](http://localhost:9994)`. + +In case of the request being forwarded to fallback, the Spring Cloud CircuitBreaker Gateway filter also provides the `Throwable` that has caused it. +It is added to the `ServerWebExchange` as the `ServerWebExchangeUtils.CIRCUITBREAKER_EXECUTION_EXCEPTION_ATTR` attribute that can be used when handling the fallback within the gateway application. + +For the external controller/handler scenario, headers can be added with exception details. +You can find more information on doing so in the [FallbackHeaders GatewayFilter Factory section](#fallback-headers). + +#### 6.5.1. Tripping The Circuit Breaker On Status Codes + +In some cases you might want to trip a circuit breaker based on the status code +returned from the route it wraps. The circuit breaker config object takes a list of +status codes that if returned will cause the circuit breaker to be tripped. When setting the +status codes you want to trip the circuit breaker you can either use an integer with the status code +value or the String representation of the `HttpStatus` enumeration. + +Example 25. 
application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: circuitbreaker_route + uri: lb://backing-service:8088 + predicates: + - Path=/consumingServiceEndpoint + filters: + - name: CircuitBreaker + args: + name: myCircuitBreaker + fallbackUri: forward:/inCaseOfFailureUseThis + statusCodes: + - 500 + - "NOT_FOUND" +``` + +Example 26. Application.java + +``` +@Bean +public RouteLocator routes(RouteLocatorBuilder builder) { + return builder.routes() + .route("circuitbreaker_route", r -> r.path("/consumingServiceEndpoint") + .filters(f -> f.circuitBreaker(c -> c.name("myCircuitBreaker").fallbackUri("forward:/inCaseOfFailureUseThis").addStatusCode("INTERNAL_SERVER_ERROR")) + .rewritePath("/consumingServiceEndpoint", "/backingServiceEndpoint")).uri("lb://backing-service:8088") + .build(); +} +``` + +### 6.6. The `FallbackHeaders` `GatewayFilter` Factory + +The `FallbackHeaders` factory lets you add Spring Cloud CircuitBreaker execution exception details in the headers of a request forwarded to a `fallbackUri` in an external application, as in the following scenario: + +Example 27. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: ingredients + uri: lb://ingredients + predicates: + - Path=//ingredients/** + filters: + - name: CircuitBreaker + args: + name: fetchIngredients + fallbackUri: forward:/fallback + - id: ingredients-fallback + uri: http://localhost:9994 + predicates: + - Path=/fallback + filters: + - name: FallbackHeaders + args: + executionExceptionTypeHeaderName: Test-Header +``` + +In this example, after an execution exception occurs while running the circuit breaker, the request is forwarded to the `fallback` endpoint or handler in an application running on `localhost:9994`. +The headers with the exception type, message and (if available) root cause exception type and message are added to that request by the `FallbackHeaders` filter. 
+ +You can overwrite the names of the headers in the configuration by setting the values of the following arguments (shown with their default values): + +* `executionExceptionTypeHeaderName` (`"Execution-Exception-Type"`) + +* `executionExceptionMessageHeaderName` (`"Execution-Exception-Message"`) + +* `rootCauseExceptionTypeHeaderName` (`"Root-Cause-Exception-Type"`) + +* `rootCauseExceptionMessageHeaderName` (`"Root-Cause-Exception-Message"`) + +For more information on circuit breakers and the gateway see the [Spring Cloud CircuitBreaker Factory section](#spring-cloud-circuitbreaker-filter-factory). + +### 6.7. The `MapRequestHeader` `GatewayFilter` Factory + +The `MapRequestHeader` `GatewayFilter` factory takes `fromHeader` and `toHeader` parameters. +It creates a new named header (`toHeader`), and the value is extracted out of an existing named header (`fromHeader`) from the incoming http request. +If the input header does not exist, the filter has no impact. +If the new named header already exists, its values are augmented with the new values. +The following example configures a `MapRequestHeader`: + +Example 28. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: map_request_header_route + uri: https://example.org + filters: + - MapRequestHeader=Blue, X-Request-Red +``` + +This adds `X-Request-Red:` header to the downstream request with updated values from the incoming HTTP request’s `Blue` header. + +### 6.8. The `PrefixPath` `GatewayFilter` Factory + +The `PrefixPath` `GatewayFilter` factory takes a single `prefix` parameter. +The following example configures a `PrefixPath` `GatewayFilter`: + +Example 29. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: prefixpath_route + uri: https://example.org + filters: + - PrefixPath=/mypath +``` + +This will prefix `/mypath` to the path of all matching requests. +So a request to `/hello` would be sent to `/mypath/hello`. + +### 6.9. 
The `PreserveHostHeader` `GatewayFilter` Factory + +The `PreserveHostHeader` `GatewayFilter` factory has no parameters. +This filter sets a request attribute that the routing filter inspects to determine if the original host header should be sent, rather than the host header determined by the HTTP client. +The following example configures a `PreserveHostHeader` `GatewayFilter`: + +Example 30. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: preserve_host_route + uri: https://example.org + filters: + - PreserveHostHeader +``` + +### 6.10. The `RequestRateLimiter` `GatewayFilter` Factory + +The `RequestRateLimiter` `GatewayFilter` factory uses a `RateLimiter` implementation to determine if the current request is allowed to proceed. If it is not, a status of `HTTP 429 - Too Many Requests` (by default) is returned. + +This filter takes an optional `keyResolver` parameter and parameters specific to the rate limiter (described later in this section). + +`keyResolver` is a bean that implements the `KeyResolver` interface. +In configuration, reference the bean by name using SpEL.`#{@myKeyResolver}` is a SpEL expression that references a bean named `myKeyResolver`. +The following listing shows the `KeyResolver` interface: + +Example 31. KeyResolver.java + +``` +public interface KeyResolver { + Mono resolve(ServerWebExchange exchange); +} +``` + +The `KeyResolver` interface lets pluggable strategies derive the key for limiting requests. +In future milestone releases, there will be some `KeyResolver` implementations. + +The default implementation of `KeyResolver` is the `PrincipalNameKeyResolver`, which retrieves the `Principal` from the `ServerWebExchange` and calls `Principal.getName()`. + +By default, if the `KeyResolver` does not find a key, requests are denied. 
+You can adjust this behavior by setting the `spring.cloud.gateway.filter.request-rate-limiter.deny-empty-key` (`true` or `false`) and `spring.cloud.gateway.filter.request-rate-limiter.empty-key-status-code` properties. + +| |The `RequestRateLimiter` is not configurable with the "shortcut" notation. The following example below is *invalid*:

Example 32. application.properties

```
# INVALID SHORTCUT CONFIGURATION
spring.cloud.gateway.routes[0].filters[0]=RequestRateLimiter=2, 2, #{@userkeyresolver}
```| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.10.1. The Redis `RateLimiter` + +The Redis implementation is based off of work done at [Stripe](https://stripe.com/blog/rate-limiters). +It requires the use of the `spring-boot-starter-data-redis-reactive` Spring Boot starter. + +The algorithm used is the [Token Bucket Algorithm](https://en.wikipedia.org/wiki/Token_bucket). + +The `redis-rate-limiter.replenishRate` property is how many requests per second you want a user to be allowed to do, without any dropped requests. +This is the rate at which the token bucket is filled. + +The `redis-rate-limiter.burstCapacity` property is the maximum number of requests a user is allowed to do in a single second. +This is the number of tokens the token bucket can hold. +Setting this value to zero blocks all requests. + +The `redis-rate-limiter.requestedTokens` property is how many tokens a request costs. +This is the number of tokens taken from the bucket for each request and defaults to `1`. + +A steady rate is accomplished by setting the same value in `replenishRate` and `burstCapacity`. +Temporary bursts can be allowed by setting `burstCapacity` higher than `replenishRate`. +In this case, the rate limiter needs to be allowed some time between bursts (according to `replenishRate`), as two consecutive bursts will result in dropped requests (`HTTP 429 - Too Many Requests`). +The following listing configures a `redis-rate-limiter`: + +Rate limits bellow `1 request/s` are accomplished by setting `replenishRate` to the wanted number of requests, `requestedTokens` to the timespan in seconds and `burstCapacity` to the product of `replenishRate` and `requestedTokens`, e.g. 
setting `replenishRate=1`, `requestedTokens=60` and `burstCapacity=60` will result in a limit of `1 request/min`. + +Example 33. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: requestratelimiter_route + uri: https://example.org + filters: + - name: RequestRateLimiter + args: + redis-rate-limiter.replenishRate: 10 + redis-rate-limiter.burstCapacity: 20 + redis-rate-limiter.requestedTokens: 1 +``` + +The following example configures a KeyResolver in Java: + +Example 34. Config.java + +``` +@Bean +KeyResolver userKeyResolver() { + return exchange -> Mono.just(exchange.getRequest().getQueryParams().getFirst("user")); +} +``` + +This defines a request rate limit of 10 per user. A burst of 20 is allowed, but, in the next second, only 10 requests are available. +The `KeyResolver` is a simple one that gets the `user` request parameter (note that this is not recommended for production). + +You can also define a rate limiter as a bean that implements the `RateLimiter` interface. +In configuration, you can reference the bean by name using SpEL.`#{@myRateLimiter}` is a SpEL expression that references a bean with named `myRateLimiter`. +The following listing defines a rate limiter that uses the `KeyResolver` defined in the previous listing: + +Example 35. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: requestratelimiter_route + uri: https://example.org + filters: + - name: RequestRateLimiter + args: + rate-limiter: "#{@myRateLimiter}" + key-resolver: "#{@userKeyResolver}" +``` + +### 6.11. The `RedirectTo` `GatewayFilter` Factory + +The `RedirectTo` `GatewayFilter` factory takes two parameters, `status` and `url`. +The `status` parameter should be a 300 series redirect HTTP code, such as 301. +The `url` parameter should be a valid URL. +This is the value of the `Location` header. +For relative redirects, you should use `uri: no://op` as the uri of your route definition. 
+The following listing configures a `RedirectTo` `GatewayFilter`: + +Example 36. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: prefixpath_route + uri: https://example.org + filters: + - RedirectTo=302, https://acme.org +``` + +This will send a status 302 with a `Location:https://acme.org` header to perform a redirect. + +### 6.12. The `RemoveRequestHeader` GatewayFilter Factory + +The `RemoveRequestHeader` `GatewayFilter` factory takes a `name` parameter. +It is the name of the header to be removed. +The following listing configures a `RemoveRequestHeader` `GatewayFilter`: + +Example 37. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: removerequestheader_route + uri: https://example.org + filters: + - RemoveRequestHeader=X-Request-Foo +``` + +This removes the `X-Request-Foo` header before it is sent downstream. + +### 6.13. `RemoveResponseHeader` `GatewayFilter` Factory + +The `RemoveResponseHeader` `GatewayFilter` factory takes a `name` parameter. +It is the name of the header to be removed. +The following listing configures a `RemoveResponseHeader` `GatewayFilter`: + +Example 38. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: removeresponseheader_route + uri: https://example.org + filters: + - RemoveResponseHeader=X-Response-Foo +``` + +This will remove the `X-Response-Foo` header from the response before it is returned to the gateway client. + +To remove any kind of sensitive header, you should configure this filter for any routes for which you may want to do so. +In addition, you can configure this filter once by using `spring.cloud.gateway.default-filters` and have it applied to all routes. + +### 6.14. The `RemoveRequestParameter` `GatewayFilter` Factory + +The `RemoveRequestParameter` `GatewayFilter` factory takes a `name` parameter. +It is the name of the query parameter to be removed. +The following example configures a `RemoveRequestParameter` `GatewayFilter`: + +Example 39. 
application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: removerequestparameter_route + uri: https://example.org + filters: + - RemoveRequestParameter=red +``` + +This will remove the `red` parameter before it is sent downstream. + +### 6.15. The `RewritePath` `GatewayFilter` Factory + +The `RewritePath` `GatewayFilter` factory takes a path `regexp` parameter and a `replacement` parameter. +This uses Java regular expressions for a flexible way to rewrite the request path. +The following listing configures a `RewritePath` `GatewayFilter`: + +Example 40. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: rewritepath_route + uri: https://example.org + predicates: + - Path=/red/** + filters: + - RewritePath=/red/?(?<segment>.*), /$\{segment} +``` + +For a request path of `/red/blue`, this sets the path to `/blue` before making the downstream request. Note that the `$` should be replaced with `$\` because of the YAML specification. + +### 6.16. `RewriteLocationResponseHeader` `GatewayFilter` Factory + +The `RewriteLocationResponseHeader` `GatewayFilter` factory modifies the value of the `Location` response header, usually to get rid of backend-specific details. +It takes `stripVersionMode`, `locationHeaderName`, `hostValue`, and `protocolsRegex` parameters. +The following listing configures a `RewriteLocationResponseHeader` `GatewayFilter`: + +Example 41. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: rewritelocationresponseheader_route + uri: http://example.org + filters: + - RewriteLocationResponseHeader=AS_IN_REQUEST, Location, , +``` + +For example, for a request of `POST [api.example.com/some/object/name](https://api.example.com/some/object/name)`, the `Location` response header value of `[object-service.prod.example.net/v2/some/object/id](https://object-service.prod.example.net/v2/some/object/id)` is rewritten as `[api.example.com/some/object/id](https://api.example.com/some/object/id)`. 
+
+The `stripVersionMode` parameter has the following possible values: `NEVER_STRIP`, `AS_IN_REQUEST` (default), and `ALWAYS_STRIP`.
+
+* `NEVER_STRIP`: The version is not stripped, even if the original request path contains no version.
+
+* `AS_IN_REQUEST`: The version is stripped only if the original request path contains no version.
+
+* `ALWAYS_STRIP`: The version is always stripped, even if the original request path contains a version.
+
+The `hostValue` parameter, if provided, is used to replace the `host:port` portion of the response `Location` header.
+If it is not provided, the value of the `Host` request header is used.
+
+The `protocolsRegex` parameter must be a valid regex `String`, against which the protocol name is matched.
+If it is not matched, the filter does nothing.
+The default is `http|https|ftp|ftps`.
+
+### 6.17. The `RewriteResponseHeader` `GatewayFilter` Factory
+
+The `RewriteResponseHeader` `GatewayFilter` factory takes `name`, `regexp`, and `replacement` parameters.
+It uses Java regular expressions for a flexible way to rewrite the response header value.
+The following example configures a `RewriteResponseHeader` `GatewayFilter`:
+
+Example 42. application.yml
+
+```
+spring:
+  cloud:
+    gateway:
+      routes:
+      - id: rewriteresponseheader_route
+        uri: https://example.org
+        filters:
+        - RewriteResponseHeader=X-Response-Red, , password=[^&]+, password=***
+```
+
+For a header value of `/42?user=ford&password=omg!what&flag=true`, it is set to `/42?user=ford&password=***&flag=true` after making the downstream request.
+You must use `$\` to mean `$` because of the YAML specification.
+
+### 6.18. The `SaveSession` `GatewayFilter` Factory
+
+The `SaveSession` `GatewayFilter` factory forces a `WebSession::save` operation *before* forwarding the call downstream.
+This is of particular use when using something like [Spring Session](https://projects.spring.io/spring-session/) with a lazy data store and you need to ensure the session state has been saved before making the forwarded call. +The following example configures a `SaveSession` `GatewayFilter`: + +Example 43. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: save_session + uri: https://example.org + predicates: + - Path=/foo/** + filters: + - SaveSession +``` + +If you integrate [Spring Security](https://projects.spring.io/spring-security/) with Spring Session and want to ensure security details have been forwarded to the remote process, this is critical. + +### 6.19. The `SecureHeaders` `GatewayFilter` Factory + +The `SecureHeaders` `GatewayFilter` factory adds a number of headers to the response, per the recommendation made in [this blog post](https://blog.appcanary.com/2017/http-security-headers.html). + +The following headers (shown with their default values) are added: + +* `X-Xss-Protection:1 (mode=block`) + +* `Strict-Transport-Security (max-age=631138519`) + +* `X-Frame-Options (DENY)` + +* `X-Content-Type-Options (nosniff)` + +* `Referrer-Policy (no-referrer)` + +* `Content-Security-Policy (default-src 'self' https:; font-src 'self' https: data:; img-src 'self' https: data:; object-src 'none'; script-src https:; style-src 'self' https: 'unsafe-inline)'` + +* `X-Download-Options (noopen)` + +* `X-Permitted-Cross-Domain-Policies (none)` + +To change the default values, set the appropriate property in the `spring.cloud.gateway.filter.secure-headers` namespace. 
+The following properties are available: + +* `xss-protection-header` + +* `strict-transport-security` + +* `x-frame-options` + +* `x-content-type-options` + +* `referrer-policy` + +* `content-security-policy` + +* `x-download-options` + +* `x-permitted-cross-domain-policies` + +To disable the default values set the `spring.cloud.gateway.filter.secure-headers.disable` property with comma-separated values. +The following example shows how to do so: + +``` +spring.cloud.gateway.filter.secure-headers.disable=x-frame-options,strict-transport-security +``` + +| |The lowercase full name of the secure header needs to be used to disable it..| +|---|-----------------------------------------------------------------------------| + +### 6.20. The `SetPath` `GatewayFilter` Factory + +The `SetPath` `GatewayFilter` factory takes a path `template` parameter. +It offers a simple way to manipulate the request path by allowing templated segments of the path. +This uses the URI templates from Spring Framework. +Multiple matching segments are allowed. +The following example configures a `SetPath` `GatewayFilter`: + +Example 44. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: setpath_route + uri: https://example.org + predicates: + - Path=/red/{segment} + filters: + - SetPath=/{segment} +``` + +For a request path of `/red/blue`, this sets the path to `/blue` before making the downstream request. + +### 6.21. The `SetRequestHeader` `GatewayFilter` Factory + +The `SetRequestHeader` `GatewayFilter` factory takes `name` and `value` parameters. +The following listing configures a `SetRequestHeader` `GatewayFilter`: + +Example 45. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: setrequestheader_route + uri: https://example.org + filters: + - SetRequestHeader=X-Request-Red, Blue +``` + +This `GatewayFilter` replaces (rather than adding) all headers with the given name. 
+So, if the downstream server responded with a `X-Request-Red:1234`, this would be replaced with `X-Request-Red:Blue`, which is what the downstream service would receive. + +`SetRequestHeader` is aware of URI variables used to match a path or host. +URI variables may be used in the value and are expanded at runtime. +The following example configures an `SetRequestHeader` `GatewayFilter` that uses a variable: + +Example 46. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: setrequestheader_route + uri: https://example.org + predicates: + - Host: {segment}.myhost.org + filters: + - SetRequestHeader=foo, bar-{segment} +``` + +### 6.22. The `SetResponseHeader` `GatewayFilter` Factory + +The `SetResponseHeader` `GatewayFilter` factory takes `name` and `value` parameters. +The following listing configures a `SetResponseHeader` `GatewayFilter`: + +Example 47. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: setresponseheader_route + uri: https://example.org + filters: + - SetResponseHeader=X-Response-Red, Blue +``` + +This GatewayFilter replaces (rather than adding) all headers with the given name. +So, if the downstream server responded with a `X-Response-Red:1234`, this is replaced with `X-Response-Red:Blue`, which is what the gateway client would receive. + +`SetResponseHeader` is aware of URI variables used to match a path or host. +URI variables may be used in the value and will be expanded at runtime. +The following example configures an `SetResponseHeader` `GatewayFilter` that uses a variable: + +Example 48. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: setresponseheader_route + uri: https://example.org + predicates: + - Host: {segment}.myhost.org + filters: + - SetResponseHeader=foo, bar-{segment} +``` + +### 6.23. The `SetStatus` `GatewayFilter` Factory + +The `SetStatus` `GatewayFilter` factory takes a single parameter, `status`. +It must be a valid Spring `HttpStatus`. 
+It may be the integer value `404` or the string representation of the enumeration: `NOT_FOUND`. +The following listing configures a `SetStatus` `GatewayFilter`: + +Example 49. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: setstatusstring_route + uri: https://example.org + filters: + - SetStatus=UNAUTHORIZED + - id: setstatusint_route + uri: https://example.org + filters: + - SetStatus=401 +``` + +In either case, the HTTP status of the response is set to 401. + +You can configure the `SetStatus` `GatewayFilter` to return the original HTTP status code from the proxied request in a header in the response. +The header is added to the response if configured with the following property: + +Example 50. application.yml + +``` +spring: + cloud: + gateway: + set-status: + original-status-header-name: original-http-status +``` + +### 6.24. The `StripPrefix` `GatewayFilter` Factory + +The `StripPrefix` `GatewayFilter` factory takes one parameter, `parts`. +The `parts` parameter indicates the number of parts in the path to strip from the request before sending it downstream. +The following listing configures a `StripPrefix` `GatewayFilter`: + +Example 51. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: nameRoot + uri: https://nameservice + predicates: + - Path=/name/** + filters: + - StripPrefix=2 +``` + +When a request is made through the gateway to `/name/blue/red`, the request made to `nameservice` looks like `[nameservice/red](https://nameservice/red)`. + +### 6.25. The Retry `GatewayFilter` Factory + +The `Retry` `GatewayFilter` factory supports the following parameters: + +* `retries`: The number of retries that should be attempted. + +* `statuses`: The HTTP status codes that should be retried, represented by using `org.springframework.http.HttpStatus`. + +* `methods`: The HTTP methods that should be retried, represented by using `org.springframework.http.HttpMethod`. 
+ +* `series`: The series of status codes to be retried, represented by using `org.springframework.http.HttpStatus.Series`. + +* `exceptions`: A list of thrown exceptions that should be retried. + +* `backoff`: The configured exponential backoff for the retries. + Retries are performed after a backoff interval of `firstBackoff * (factor ^ n)`, where `n` is the iteration. + If `maxBackoff` is configured, the maximum backoff applied is limited to `maxBackoff`. + If `basedOnPreviousValue` is true, the backoff is calculated byusing `prevBackoff * factor`. + +The following defaults are configured for `Retry` filter, if enabled: + +* `retries`: Three times + +* `series`: 5XX series + +* `methods`: GET method + +* `exceptions`: `IOException` and `TimeoutException` + +* `backoff`: disabled + +The following listing configures a Retry `GatewayFilter`: + +Example 52. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: retry_test + uri: http://localhost:8080/flakey + predicates: + - Host=*.retry.com + filters: + - name: Retry + args: + retries: 3 + statuses: BAD_GATEWAY + methods: GET,POST + backoff: + firstBackoff: 10ms + maxBackoff: 50ms + factor: 2 + basedOnPreviousValue: false +``` + +| |When using the retry filter with a `forward:` prefixed URL, the target endpoint should be written carefully so that, in case of an error, it does not do anything that could result in a response being sent to the client and committed.
For example, if the target endpoint is an annotated controller, the target controller method should not return `ResponseEntity` with an error status code.
Instead, it should throw an `Exception` or signal an error (for example, through a `Mono.error(ex)` return value), which the retry filter can be configured to handle by retrying.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When using the retry filter with any HTTP method with a body, the body will be cached and the gateway will become memory constrained. The body is cached in a request attribute defined by `ServerWebExchangeUtils.CACHED_REQUEST_BODY_ATTR`. The type of the object is a `org.springframework.core.io.buffer.DataBuffer`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +A simplified "shortcut" notation can be added with a single `status` and `method`. + +The following two examples are equivalent: + +Example 53. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: retry_route + uri: https://example.org + filters: + - name: Retry + args: + retries: 3 + statuses: INTERNAL_SERVER_ERROR + methods: GET + backoff: + firstBackoff: 10ms + maxBackoff: 50ms + factor: 2 + basedOnPreviousValue: false + + - id: retryshortcut_route + uri: https://example.org + filters: + - Retry=3,INTERNAL_SERVER_ERROR,GET,10ms,50ms,2,false +``` + +### 6.26. 
The `RequestSize` `GatewayFilter` Factory
+
+When the request size is greater than the permissible limit, the `RequestSize` `GatewayFilter` factory can restrict a request from reaching the downstream service.
+The filter takes a `maxSize` parameter.
+The `maxSize` is a `DataSize` type, so values can be defined as a number followed by an optional `DataUnit` suffix such as 'KB' or 'MB'. The default is 'B' for bytes.
+It is the permissible size limit of the request defined in bytes.
+The following listing configures a `RequestSize` `GatewayFilter`:
+
+Example 54. application.yml
+
+```
+spring:
+  cloud:
+    gateway:
+      routes:
+      - id: request_size_route
+        uri: http://localhost:8080/upload
+        predicates:
+        - Path=/upload
+        filters:
+        - name: RequestSize
+          args:
+            maxSize: 5000000
+```
+
+The `RequestSize` `GatewayFilter` factory sets the response status as `413 Payload Too Large` with an additional header `errorMessage` when the request is rejected due to size. The following example shows such an `errorMessage`:
+
+```
+errorMessage : Request size is larger than permissible limit. Request size is 6.0 MB where permissible limit is 5.0 MB
+```
+
+| |The default request size is set to five MB if not provided as a filter argument in the route definition.|
+|---|--------------------------------------------------------------------------------------------------------|
+
+### 6.27. The `SetRequestHostHeader` `GatewayFilter` Factory
+
+There are certain situations when the host header may need to be overridden. In this situation, the `SetRequestHostHeader` `GatewayFilter` factory can replace the existing host header with a specified value.
+The filter takes a `host` parameter.
+The following listing configures a `SetRequestHostHeader` `GatewayFilter`:
+
+Example 55.
application.yml
+
+```
+spring:
+  cloud:
+    gateway:
+      routes:
+      - id: set_request_host_header_route
+        uri: http://localhost:8080/headers
+        predicates:
+        - Path=/headers
+        filters:
+        - name: SetRequestHostHeader
+          args:
+            host: example.org
+```
+
+The `SetRequestHostHeader` `GatewayFilter` factory replaces the value of the host header with `example.org`.
+
+### 6.28. Modify a Request Body `GatewayFilter` Factory
+
+You can use the `ModifyRequestBody` filter to modify the request body before it is sent downstream by the gateway.
+
+| |This filter can be configured only by using the Java DSL.|
+|---|---------------------------------------------------------|
+
+The following listing shows how to modify a request body `GatewayFilter`:
+
+```
+@Bean
+public RouteLocator routes(RouteLocatorBuilder builder) {
+    return builder.routes()
+        .route("rewrite_request_obj", r -> r.host("*.rewriterequestobj.org")
+            .filters(f -> f.prefixPath("/httpbin")
+                .modifyRequestBody(String.class, Hello.class, MediaType.APPLICATION_JSON_VALUE,
+                    (exchange, s) -> Mono.just(new Hello(s.toUpperCase())))).uri(uri))
+        .build();
+}
+
+static class Hello {
+    String message;
+
+    public Hello() { }
+
+    public Hello(String message) {
+        this.message = message;
+    }
+
+    public String getMessage() {
+        return message;
+    }
+
+    public void setMessage(String message) {
+        this.message = message;
+    }
+}
+```
+
+| |if the request has no body, the `RewriteFilter` will be passed `null`. `Mono.empty()` should be returned to assign a missing body in the request.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 6.29. Modify a Response Body `GatewayFilter` Factory
+
+You can use the `ModifyResponseBody` filter to modify the response body before it is sent back to the client.
+
+| |This filter can be configured only by using the Java DSL.|
+|---|---------------------------------------------------------|
+
+The following listing shows how to modify a response body `GatewayFilter`:
+
+```
+@Bean
+public RouteLocator routes(RouteLocatorBuilder builder) {
+    return builder.routes()
+        .route("rewrite_response_upper", r -> r.host("*.rewriteresponseupper.org")
+            .filters(f -> f.prefixPath("/httpbin")
+                .modifyResponseBody(String.class, String.class,
+                    (exchange, s) -> Mono.just(s.toUpperCase()))).uri(uri))
+        .build();
+}
+```
+
+| |if the response has no body, the `RewriteFilter` will be passed `null`. `Mono.empty()` should be returned to assign a missing body in the response.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 6.30. Token Relay `GatewayFilter` Factory
+
+A Token Relay is where an OAuth2 consumer acts as a Client and
+forwards the incoming token to outgoing resource requests. The
+consumer can be a pure Client (like an SSO application) or a Resource
+Server.
+
+Spring Cloud Gateway can forward OAuth2 access tokens downstream to the services
+it is proxying. To add this functionality to the gateway you need to add the `TokenRelayGatewayFilterFactory` like this:
+
+App.java
+
+```
+@Bean
+public RouteLocator customRouteLocator(RouteLocatorBuilder builder) {
+    return builder.routes()
+        .route("resource", r -> r.path("/resource")
+            .filters(f -> f.tokenRelay())
+            .uri("http://localhost:9000"))
+        .build();
+}
+```
+
+or this
+
+application.yaml
+
+```
+spring:
+  cloud:
+    gateway:
+      routes:
+      - id: resource
+        uri: http://localhost:9000
+        predicates:
+        - Path=/resource
+        filters:
+        - TokenRelay=
+```
+
+and it will (in addition to logging the user in and grabbing a token)
+pass the authentication token downstream to the services (in this case `/resource`).
+
+To enable this for Spring Cloud Gateway add the following dependencies
+
+* `org.springframework.boot:spring-boot-starter-oauth2-client`
+
+How does it work? The
+{githubmaster}/src/main/java/org/springframework/cloud/gateway/security/TokenRelayGatewayFilterFactory.java[filter]
+extracts an access token from the currently authenticated user,
+and puts it in a request header for the downstream requests.
+
+For a full working sample see [this project](https://github.com/spring-cloud-samples/sample-gateway-oauth2login).
+
+| |A `TokenRelayGatewayFilterFactory` bean will only be created if the proper `spring.security.oauth2.client.*` properties are set which will trigger creation of a `ReactiveClientRegistrationRepository` bean.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+| |The default implementation of `ReactiveOAuth2AuthorizedClientService` used by `TokenRelayGatewayFilterFactory` uses an in-memory data store. You will need to provide your own implementation of `ReactiveOAuth2AuthorizedClientService` if you need a more robust solution.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 6.31. The `CacheRequestBody` `GatewayFilter` Factory
+
+There are certain situations where you need to read the request body. Since the request body stream can only be read once, we need to cache the request body.
+You can use the `CacheRequestBody` filter to cache the request body before it is sent downstream and retrieve the body from an exchange attribute.
+
+The following listing shows how to cache the request body `GatewayFilter`:
+
+```
+@Bean
+public RouteLocator routes(RouteLocatorBuilder builder) {
+    return builder.routes()
+        .route("cache_request_body_route", r -> r.path("/downstream/**")
+            .filters(f -> f.prefixPath("/httpbin")
+                .cacheRequestBody(String.class)).uri(uri))
+        .build();
+}
+```
+
+Example 56. application.yml
+
+```
+spring:
+  cloud:
+    gateway:
+      routes:
+      - id: cache_request_body_route
+        uri: lb://downstream
+        predicates:
+        - Path=/downstream/**
+        filters:
+        - name: CacheRequestBody
+          args:
+            bodyClass: java.lang.String
+```
+
+`CacheRequestBody` will extract the request body and convert it to the body class (such as `java.lang.String`, defined in the preceding example). It then places it in the `ServerWebExchange.getAttributes()` with a key defined in `ServerWebExchangeUtils.CACHED_REQUEST_BODY_ATTR`.
+
+| |This filter only works with HTTP requests (including HTTPS).|
+|---|-----------------------------------------------------------|
+
+### 6.32. Default Filters
+
+To add a filter and apply it to all routes, you can use `spring.cloud.gateway.default-filters`.
+This property takes a list of filters.
+The following listing defines a set of default filters:
+
+Example 57. application.yml
+
+```
+spring:
+  cloud:
+    gateway:
+      default-filters:
+      - AddResponseHeader=X-Response-Default-Red, Default-Blue
+      - PrefixPath=/httpbin
+```
+
+## 7. Global Filters
+
+The `GlobalFilter` interface has the same signature as `GatewayFilter`.
+These are special filters that are conditionally applied to all routes.
+
+| |This interface and its usage are subject to change in future milestone releases.|
+|---|--------------------------------------------------------------------------------|
+
+### 7.1. Combined Global Filter and `GatewayFilter` Ordering
+
+When a request matches a route, the filtering web handler adds all instances of `GlobalFilter` and all route-specific instances of `GatewayFilter` to a filter chain.
+This combined filter chain is sorted by the `org.springframework.core.Ordered` interface, which you can set by implementing the `getOrder()` method. + +As Spring Cloud Gateway distinguishes between “pre” and “post” phases for filter logic execution (see [How it Works](#gateway-how-it-works)), the filter with the highest precedence is the first in the “pre”-phase and the last in the “post”-phase. + +The following listing configures a filter chain: + +Example 58. ExampleConfiguration.java + +``` +@Bean +public GlobalFilter customFilter() { + return new CustomGlobalFilter(); +} + +public class CustomGlobalFilter implements GlobalFilter, Ordered { + + @Override + public Mono filter(ServerWebExchange exchange, GatewayFilterChain chain) { + log.info("custom global filter"); + return chain.filter(exchange); + } + + @Override + public int getOrder() { + return -1; + } +} +``` + +### 7.2. Forward Routing Filter + +The `ForwardRoutingFilter` looks for a URI in the exchange attribute `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR`. +If the URL has a `forward` scheme (such as `forward:///localendpoint`), it uses the Spring `DispatcherHandler` to handle the request. +The path part of the request URL is overridden with the path in the forward URL. +The unmodified original URL is appended to the list in the `ServerWebExchangeUtils.GATEWAY_ORIGINAL_REQUEST_URL_ATTR` attribute. + +### 7.3. The `ReactiveLoadBalancerClientFilter` + +The `ReactiveLoadBalancerClientFilter` looks for a URI in the exchange attribute named `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR`. +If the URL has a `lb` scheme (such as `lb://myservice`), it uses the Spring Cloud `ReactorLoadBalancer` to resolve the name (`myservice` in this example) to an actual host and port and replaces the URI in the same attribute. +The unmodified original URL is appended to the list in the `ServerWebExchangeUtils.GATEWAY_ORIGINAL_REQUEST_URL_ATTR` attribute. 
+The filter also looks in the `ServerWebExchangeUtils.GATEWAY_SCHEME_PREFIX_ATTR` attribute to see if it equals `lb`. +If so, the same rules apply. +The following listing configures a `ReactiveLoadBalancerClientFilter`: + +Example 59. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: myRoute + uri: lb://service + predicates: + - Path=/service/** +``` + +| |By default, when a service instance cannot be found by the `ReactorLoadBalancer`, a `503` is returned.
You can configure the gateway to return a `404` by setting `spring.cloud.gateway.loadbalancer.use404=true`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The `isSecure` value of the `ServiceInstance` returned from the `ReactiveLoadBalancerClientFilter` overrides
the scheme specified in the request made to the Gateway.
For example, if the request comes into the Gateway over `HTTPS` but the `ServiceInstance` indicates it is not secure, the downstream request is made over `HTTP`.
The opposite situation can also apply.
However, if `GATEWAY_SCHEME_PREFIX_ATTR` is specified for the route in the Gateway configuration, the prefix is stripped and the resulting scheme from the route URL overrides the `ServiceInstance` configuration.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Gateway supports all the LoadBalancer features. You can read more about them in the [Spring Cloud Commons documentation](https://docs.spring.io/spring-cloud-commons/docs/current/reference/html/#spring-cloud-loadbalancer).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 7.4. The Netty Routing Filter + +The Netty routing filter runs if the URL located in the `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR` exchange attribute has a `http` or `https` scheme. +It uses the Netty `HttpClient` to make the downstream proxy request. +The response is put in the `ServerWebExchangeUtils.CLIENT_RESPONSE_ATTR` exchange attribute for use in a later filter. +(There is also an experimental `WebClientHttpRoutingFilter` that performs the same function but does not require Netty.) + +### 7.5. The Netty Write Response Filter + +The `NettyWriteResponseFilter` runs if there is a Netty `HttpClientResponse` in the `ServerWebExchangeUtils.CLIENT_RESPONSE_ATTR` exchange attribute. 
+It runs after all other filters have completed and writes the proxy response back to the gateway client response. +(There is also an experimental `WebClientWriteResponseFilter` that performs the same function but does not require Netty.) + +### 7.6. The `RouteToRequestUrl` Filter + +If there is a `Route` object in the `ServerWebExchangeUtils.GATEWAY_ROUTE_ATTR` exchange attribute, the `RouteToRequestUrlFilter` runs. +It creates a new URI, based off of the request URI but updated with the URI attribute of the `Route` object. +The new URI is placed in the `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR` exchange attribute. + +If the URI has a scheme prefix, such as `lb:ws://serviceid`, the `lb` scheme is stripped from the URI and placed in the `ServerWebExchangeUtils.GATEWAY_SCHEME_PREFIX_ATTR` for use later in the filter chain. + +### 7.7. The Websocket Routing Filter + +If the URL located in the `ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR` exchange attribute has a `ws` or `wss` scheme, the websocket routing filter runs. It uses the Spring WebSocket infrastructure to forward the websocket request downstream. + +You can load-balance websockets by prefixing the URI with `lb`, such as `lb:ws://serviceid`. + +| |If you use [SockJS](https://github.com/sockjs) as a fallback over normal HTTP, you should configure a normal HTTP route as well as the websocket Route.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following listing configures a websocket routing filter: + +Example 60. application.yml + +``` +spring: + cloud: + gateway: + routes: + # SockJS route + - id: websocket_sockjs_route + uri: http://localhost:3001 + predicates: + - Path=/websocket/info/** + # Normal Websocket route + - id: websocket_route + uri: ws://localhost:3001 + predicates: + - Path=/websocket/** +``` + +### 7.8. 
The Gateway Metrics Filter + +To enable gateway metrics, add spring-boot-starter-actuator as a project dependency. Then, by default, the gateway metrics filter runs as long as the property `spring.cloud.gateway.metrics.enabled` is not set to `false`. This filter adds a timer metric named `spring.cloud.gateway.requests` with the following tags: + +* `routeId`: The route ID. + +* `routeUri`: The URI to which the API is routed. + +* `outcome`: The outcome, as classified by [HttpStatus.Series](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/http/HttpStatus.Series.html). + +* `status`: The HTTP status of the request returned to the client. + +* `httpStatusCode`: The HTTP Status of the request returned to the client. + +* `httpMethod`: The HTTP method used for the request. + +In addition, through the property `spring.cloud.gateway.metrics.tags.path.enabled` (by default, set to false), you can activate an extra metric with the tag: + +* `path`: Path of the request. + +These metrics are then available to be scraped from `/actuator/metrics/spring.cloud.gateway.requests` and can be easily integrated with Prometheus to create a [Grafana](images/gateway-grafana-dashboard.jpeg) [dashboard](gateway-grafana-dashboard.json). + +| |To enable the prometheus endpoint, add `micrometer-registry-prometheus` as a project dependency.| +|---|------------------------------------------------------------------------------------------------| + +### 7.9. Marking An Exchange As Routed + +After the gateway has routed a `ServerWebExchange`, it marks that exchange as “routed” by adding `gatewayAlreadyRouted`to the exchange attributes. Once a request has been marked as routed, other routing filters will not route the request again, +essentially skipping the filter. There are convenience methods that you can use to mark an exchange as routed +or check if an exchange has already been routed. 
+
+* `ServerWebExchangeUtils.isAlreadyRouted` takes a `ServerWebExchange` object and checks if it has been “routed”.
+
+* `ServerWebExchangeUtils.setAlreadyRouted` takes a `ServerWebExchange` object and marks it as “routed”.
+
+## 8. HttpHeadersFilters
+
+HttpHeadersFilters are applied to requests before sending them downstream, such as in the `NettyRoutingFilter`.
+
+### 8.1. Forwarded Headers Filter
+
+The `Forwarded` Headers Filter creates a `Forwarded` header to send to the downstream service. It adds the `Host` header, scheme and port of the current request to any existing `Forwarded` header.
+
+### 8.2. RemoveHopByHop Headers Filter
+
+The `RemoveHopByHop` Headers Filter removes headers from forwarded requests. The default list of headers that is removed comes from the [IETF](https://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-14#section-7.1.3).
+
+The default removed headers are:
+
+* Connection
+
+* Keep-Alive
+
+* Proxy-Authenticate
+
+* Proxy-Authorization
+
+* TE
+
+* Trailer
+
+* Transfer-Encoding
+
+* Upgrade
+
+To change this, set the `spring.cloud.gateway.filter.remove-hop-by-hop.headers` property to the list of header names to remove.
+
+### 8.3. XForwarded Headers Filter
+
+The `XForwarded` Headers Filter creates various `X-Forwarded-*` headers to send to the downstream service. It uses the `Host` header, scheme, port and path of the current request to create the various headers.
+ +Creating of individual headers can be controlled by the following boolean properties (defaults to true): + +* `spring.cloud.gateway.x-forwarded.for-enabled` + +* `spring.cloud.gateway.x-forwarded.host-enabled` + +* `spring.cloud.gateway.x-forwarded.port-enabled` + +* `spring.cloud.gateway.x-forwarded.proto-enabled` + +* `spring.cloud.gateway.x-forwarded.prefix-enabled` + +Appending multiple headers can be controlled by the following boolean properties (defaults to true): + +* `spring.cloud.gateway.x-forwarded.for-append` + +* `spring.cloud.gateway.x-forwarded.host-append` + +* `spring.cloud.gateway.x-forwarded.port-append` + +* `spring.cloud.gateway.x-forwarded.proto-append` + +* `spring.cloud.gateway.x-forwarded.prefix-append` + +## 9. TLS and SSL + +The gateway can listen for requests on HTTPS by following the usual Spring server configuration. +The following example shows how to do so: + +Example 61. application.yml + +``` +server: + ssl: + enabled: true + key-alias: scg + key-store-password: scg1234 + key-store: classpath:scg-keystore.p12 + key-store-type: PKCS12 +``` + +You can route gateway routes to both HTTP and HTTPS backends. +If you are routing to an HTTPS backend, you can configure the gateway to trust all downstream certificates with the following configuration: + +Example 62. application.yml + +``` +spring: + cloud: + gateway: + httpclient: + ssl: + useInsecureTrustManager: true +``` + +Using an insecure trust manager is not suitable for production. +For a production deployment, you can configure the gateway with a set of known certificates that it can trust with the following configuration: + +Example 63. application.yml + +``` +spring: + cloud: + gateway: + httpclient: + ssl: + trustedX509Certificates: + - cert1.pem + - cert2.pem +``` + +If the Spring Cloud Gateway is not provisioned with trusted certificates, the default trust store is used (which you can override by setting the `javax.net.ssl.trustStore` system property). + +### 9.1. 
TLS Handshake + +The gateway maintains a client pool that it uses to route to backends. +When communicating over HTTPS, the client initiates a TLS handshake. +A number of timeouts are associated with this handshake. +You can configure these timeouts (defaults shown) as follows: + +Example 64. application.yml + +``` +spring: + cloud: + gateway: + httpclient: + ssl: + handshake-timeout-millis: 10000 + close-notify-flush-timeout-millis: 3000 + close-notify-read-timeout-millis: 0 +``` + +## 10. Configuration + +Configuration for Spring Cloud Gateway is driven by a collection of `RouteDefinitionLocator` instances. +The following listing shows the definition of the `RouteDefinitionLocator` interface: + +Example 65. RouteDefinitionLocator.java + +``` +public interface RouteDefinitionLocator { + Flux getRouteDefinitions(); +} +``` + +By default, a `PropertiesRouteDefinitionLocator` loads properties by using Spring Boot’s `@ConfigurationProperties` mechanism. + +The earlier configuration examples all use a shortcut notation that uses positional arguments rather than named ones. +The following two examples are equivalent: + +Example 66. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: setstatus_route + uri: https://example.org + filters: + - name: SetStatus + args: + status: 401 + - id: setstatusshortcut_route + uri: https://example.org + filters: + - SetStatus=401 +``` + +For some usages of the gateway, properties are adequate, but some production use cases benefit from loading configuration from an external source, such as a database. Future milestone versions will have `RouteDefinitionLocator` implementations based off of Spring Data Repositories, such as Redis, MongoDB, and Cassandra. + +### 10.1. RouteDefinition Metrics + +To enable `RouteDefinition` metrics, add spring-boot-starter-actuator as a project dependency. 
Then, by default, the metrics will be available as long as the property `spring.cloud.gateway.metrics.enabled` is set to `true`. A gauge metric named `spring.cloud.gateway.routes.count` will be added, whose value is the number of `RouteDefinitions`. This metric will be available from `/actuator/metrics/spring.cloud.gateway.routes.count`. + +## 11. Route Metadata Configuration + +You can configure additional parameters for each route by using metadata, as follows: + +Example 67. application.yml + +``` +spring: + cloud: + gateway: + routes: + - id: route_with_metadata + uri: https://example.org + metadata: + optionName: "OptionValue" + compositeObject: + name: "value" + iAmNumber: 1 +``` + +You could acquire all metadata properties from an exchange, as follows: + +``` +Route route = exchange.getAttribute(GATEWAY_ROUTE_ATTR); +// get all metadata properties +route.getMetadata(); +// get a single metadata property +route.getMetadata(someKey); +``` + +## 12. Http timeouts configuration + +Http timeouts (response and connect) can be configured for all routes and overridden for each specific route. + +### 12.1. Global timeouts + +To configure Global http timeouts: +`connect-timeout` must be specified in milliseconds. +`response-timeout` must be specified as a java.time.Duration + +global http timeouts example + +``` +spring: + cloud: + gateway: + httpclient: + connect-timeout: 1000 + response-timeout: 5s +``` + +### 12.2. Per-route timeouts + +To configure per-route timeouts: +`connect-timeout` must be specified in milliseconds. +`response-timeout` must be specified in milliseconds. 
+ +per-route http timeouts configuration via configuration + +``` + - id: per_route_timeouts + uri: https://example.org + predicates: + - name: Path + args: + pattern: /delay/{timeout} + metadata: + response-timeout: 200 + connect-timeout: 200 +``` + +per-route timeouts configuration using Java DSL + +``` +import static org.springframework.cloud.gateway.support.RouteMetadataUtils.CONNECT_TIMEOUT_ATTR; +import static org.springframework.cloud.gateway.support.RouteMetadataUtils.RESPONSE_TIMEOUT_ATTR; + + @Bean + public RouteLocator customRouteLocator(RouteLocatorBuilder routeBuilder){ + return routeBuilder.routes() + .route("test1", r -> { + return r.host("*.somehost.org").and().path("/somepath") + .filters(f -> f.addRequestHeader("header1", "header-value-1")) + .uri("http://someuri") + .metadata(RESPONSE_TIMEOUT_ATTR, 200) + .metadata(CONNECT_TIMEOUT_ATTR, 200); + }) + .build(); + } +``` + +A per-route `response-timeout` with a negative value will disable the global `response-timeout` value. + +``` + - id: per_route_timeouts + uri: https://example.org + predicates: + - name: Path + args: + pattern: /delay/{timeout} + metadata: + response-timeout: -1 +``` + +### 12.3. Fluent Java Routes API + +To allow for simple configuration in Java, the `RouteLocatorBuilder` bean includes a fluent API. +The following listing shows how it works: + +Example 68. 
GatewaySampleApplication.java + +``` +// static imports from GatewayFilters and RoutePredicates +@Bean +public RouteLocator customRouteLocator(RouteLocatorBuilder builder, ThrottleGatewayFilterFactory throttle) { + return builder.routes() + .route(r -> r.host("**.abc.org").and().path("/image/png") + .filters(f -> + f.addResponseHeader("X-TestHeader", "foobar")) + .uri("http://httpbin.org:80") + ) + .route(r -> r.path("/image/webp") + .filters(f -> + f.addResponseHeader("X-AnotherHeader", "baz")) + .uri("http://httpbin.org:80") + .metadata("key", "value") + ) + .route(r -> r.order(-1) + .host("**.throttle.org").and().path("/get") + .filters(f -> f.filter(throttle.apply(1, + 1, + 10, + TimeUnit.SECONDS))) + .uri("http://httpbin.org:80") + .metadata("key", "value") + ) + .build(); +} +``` + +This style also allows for more custom predicate assertions. +The predicates defined by `RouteDefinitionLocator` beans are combined using logical `and`. +By using the fluent Java API, you can use the `and()`, `or()`, and `negate()` operators on the `Predicate` class. + +### 12.4. The `DiscoveryClient` Route Definition Locator + +You can configure the gateway to create routes based on services registered with a `DiscoveryClient` compatible service registry. + +To enable this, set `spring.cloud.gateway.discovery.locator.enabled=true` and make sure a `DiscoveryClient` implementation (such as Netflix Eureka, Consul, or Zookeeper) is on the classpath and enabled. + +#### 12.4.1. Configuring Predicates and Filters For `DiscoveryClient` Routes + +By default, the gateway defines a single predicate and filter for routes created with a `DiscoveryClient`. + +The default predicate is a path predicate defined with the pattern `/serviceId/**`, where `serviceId` is +the ID of the service from the `DiscoveryClient`. + +The default filter is a rewrite path filter with the regex `/serviceId/?(?.*)` and the replacement `/${remaining}`. 
+This strips the service ID from the path before the request is sent downstream. + +If you want to customize the predicates or filters used by the `DiscoveryClient` routes, set `spring.cloud.gateway.discovery.locator.predicates[x]` and `spring.cloud.gateway.discovery.locator.filters[y]`. +When doing so, you need to make sure to include the default predicate and filter shown earlier, if you want to retain that functionality. +The following example shows what this looks like: + +Example 69. application.properties + +``` +spring.cloud.gateway.discovery.locator.predicates[0].name: Path +spring.cloud.gateway.discovery.locator.predicates[0].args[pattern]: "'/'+serviceId+'/**'" +spring.cloud.gateway.discovery.locator.predicates[1].name: Host +spring.cloud.gateway.discovery.locator.predicates[1].args[pattern]: "'**.foo.com'" +spring.cloud.gateway.discovery.locator.filters[0].name: CircuitBreaker +spring.cloud.gateway.discovery.locator.filters[0].args[name]: serviceId +spring.cloud.gateway.discovery.locator.filters[1].name: RewritePath +spring.cloud.gateway.discovery.locator.filters[1].args[regexp]: "'/' + serviceId + '/?(?.*)'" +spring.cloud.gateway.discovery.locator.filters[1].args[replacement]: "'/${remaining}'" +``` + +## 13. Reactor Netty Access Logs + +To enable Reactor Netty access logs, set `-Dreactor.netty.http.server.accessLogEnabled=true`. + +| |It must be a Java System Property, not a Spring Boot property.| +|---|--------------------------------------------------------------| + +You can configure the logging system to have a separate access log file. The following example creates a Logback configuration: + +Example 70. logback.xml + +``` + + access_log.log + + %msg%n + + + + + + + + + +``` + +## 14. CORS Configuration + +You can configure the gateway to control CORS behavior. 
The “global” CORS configuration is a map of URL patterns to [Spring Framework `CorsConfiguration`](https://docs.spring.io/spring/docs/5.0.x/javadoc-api/org/springframework/web/cors/CorsConfiguration.html). +The following example configures CORS: + +Example 71. application.yml + +``` +spring: + cloud: + gateway: + globalcors: + cors-configurations: + '[/**]': + allowedOrigins: "https://docs.spring.io" + allowedMethods: + - GET +``` + +In the preceding example, CORS requests are allowed from requests that originate from `docs.spring.io` for all GET requested paths. + +To provide the same CORS configuration to requests that are not handled by some gateway route predicate, set the `spring.cloud.gateway.globalcors.add-to-simple-url-handler-mapping` property to `true`. +This is useful when you try to support CORS preflight requests and your route predicate does not evaluate to `true` because the HTTP method is `options`. + +## 15. Actuator API + +The `/gateway` actuator endpoint lets you monitor and interact with a Spring Cloud Gateway application. +To be remotely accessible, the endpoint has to be [enabled](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-endpoints.html#production-ready-endpoints-enabling-endpoints) and [exposed over HTTP or JMX](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-endpoints.html#production-ready-endpoints-exposing-endpoints) in the application properties. +The following listing shows how to do so: + +Example 72. application.properties + +``` +management.endpoint.gateway.enabled=true # default value +management.endpoints.web.exposure.include=gateway +``` + +### 15.1. Verbose Actuator Format + +A new, more verbose format has been added to Spring Cloud Gateway. +It adds more detail to each route, letting you view the predicates and filters associated with each route along with any configuration that is available. 
+The following example configures `/actuator/gateway/routes`: + +``` +[ + { + "predicate": "(Hosts: [**.addrequestheader.org] && Paths: [/headers], match trailing slash: true)", + "route_id": "add_request_header_test", + "filters": [ + "[[AddResponseHeader X-Response-Default-Foo = 'Default-Bar'], order = 1]", + "[[AddRequestHeader X-Request-Foo = 'Bar'], order = 1]", + "[[PrefixPath prefix = '/httpbin'], order = 2]" + ], + "uri": "lb://testservice", + "order": 0 + } +] +``` + +This feature is enabled by default. To disable it, set the following property: + +Example 73. application.properties + +``` +spring.cloud.gateway.actuator.verbose.enabled=false +``` + +This will default to `true` in a future release. + +### 15.2. Retrieving Route Filters + +This section details how to retrieve route filters, including: + +* [Global Filters](#gateway-global-filters) + +* [[gateway-route-filters]](#gateway-route-filters) + +#### 15.2.1. Global Filters + +To retrieve the [global filters](#global-filters) applied to all routes, make a `GET` request to `/actuator/gateway/globalfilters`. The resulting response is similar to the following: + +``` +{ + "org.spring[email protected]77856cc5": 10100, + "o[email protected]4f6fd101": 10000, + "or[email protected]32d22650": -1, + "[email protected]6459d9": 2147483647, + "[email protected]5e0": 2147483647, + "[email protected]d23": 0, + "org.s[email protected]135064ea": 2147483637, + "[email protected]23c05889": 2147483646 +} +``` + +The response contains the details of the global filters that are in place. +For each global filter, there is a string representation of the filter object (for example, `org.spring[[email protected]](/cdn-cgi/l/email-protection)77856cc5`) and the corresponding [order](#gateway-combined-global-filter-and-gatewayfilter-ordering) in the filter chain.} + +#### 15.2.2. 
Route Filters + +To retrieve the [`GatewayFilter` factories](#gatewayfilter-factories) applied to routes, make a `GET` request to `/actuator/gateway/routefilters`. +The resulting response is similar to the following: + +``` +{ + "[[email protected] configClass = AbstractNameValueGatewayFilterFactory.NameValueConfig]": null, + "[[email protected] configClass = Object]": null, + "[[email protected] configClass = Object]": null +} +``` + +The response contains the details of the `GatewayFilter` factories applied to any particular route. +For each factory there is a string representation of the corresponding object (for example, `[[[email protected]](/cdn-cgi/l/email-protection) configClass = Object]`). +Note that the `null` value is due to an incomplete implementation of the endpoint controller, because it tries to set the order of the object in the filter chain, which does not apply to a `GatewayFilter` factory object. + +### 15.3. Refreshing the Route Cache + +To clear the routes cache, make a `POST` request to `/actuator/gateway/refresh`. +The request returns a 200 without a response body. + +### 15.4. Retrieving the Routes Defined in the Gateway + +To retrieve the routes defined in the gateway, make a `GET` request to `/actuator/gateway/routes`. 
+The resulting response is similar to the following: + +``` +[{ + "route_id": "first_route", + "route_object": { + "predicate": "org.springframework.cloud.gateway.handler.predicate.PathRoutePredicateFactory$$Lambda$432/[email protected]", + "filters": [ + "OrderedGatewayFilter{delegate=org.springframework.cloud.gateway.filter.factory.PreserveHostHeaderGatewayFilterFactory$$Lambda$436/[email protected], order=0}" + ] + }, + "order": 0 +}, +{ + "route_id": "second_route", + "route_object": { + "predicate": "org.springframework.cloud.gateway.handler.predicate.PathRoutePredicateFactory$$Lambda$432/[email protected]", + "filters": [] + }, + "order": 0 +}] +``` + +The response contains the details of all the routes defined in the gateway. +The following table describes the structure of each element (each is a route) of the response: + +| Path | Type | Description | +|------------------------|------|-------------------------------------------------------------------------------| +| `route_id` |String| The route ID. | +|`route_object.predicate`|Object| The route predicate. | +| `route_object.filters` |Array |The [`GatewayFilter` factories](#gatewayfilter-factories) applied to the route.| +| `order` |Number| The route order. | + +### 15.5. Retrieving Information about a Particular Route + +To retrieve information about a single route, make a `GET` request to `/actuator/gateway/routes/{id}` (for example, `/actuator/gateway/routes/first_route`). +The resulting response is similar to the following: + +``` +{ + "id": "first_route", + "predicates": [{ + "name": "Path", + "args": {"_genkey_0":"/first"} + }], + "filters": [], + "uri": "https://www.uri-destination.org", + "order": 0 +} +``` + +The following table describes the structure of the response: + +| Path | Type | Description | +|------------|------|------------------------------------------------------------------------------------------------------| +| `id` |String| The route ID. 
| +|`predicates`|Array |The collection of route predicates. Each item defines the name and the arguments of a given predicate.| +| `filters` |Array | The collection of filters applied to the route. | +| `uri` |String| The destination URI of the route. | +| `order` |Number| The route order. | + +### 15.6. Creating and Deleting a Particular Route + +To create a route, make a `POST` request to `/gateway/routes/{id_route_to_create}` with a JSON body that specifies the fields of the route (see [Retrieving Information about a Particular Route](#gateway-retrieving-information-about-a-particular-route)). + +To delete a route, make a `DELETE` request to `/gateway/routes/{id_route_to_delete}`. + +### 15.7. Recap: The List of All endpoints + +The following table summarizes the Spring Cloud Gateway actuator endpoints (note that each endpoint has `/actuator/gateway` as the base-path): + +| ID |HTTP Method| Description | +|---------------|-----------|-----------------------------------------------------------------------------| +|`globalfilters`| GET | Displays the list of global filters applied to the routes. | +|`routefilters` | GET |Displays the list of `GatewayFilter` factories applied to a particular route.| +| `refresh` | POST | Clears the routes cache. | +| `routes` | GET | Displays the list of routes defined in the gateway. | +| `routes/{id}` | GET | Displays information about a particular route. | +| `routes/{id}` | POST | Adds a new route to the gateway. | +| `routes/{id}` | DELETE | Removes an existing route from the gateway. | + +### 15.8. Sharing Routes between multiple Gateway instances + +Spring Cloud Gateway offers two `RouteDefinitionRepository` implementations. The first one is the `InMemoryRouteDefinitionRepository` which only lives within the memory of one Gateway instance. +This type of Repository is not suited to populate Routes across multiple Gateway instances. 
+ +In order to share Routes across a cluster of Spring Cloud Gateway instances, `RedisRouteDefinitionRepository` can be used. +To enable this kind of repository, the following property has to be set to true: `spring.cloud.gateway.redis-route-definition-repository.enabled`. Likewise to the RedisRateLimiter Filter Factory, it requires the use of the spring-boot-starter-data-redis-reactive Spring Boot starter. + +## 16. Troubleshooting + +This section covers common problems that may arise when you use Spring Cloud Gateway. + +### 16.1. Log Levels + +The following loggers may contain valuable troubleshooting information at the `DEBUG` and `TRACE` levels: + +* `org.springframework.cloud.gateway` + +* `org.springframework.http.server.reactive` + +* `org.springframework.web.reactive` + +* `org.springframework.boot.autoconfigure.web` + +* `reactor.netty` + +* `redisratelimiter` + +### 16.2. Wiretap + +The Reactor Netty `HttpClient` and `HttpServer` can have wiretap enabled. +When combined with setting the `reactor.netty` log level to `DEBUG` or `TRACE`, it enables the logging of information, such as headers and bodies sent and received across the wire. +To enable wiretap, set `spring.cloud.gateway.httpserver.wiretap=true` or `spring.cloud.gateway.httpclient.wiretap=true` for the `HttpServer` and `HttpClient`, respectively. + +## 17. Developer Guide + +These are basic guides to writing some custom components of the gateway. + +### 17.1. Writing Custom Route Predicate Factories + +In order to write a Route Predicate you will need to implement `RoutePredicateFactory` as a bean. There is an abstract class called `AbstractRoutePredicateFactory` which you can extend. 
+ +MyRoutePredicateFactory.java + +``` +@Component +public class MyRoutePredicateFactory extends AbstractRoutePredicateFactory { + + public MyRoutePredicateFactory() { + super(Config.class); + } + + @Override + public Predicate apply(Config config) { + // grab configuration from Config object + return exchange -> { + //grab the request + ServerHttpRequest request = exchange.getRequest(); + //take information from the request to see if it + //matches configuration. + return matches(config, request); + }; + } + + public static class Config { + //Put the configuration properties for your filter here + } + +} +``` + +### 17.2. Writing Custom GatewayFilter Factories + +To write a `GatewayFilter`, you must implement `GatewayFilterFactory` as a bean. +You can extend an abstract class called `AbstractGatewayFilterFactory`. +The following examples show how to do so: + +Example 74. PreGatewayFilterFactory.java + +``` +@Component +public class PreGatewayFilterFactory extends AbstractGatewayFilterFactory { + + public PreGatewayFilterFactory() { + super(Config.class); + } + + @Override + public GatewayFilter apply(Config config) { + // grab configuration from Config object + return (exchange, chain) -> { + //If you want to build a "pre" filter you need to manipulate the + //request before calling chain.filter + ServerHttpRequest.Builder builder = exchange.getRequest().mutate(); + //use builder to manipulate the request + return chain.filter(exchange.mutate().request(builder.build()).build()); + }; + } + + public static class Config { + //Put the configuration properties for your filter here + } + +} +``` + +PostGatewayFilterFactory.java + +``` +@Component +public class PostGatewayFilterFactory extends AbstractGatewayFilterFactory { + + public PostGatewayFilterFactory() { + super(Config.class); + } + + @Override + public GatewayFilter apply(Config config) { + // grab configuration from Config object + return (exchange, chain) -> { + return 
chain.filter(exchange).then(Mono.fromRunnable(() -> { + ServerHttpResponse response = exchange.getResponse(); + //Manipulate the response in some way + })); + }; + } + + public static class Config { + //Put the configuration properties for your filter here + } + +} +``` + +#### 17.2.1. Naming Custom Filters And References In Configuration + +Custom filters class names should end in `GatewayFilterFactory`. + +For example, to reference a filter named `Something` in configuration files, the filter +must be in a class named `SomethingGatewayFilterFactory`. + +| |It is possible to create a gateway filter named without the`GatewayFilterFactory` suffix, such as `class AnotherThing`. This filter could be
referenced as `AnotherThing` in configuration files. This is **not** a supported naming
convention and this syntax may be removed in future releases. Please update the filter
name to be compliant.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 17.3. Writing Custom Global Filters + +To write a custom global filter, you must implement `GlobalFilter` interface as a bean. +This applies the filter to all requests. + +The following examples show how to set up global pre and post filters, respectively: + +``` +@Bean +public GlobalFilter customGlobalFilter() { + return (exchange, chain) -> exchange.getPrincipal() + .map(Principal::getName) + .defaultIfEmpty("Default User") + .map(userName -> { + //adds header to proxied request + exchange.getRequest().mutate().header("CUSTOM-REQUEST-HEADER", userName).build(); + return exchange; + }) + .flatMap(chain::filter); +} + +@Bean +public GlobalFilter customGlobalPostFilter() { + return (exchange, chain) -> chain.filter(exchange) + .then(Mono.just(exchange)) + .map(serverWebExchange -> { + //adds header to response + serverWebExchange.getResponse().getHeaders().set("CUSTOM-RESPONSE-HEADER", + HttpStatus.OK.equals(serverWebExchange.getResponse().getStatusCode()) ? "It worked": "It did not work"); + return serverWebExchange; + }) + .then(); +} +``` + +## 18. Building a Simple Gateway by Using Spring MVC or Webflux + +| |The following describes an alternative style gateway. None of the prior documentation applies to what follows.| +|---|--------------------------------------------------------------------------------------------------------------| + +Spring Cloud Gateway provides a utility object called `ProxyExchange`. +You can use it inside a regular Spring web handler as a method parameter. +It supports basic downstream HTTP exchanges through methods that mirror the HTTP verbs. 
+With MVC, it also supports forwarding to a local handler through the `forward()` method. +To use the `ProxyExchange`, include the right module in your classpath (either `spring-cloud-gateway-mvc` or `spring-cloud-gateway-webflux`). + +The following MVC example proxies a request to `/test` downstream to a remote server: + +``` +@RestController +@SpringBootApplication +public class GatewaySampleApplication { + + @Value("${remote.home}") + private URI home; + + @GetMapping("/test") + public ResponseEntity proxy(ProxyExchange proxy) throws Exception { + return proxy.uri(home.toString() + "/image/png").get(); + } + +} +``` + +The following example does the same thing with Webflux: + +``` +@RestController +@SpringBootApplication +public class GatewaySampleApplication { + + @Value("${remote.home}") + private URI home; + + @GetMapping("/test") + public Mono> proxy(ProxyExchange proxy) throws Exception { + return proxy.uri(home.toString() + "/image/png").get(); + } + +} +``` + +Convenience methods on the `ProxyExchange` enable the handler method to discover and enhance the URI path of the incoming request. +For example, you might want to extract the trailing elements of a path to pass them downstream: + +``` +@GetMapping("/proxy/path/**") +public ResponseEntity proxyPath(ProxyExchange proxy) throws Exception { + String path = proxy.path("/proxy/path/"); + return proxy.uri(home.toString() + "/foos/" + path).get(); +} +``` + +All the features of Spring MVC and Webflux are available to gateway handler methods. +As a result, you can inject request headers and query parameters, for instance, and you can constrain the incoming requests with declarations in the mapping annotation. +See the documentation for `@RequestMapping` in Spring MVC for more details of those features. + +You can add headers to the downstream response by using the `header()` methods on `ProxyExchange`. 
+ +You can also manipulate response headers (and anything else you like in the response) by adding a mapper to the `get()` method (and other methods). +The mapper is a `Function` that takes the incoming `ResponseEntity` and converts it to an outgoing one. + +First-class support is provided for “sensitive” headers (by default, `cookie` and `authorization`), which are not passed downstream, and for “proxy” (`x-forwarded-*`) headers. + +## 19. Configuration properties + +To see the list of all Spring Cloud Gateway related configuration properties, see [the appendix](appendix.html). + diff --git a/docs/en/spring-cloud/spring-cloud-kubernetes.md b/docs/en/spring-cloud/spring-cloud-kubernetes.md new file mode 100644 index 0000000000000000000000000000000000000000..01529fb5c3df3627e7494a8e803c10cabd061b55 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-kubernetes.md @@ -0,0 +1,1886 @@ +# Spring Cloud Kubernetes + +## 1. Why do you need Spring Cloud Kubernetes? + +Spring Cloud Kubernetes provides implementations of well known Spring Cloud interfaces allowing developers to build and run Spring Cloud applications on Kubernetes. While this project may be useful to you when building a cloud native application, it is also not a requirement in order to deploy a Spring Boot app on Kubernetes. If you are just getting started in your journey to running your Spring Boot app on Kubernetes you can accomplish a lot with nothing more than a basic Spring Boot app and Kubernetes itself. To learn more, you can get started by reading the [Spring Boot reference documentation for deploying to Kubernetes ](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#cloud-deployment-kubernetes) and also working through the workshop material [Spring and Kubernetes](https://hackmd.io/@ryanjbaxter/spring-on-k8s-workshop). + +## 2. Starters + +Starters are convenient dependency descriptors you can include in your +application. 
Include a starter to get the dependencies and Spring Boot +auto-configuration for a feature set. Starters that begin with `spring-cloud-starter-kubernetes-fabric8`provide implementations using the [Fabric8 Kubernetes Java Client](https://github.com/fabric8io/kubernetes-client). +Starters that begin with`spring-cloud-starter-kubernetes-client` provide implementations using the [Kubernetes Java Client](https://github.com/kubernetes-client/java). + +| Starter | Features | +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Fabric8 Dependency

```

org.springframework.cloud
spring-cloud-starter-kubernetes-fabric8

```

Kubernetes Client Dependency

```

org.springframework.cloud
spring-cloud-starter-kubernetes-client

``` | [Discovery Client](#discoveryclient-for-kubernetes) implementation that
resolves service names to Kubernetes Services. | +|Fabric8 Dependency

```

org.springframework.cloud
spring-cloud-starter-kubernetes-fabric8-config

```

Kubernetes Client Dependency

```

org.springframework.cloud
spring-cloud-starter-kubernetes-client-config

```|Load application properties from Kubernetes [ConfigMaps](#configmap-propertysource) and [Secrets](#secrets-propertysource). [Reload](#propertysource-reload) application properties when a ConfigMap or
Secret changes.| +| Fabric8 Dependency

```

org.springframework.cloud
spring-cloud-starter-kubernetes-fabric8-all

```

Kubernetes Client Dependency

```

org.springframework.cloud
spring-cloud-starter-kubernetes-client-all

``` | All Spring Cloud Kubernetes features. | + +## 3. DiscoveryClient for Kubernetes + +This project provides an implementation of [Discovery Client](https://github.com/spring-cloud/spring-cloud-commons/blob/master/spring-cloud-commons/src/main/java/org/springframework/cloud/client/discovery/DiscoveryClient.java) for [Kubernetes](https://kubernetes.io). +This client lets you query Kubernetes endpoints (see [services](https://kubernetes.io/docs/user-guide/services/)) by name. +A service is typically exposed by the Kubernetes API server as a collection of endpoints that represent `http` and `https` addresses and that a client can +access from a Spring Boot application running as a pod. + +This is something that you get for free by adding the following dependency inside your project: + +HTTP Based `DiscoveryClient` + +``` + + org.springframework.cloud + spring-cloud-starter-kubernetes-discoveryclient + +``` + +| |`spring-cloud-starter-kubernetes-discoveryclient` is designed to be used with the [Spring Cloud Kubernetes DiscoveryServer](#spring-cloud-kubernetes-discoveryserver).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Fabric8 Kubernetes Client + +``` + + org.springframework.cloud + spring-cloud-starter-kubernetes-fabric8 + +``` + +Kubernetes Java Client + +``` + + org.springframework.cloud + spring-cloud-starter-kubernetes-client + +``` + +To enable loading of the `DiscoveryClient`, add `@EnableDiscoveryClient` to the according configuration or application class, as the following example shows: + +``` +@SpringBootApplication +@EnableDiscoveryClient +public class Application { + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } +} +``` + +Then you can inject the client in your code simply by autowiring it, as the following example shows: + +``` +@Autowired +private DiscoveryClient 
discoveryClient; +``` + +You can choose to enable `DiscoveryClient` from all namespaces by setting the following property in `application.properties`: + +``` +spring.cloud.kubernetes.discovery.all-namespaces=true +``` + +To discover service endpoint addresses that are not marked as "ready" by the kubernetes api server, you can set the following property in `application.properties` (default: false): + +``` +spring.cloud.kubernetes.discovery.include-not-ready-addresses=true +``` + +| |This might be useful when discovering services for monitoring purposes, and would enable inspecting the `/health` endpoint of not-ready service instances.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------| + +If your service exposes multiple ports, you will need to specify which port the `DiscoveryClient` should use. +The `DiscoveryClient` will choose the port using the following logic. + +1. If the service has a label `primary-port-name` it will use the port with the name specified in the label’s value. + +2. If no label is present, then the port name specified in `spring.cloud.kubernetes.discovery.primary-port-name` will be used. + +3. If neither of the above is specified it will use the port named `https`. + +4. If none of the above conditions are met it will use the port named `http`. + +5. As a last resort it will pick the first port in the list of ports. + +| |The last option may result in non-deterministic behaviour.
Please make sure to configure your service and/or application accordingly.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------| + +By default all of the ports and their names will be added to the metadata of the `ServiceInstance`. + +If, for any reason, you need to disable the `DiscoveryClient`, you can set the following property in `application.properties`: + +``` +spring.cloud.kubernetes.discovery.enabled=false +``` + +Some Spring Cloud components use the `DiscoveryClient` in order to obtain information about the local service instance. For +this to work, you need to align the Kubernetes service name with the `spring.application.name` property. + +| |`spring.application.name` has no effect on the name registered for the application within Kubernetes| +|---|-----------------------------------------------------------------------------------------------------------| + +Spring Cloud Kubernetes can also watch the Kubernetes service catalog for changes and update the `DiscoveryClient` implementation accordingly. In order to enable this functionality you need to add `@EnableScheduling` on a configuration class in your application. + +## 4. Kubernetes native service discovery + +Kubernetes itself is capable of (server side) service discovery (see: [kubernetes.io/docs/concepts/services-networking/service/#discovering-services](https://kubernetes.io/docs/concepts/services-networking/service/#discovering-services)). +Using native kubernetes service discovery ensures compatibility with additional tooling, such as Istio ([istio.io](https://istio.io)), a service mesh that is capable of load balancing, circuit breaker, failover, and much more. + +The caller service then need only refer to names resolvable in a particular Kubernetes cluster. 
A simple implementation might use a spring `RestTemplate` that refers to a fully qualified domain name (FQDN), such as `[{service-name}.{namespace}.svc.{cluster}.local:{service-port}](https://{service-name}.{namespace}.svc.{cluster}.local:{service-port})`. + +Additionally, you can use Hystrix for: + +* Circuit breaker implementation on the caller side, by annotating the spring boot application class with `@EnableCircuitBreaker` + +* Fallback functionality, by annotating the respective method with `@HystrixCommand(fallbackMethod=` + +## 5. Kubernetes PropertySource implementations + +The most common approach to configuring your Spring Boot application is to create an `application.properties` or `application.yaml` or +an `application-profile.properties` or `application-profile.yaml` file that contains key-value pairs that provide customization values to your +application or Spring Boot starters. You can override these properties by specifying system properties or environment +variables. + +### 5.1. Using a `ConfigMap` `PropertySource` + +Kubernetes provides a resource named [`ConfigMap`](https://kubernetes.io/docs/user-guide/configmap/) to externalize the +parameters to pass to your application in the form of key-value pairs or embedded `application.properties` or `application.yaml` files. +The [Spring Cloud Kubernetes Config](https://github.com/spring-cloud/spring-cloud-kubernetes/tree/master/spring-cloud-kubernetes-fabric8-config) project makes Kubernetes `ConfigMap` instances available +during application bootstrapping and triggers hot reloading of beans or Spring context when changes are detected on +observed `ConfigMap` instances. 
+ +The default behavior is to create a `Fabric8ConfigMapPropertySource` based on a Kubernetes `ConfigMap` that has a `metadata.name` value of either the name of +your Spring application (as defined by its `spring.application.name` property) or a custom name defined within the`bootstrap.properties` file under the following key: `spring.cloud.kubernetes.config.name`. + +However, more advanced configuration is possible where you can use multiple `ConfigMap` instances. +The `spring.cloud.kubernetes.config.sources` list makes this possible. +For example, you could define the following `ConfigMap` instances: + +``` +spring: + application: + name: cloud-k8s-app + cloud: + kubernetes: + config: + name: default-name + namespace: default-namespace + sources: + # Spring Cloud Kubernetes looks up a ConfigMap named c1 in namespace default-namespace + - name: c1 + # Spring Cloud Kubernetes looks up a ConfigMap named default-name in whatever namespace n2 + - namespace: n2 + # Spring Cloud Kubernetes looks up a ConfigMap named c3 in namespace n3 + - namespace: n3 + name: c3 +``` + +In the preceding example, if `spring.cloud.kubernetes.config.namespace` had not been set, +the `ConfigMap` named `c1` would be looked up in the namespace that the application runs. +See [Namespace resolution](#namespace-resolution) to get a better understanding of how the namespace +of the application is resolved. + +Any matching `ConfigMap` that is found is processed as follows: + +* Apply individual configuration properties. + +* Apply as `yaml` the content of any property named `application.yaml`. + +* Apply as a properties file the content of any property named `application.properties`. + +The single exception to the aforementioned flow is when the `ConfigMap` contains a **single** key that indicates +the file is a YAML or properties file. 
In that case, the name of the key does NOT have to be `application.yaml` or`application.properties` (it can be anything) and the value of the property is treated correctly. +This features facilitates the use case where the `ConfigMap` was created by using something like the following: + +``` +kubectl create configmap game-config --from-file=/path/to/app-config.yaml +``` + +Assume that we have a Spring Boot application named `demo` that uses the following properties to read its thread pool +configuration. + +* `pool.size.core` + +* `pool.size.maximum` + +This can be externalized to config map in `yaml` format as follows: + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: demo +data: + pool.size.core: 1 + pool.size.max: 16 +``` + +Individual properties work fine for most cases. However, sometimes, embedded `yaml` is more convenient. In this case, we +use a single property named `application.yaml` to embed our `yaml`, as follows: + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: demo +data: + application.yaml: |- + pool: + size: + core: 1 + max:16 +``` + +The following example also works: + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: demo +data: + custom-name.yaml: |- + pool: + size: + core: 1 + max:16 +``` + +You can also configure Spring Boot applications differently depending on active profiles that are merged together +when the `ConfigMap` is read. 
You can provide different property values for different profiles by using an`application.properties` or `application.yaml` property, specifying profile-specific values, each in their own document +(indicated by the `---` sequence), as follows: + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: demo +data: + application.yml: |- + greeting: + message: Say Hello to the World + farewell: + message: Say Goodbye + --- + spring: + profiles: development + greeting: + message: Say Hello to the Developers + farewell: + message: Say Goodbye to the Developers + --- + spring: + profiles: production + greeting: + message: Say Hello to the Ops +``` + +In the preceding case, the configuration loaded into your Spring Application with the `development` profile is as follows: + +``` + greeting: + message: Say Hello to the Developers + farewell: + message: Say Goodbye to the Developers +``` + +However, if the `production` profile is active, the configuration becomes: + +``` + greeting: + message: Say Hello to the Ops + farewell: + message: Say Goodbye +``` + +If both profiles are active, the property that appears last within the `ConfigMap` overwrites any preceding values. 
+ +Another option is to create a different config map per profile and spring boot will automatically fetch it based +on active profiles + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: demo +data: + application.yml: |- + greeting: + message: Say Hello to the World + farewell: + message: Say Goodbye +``` + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: demo-development +data: + application.yml: |- + spring: + profiles: development + greeting: + message: Say Hello to the Developers + farewell: + message: Say Goodbye to the Developers +``` + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: demo-production +data: + application.yml: |- + spring: + profiles: production + greeting: + message: Say Hello to the Ops + farewell: + message: Say Goodbye +``` + +To tell Spring Boot which `profile` should be enabled at bootstrap, you can pass `SPRING_PROFILES_ACTIVE` environment variable. +To do so, you can launch your Spring Boot application with an environment variable that you can define it in the PodSpec at the container specification. +Deployment resource file, as follows: + +``` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: deployment-name + labels: + app: deployment-name +spec: + replicas: 1 + selector: + matchLabels: + app: deployment-name + template: + metadata: + labels: + app: deployment-name + spec: + containers: + - name: container-name + image: your-image + env: + - name: SPRING_PROFILES_ACTIVE + value: "development" +``` + +You could run into a situation where there are multiple configs maps that have the same property names. 
For example: + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: config-map-one +data: + application.yml: |- + greeting: + message: Say Hello from one +``` + +and + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: config-map-two +data: + application.yml: |- + greeting: + message: Say Hello from two +``` + +Depending on the order in which you place these in `bootstrap.yaml|properties`, you might end up with an unexpected result (the last config map wins). For example: + +``` +spring: + application: + name: cloud-k8s-app + cloud: + kubernetes: + config: + namespace: default-namespace + sources: + - name: config-map-two + - name: config-map-one +``` + +will result in property `greeting.message` being `Say Hello from one`. + +There is a way to change this default configuration by specifying `useNameAsPrefix`. For example: + +``` +spring: + application: + name: with-prefix + cloud: + kubernetes: + config: + useNameAsPrefix: true + namespace: default-namespace + sources: + - name: config-map-one + useNameAsPrefix: false + - name: config-map-two +``` + +Such a configuration will result in two properties being generated: + +* `greeting.message` equal to `Say Hello from one`. + +* `config-map-two.greeting.message` equal to `Say Hello from two` + +Notice that `spring.cloud.kubernetes.config.useNameAsPrefix` has a *lower* priority than `spring.cloud.kubernetes.config.sources.useNameAsPrefix`. +This allows you to set a "default" strategy for all sources, at the same time allowing you to override only a few. + +If using the config map name is not an option, you can specify a different strategy, called `explicitPrefix`. Since this is an *explicit* prefix that +you select, it can only be supplied to the `sources` level. At the same time it has a higher priority than `useNameAsPrefix`. 
Let’s suppose we have a third config map with these entries: + +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: config-map-three +data: + application.yml: |- + greeting: + message: Say Hello from three +``` + +A configuration like the one below: + +``` +spring: + application: + name: with-prefix + cloud: + kubernetes: + config: + useNameAsPrefix: true + namespace: default-namespace + sources: + - name: config-map-one + useNameAsPrefix: false + - name: config-map-two + explicitPrefix: two + - name: config-map-three +``` + +will result in three properties being generated: + +* `greetings.message` equal to `Say Hello from one`. + +* `two.greetings.message` equal to `Say Hello from two`. + +* `config-map-three.greetings.message` equal to `Say Hello from three`. + +By default, besides reading the config map that is specified in the `sources` configuration, Spring will also try to read +all properties from "profile aware" sources. The easiest way to explain this is via an example. Let’s suppose your application +enables a profile called "dev" and you have a configuration like the one below: + +``` +spring: + application: + name: spring-k8s + cloud: + kubernetes: + config: + namespace: default-namespace + sources: + - name: config-map-one +``` + +Besides reading the `config-map-one`, Spring will also try to read `config-map-one-dev`; in this particular order. Each active profile +generates such a profile aware config map. + +Though your application should not be impacted by such a config map, it can be disabled if needed: + +``` +spring: + application: + name: spring-k8s + cloud: + kubernetes: + config: + includeProfileSpecificSources: false + namespace: default-namespace + sources: + - name: config-map-one + includeProfileSpecificSources: false +``` + +Notice that just like before, there are two levels where you can specify this property: for all config maps or +for individual ones; the latter having a higher priority. 
+ +| |You should check the security configuration section. To access config maps from inside a pod you need to have the correct
Kubernetes service accounts, roles and role bindings.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Another option for using `ConfigMap` instances is to mount them into the Pod by running the Spring Cloud Kubernetes application +and having Spring Cloud Kubernetes read them from the file system. +This behavior is controlled by the `spring.cloud.kubernetes.config.paths` property. You can use it in +addition to or instead of the mechanism described earlier. +You can specify multiple (exact) file paths in `spring.cloud.kubernetes.config.paths` by using the `,` delimiter. + +| |You have to provide the full exact path to each property file, because directories are not being recursively parsed.| +|---|--------------------------------------------------------------------------------------------------------------------| + +| |If you use `spring.cloud.kubernetes.config.paths` or `spring.cloud.kubernetes.secrets.path` the automatic reload
functionality will not work. You will need to make a `POST` request to the `/actuator/refresh` endpoint or
restart/redeploy the application.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In some cases, your application may be unable to load some of your `ConfigMaps` using the Kubernetes API. +If you want your application to fail the start-up process in such cases, you can set`spring.cloud.kubernetes.config.fail-fast=true` to make the application start-up fail with an Exception. + +You can also make your application retry loading `ConfigMap` property sources on a failure. First, you need to +set `spring.cloud.kubernetes.config.fail-fast=true`. Then you need to add `spring-retry`and `spring-boot-starter-aop` to your classpath. You can configure retry properties such as +the maximum number of attempts, backoff options like initial interval, multiplier, max interval by setting the`spring.cloud.kubernetes.config.retry.*` properties. + +| |If you already have `spring-retry` and `spring-boot-starter-aop` on the classpath for some reason
and want to enable fail-fast, but do not want retry to be enabled; you can disable retry for `ConfigMap` `PropertySources`by setting `spring.cloud.kubernetes.config.retry.enabled=false`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| Name | Type | Default | Description | +|-------------------------------------------------------|---------|----------------------------|-----------------------------------------------------------------------------------------------------| +| `spring.cloud.kubernetes.config.enabled` |`Boolean`| `true` | Enable ConfigMaps `PropertySource` | +| `spring.cloud.kubernetes.config.name` |`String` |`${spring.application.name}`| Sets the name of `ConfigMap` to look up | +| `spring.cloud.kubernetes.config.namespace` |`String` | Client namespace | Sets the Kubernetes namespace where to lookup | +| `spring.cloud.kubernetes.config.paths` | `List` | `null` | Sets the paths where `ConfigMap` instances are mounted | +| `spring.cloud.kubernetes.config.enableApi` |`Boolean`| `true` | Enable or disable consuming `ConfigMap` instances through APIs | +| `spring.cloud.kubernetes.config.fail-fast` |`Boolean`| `false` |Enable or disable failing the application start-up when an error occurred while loading a `ConfigMap`| +| `spring.cloud.kubernetes.config.retry.enabled` |`Boolean`| `true` | Enable or disable config retry. | +|`spring.cloud.kubernetes.config.retry.initial-interval`| `Long` | `1000` | Initial retry interval in milliseconds. | +| `spring.cloud.kubernetes.config.retry.max-attempts` |`Integer`| `6` | Maximum number of attempts. | +| `spring.cloud.kubernetes.config.retry.max-interval` | `Long` | `2000` | Maximum interval for backoff. 
| +| `spring.cloud.kubernetes.config.retry.multiplier` |`Double` | `1.1` | Multiplier for next interval. | + +### 5.2. Secrets PropertySource + +Kubernetes has the notion of [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) for storing +sensitive data such as passwords, OAuth tokens, and so on. This project provides integration with `Secrets` to make secrets +accessible by Spring Boot applications. You can explicitly enable or disable This feature by setting the `spring.cloud.kubernetes.secrets.enabled` property. + +When enabled, the `Fabric8SecretsPropertySource` looks up Kubernetes for `Secrets` from the following sources: + +1. Reading recursively from secrets mounts + +2. Named after the application (as defined by `spring.application.name`) + +3. Matching some labels + +**Note:** + +By default, consuming Secrets through the API (points 2 and 3 above) **is not enabled** for security reasons. The permission 'list' on secrets allows clients to inspect secrets values in the specified namespace. +Further, we recommend that containers share secrets through mounted volumes. + +If you enable consuming Secrets through the API, we recommend that you limit access to Secrets by using an authorization policy, such as RBAC. +For more information about risks and best practices when consuming Secrets through the API refer to [this doc](https://kubernetes.io/docs/concepts/configuration/secret/#best-practices). + +If the secrets are found, their data is made available to the application. + +Assume that we have a spring boot application named `demo` that uses properties to read its database +configuration. 
We can create a Kubernetes secret by using the following command: + +``` +kubectl create secret generic db-secret --from-literal=username=user --from-literal=password=p455w0rd +``` + +The preceding command would create the following secret (which you can see by using `kubectl get secrets db-secret -o yaml`): + +``` +apiVersion: v1 +data: + password: cDQ1NXcwcmQ= + username: dXNlcg== +kind: Secret +metadata: + creationTimestamp: 2017-07-04T09:15:57Z + name: db-secret + namespace: default + resourceVersion: "357496" + selfLink: /api/v1/namespaces/default/secrets/db-secret + uid: 63c89263-6099-11e7-b3da-76d6186905a8 +type: Opaque +``` + +Note that the data contains Base64-encoded versions of the literal provided by the `create` command. + +Your application can then use this secret — for example, by exporting the secret’s value as environment variables: + +``` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ${project.artifactId} +spec: + template: + spec: + containers: + - env: + - name: DB_USERNAME + valueFrom: + secretKeyRef: + name: db-secret + key: username + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: db-secret + key: password +``` + +You can select the Secrets to consume in a number of ways: + +1. By listing the directories where secrets are mapped: + + ``` + -Dspring.cloud.kubernetes.secrets.paths=/etc/secrets/db-secret,/etc/secrets/postgresql + ``` + + If you have all the secrets mapped to a common root, you can set them like: + + ``` + -Dspring.cloud.kubernetes.secrets.paths=/etc/secrets + ``` + +2. By setting a named secret: + + ``` + -Dspring.cloud.kubernetes.secrets.name=db-secret + ``` + +3. By defining a list of labels: + + ``` + -Dspring.cloud.kubernetes.secrets.labels.broker=activemq + -Dspring.cloud.kubernetes.secrets.labels.db=postgresql + ``` + +As in the case of `ConfigMap`, more advanced configuration is also possible where you can use multiple `Secret` instances. 
The `spring.cloud.kubernetes.secrets.sources` list makes this possible. +For example, you could define the following `Secret` instances: + +``` +spring: + application: + name: cloud-k8s-app + cloud: + kubernetes: + secrets: + name: default-name + namespace: default-namespace + sources: + # Spring Cloud Kubernetes looks up a Secret named s1 in namespace default-namespace + - name: s1 + # Spring Cloud Kubernetes looks up a Secret named default-name in namespace n2 + - namespace: n2 + # Spring Cloud Kubernetes looks up a Secret named s3 in namespace n3 + - namespace: n3 + name: s3 +``` + +In the preceding example, if `spring.cloud.kubernetes.secrets.namespace` had not been set, +the `Secret` named `s1` would be looked up in the namespace that the application runs. +See [namespace-resolution](#namespace-resolution) to get a better understanding of how the namespace +of the application is resolved. + +[Similar to the `ConfigMaps`](#config-map-fail-fast); if you want your application to fail to start +when it is unable to load `Secrets` property sources, you can set `spring.cloud.kubernetes.secrets.fail-fast=true`. + +It is also possible to enable retry for `Secret` property sources [like the `ConfigMaps`](#config-map-retry). +As with the `ConfigMap` property sources, first you need to set `spring.cloud.kubernetes.secrets.fail-fast=true`. +Then you need to add `spring-retry` and `spring-boot-starter-aop` to your classpath. +Retry behavior of the `Secret` property sources can be configured by setting the `spring.cloud.kubernetes.secrets.retry.*`properties. + +| |If you already have `spring-retry` and `spring-boot-starter-aop` on the classpath for some reason
and want to enable fail-fast, but do not want retry to be enabled; you can disable retry for `Secrets` `PropertySources`by setting `spring.cloud.kubernetes.secrets.retry.enabled=false`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| Name | Type | Default | Description | +|--------------------------------------------------------|---------|----------------------------|--------------------------------------------------------------------------------------------------| +| `spring.cloud.kubernetes.secrets.enabled` |`Boolean`| `true` | Enable Secrets `PropertySource` | +| `spring.cloud.kubernetes.secrets.name` |`String` |`${spring.application.name}`| Sets the name of the secret to look up | +| `spring.cloud.kubernetes.secrets.namespace` |`String` | Client namespace | Sets the Kubernetes namespace where to look up | +| `spring.cloud.kubernetes.secrets.labels` | `Map` | `null` | Sets the labels used to lookup secrets | +| `spring.cloud.kubernetes.secrets.paths` | `List` | `null` | Sets the paths where secrets are mounted (example 1) | +| `spring.cloud.kubernetes.secrets.enableApi` |`Boolean`| `false` | Enables or disables consuming secrets through APIs (examples 2 and 3) | +| `spring.cloud.kubernetes.secrets.fail-fast` |`Boolean`| `false` |Enable or disable failing the application start-up when an error occurred while loading a `Secret`| +| `spring.cloud.kubernetes.secrets.retry.enabled` |`Boolean`| `true` | Enable or disable secrets retry. | +|`spring.cloud.kubernetes.secrets.retry.initial-interval`| `Long` | `1000` | Initial retry interval in milliseconds. | +| `spring.cloud.kubernetes.secrets.retry.max-attempts` |`Integer`| `6` | Maximum number of attempts. 
| +| `spring.cloud.kubernetes.secrets.retry.max-interval` | `Long` | `2000` | Maximum interval for backoff. | +| `spring.cloud.kubernetes.secrets.retry.multiplier` |`Double` | `1.1` | Multiplier for next interval. | + +Notes: + +* The `spring.cloud.kubernetes.secrets.labels` property behaves as defined by[Map-based binding](https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-Configuration-Binding#map-based-binding). + +* The `spring.cloud.kubernetes.secrets.paths` property behaves as defined by[Collection-based binding](https://github.com/spring-projects/spring-boot/wiki/Spring-Boot-Configuration-Binding#collection-based-binding). + +* Access to secrets through the API may be restricted for security reasons. The preferred way is to mount secrets to the Pod. + +You can find an example of an application that uses secrets (though it has not been updated to use the new `spring-cloud-kubernetes` project) at[spring-boot-camel-config](https://github.com/fabric8-quickstarts/spring-boot-camel-config) + +### 5.3. Namespace resolution + +Finding an application namespace happens on a best-effort basis. There are some steps that we iterate in order +to find it. The easiest and most common one, is to specify it in the proper configuration, for example: + +``` +spring: + application: + name: app + cloud: + kubernetes: + secrets: + name: secret + namespace: default + sources: + # Spring Cloud Kubernetes looks up a Secret named 'a' in namespace 'default' + - name: a + # Spring Cloud Kubernetes looks up a Secret named 'secret' in namespace 'b' + - namespace: b + # Spring Cloud Kubernetes looks up a Secret named 'd' in namespace 'c' + - namespace: c + name: d +``` + +Remember that the same can be done for config maps. If such a namespace is not specified, it will be read (in this order): + +1. from property `spring.cloud.kubernetes.client.namespace` + +2. 
from a String residing in a file denoted by `spring.cloud.kubernetes.client.serviceAccountNamespacePath` property
+
+3. from a String residing in `/var/run/secrets/kubernetes.io/serviceaccount/namespace` file
+   (kubernetes default namespace path)
+
+4. from a designated client method call (for example fabric8’s `KubernetesClient::getNamespace`), if the client provides
+   such a method. This, in turn, could be configured via environment properties. For example, the fabric8 client can be configured via
+   "KUBERNETES\_NAMESPACE" property; consult the client documentation for exact details.
+
+Failure to find a namespace from the above steps will result in an Exception being raised.
+
+### 5.4. `PropertySource` Reload
+
+| |This functionality has been deprecated in the 2020.0 release. Please see
the [Spring Cloud Kubernetes Configuration Watcher](#spring-cloud-kubernetes-configuration-watcher) controller for an alternative way
to achieve the same functionality.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Some applications may need to detect changes on external property sources and update their internal status to reflect the new configuration. +The reload feature of Spring Cloud Kubernetes is able to trigger an application reload when a related `ConfigMap` or`Secret` changes. + +By default, this feature is disabled. You can enable it by using the `spring.cloud.kubernetes.reload.enabled=true` configuration property (for example, in the `application.properties` file). + +The following levels of reload are supported (by setting the `spring.cloud.kubernetes.reload.strategy` property): + +* `refresh` (default): Only configuration beans annotated with `@ConfigurationProperties` or `@RefreshScope` are reloaded. + This reload level leverages the refresh feature of Spring Cloud Context. + +* `restart_context`: the whole Spring `ApplicationContext` is gracefully restarted. Beans are recreated with the new configuration. + In order for the restart context functionality to work properly you must enable and expose the restart actuator endpoint + +``` +management: + endpoint: + restart: + enabled: true + endpoints: + web: + exposure: + include: restart +``` + +* `shutdown`: the Spring `ApplicationContext` is shut down to activate a restart of the container. + When you use this level, make sure that the lifecycle of all non-daemon threads is bound to the `ApplicationContext`and that a replication controller or replica set is configured to restart the pod. 
+ +Assuming that the reload feature is enabled with default settings (`refresh` mode), the following bean is refreshed when the config map changes: + +``` +@Configuration +@ConfigurationProperties(prefix = "bean") +public class MyConfig { + + private String message = "a message that can be changed live"; + + // getter and setters + +} +``` + +To see that changes effectively happen, you can create another bean that prints the message periodically, as follows + +``` +@Component +public class MyBean { + + @Autowired + private MyConfig config; + + @Scheduled(fixedDelay = 5000) + public void hello() { + System.out.println("The message is: " + config.getMessage()); + } +} +``` + +You can change the message printed by the application by using a `ConfigMap`, as follows: + +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: reload-example +data: + application.properties: |- + bean.message=Hello World! +``` + +Any change to the property named `bean.message` in the `ConfigMap` associated with the pod is reflected in the +output. More generally speaking, changes associated to properties prefixed with the value defined by the `prefix`field of the `@ConfigurationProperties` annotation are detected and reflected in the application.[Associating a `ConfigMap` with a pod](#configmap-propertysource) is explained earlier in this chapter. + +The full example is available in [`spring-cloud-kubernetes-reload-example`](https://github.com/spring-cloud/spring-cloud-kubernetes/tree/main/spring-cloud-kubernetes-examples/kubernetes-reload-example). + +The reload feature supports two operating modes: +\* Event (default): Watches for changes in config maps or secrets by using the Kubernetes API (web socket). +Any event produces a re-check on the configuration and, in case of changes, a reload. +The `view` role on the service account is required in order to listen for config map changes. A higher level role (such as `edit`) is required for secrets +(by default, secrets are not monitored). 
+\* Polling: Periodically re-creates the configuration from config maps and secrets to see if it has changed. +You can configure the polling period by using the `spring.cloud.kubernetes.reload.period` property and defaults to 15 seconds. +It requires the same role as the monitored property source. +This means, for example, that using polling on file-mounted secret sources does not require particular privileges. + +| Name | Type | Default | Description | +|-------------------------------------------------------|----------|---------|--------------------------------------------------------------------------------------| +| `spring.cloud.kubernetes.reload.enabled` |`Boolean` | `false` | Enables monitoring of property sources and configuration reload | +|`spring.cloud.kubernetes.reload.monitoring-config-maps`|`Boolean` | `true` | Allow monitoring changes in config maps | +| `spring.cloud.kubernetes.reload.monitoring-secrets` |`Boolean` | `false` | Allow monitoring changes in secrets | +| `spring.cloud.kubernetes.reload.strategy` | `Enum` |`refresh`|The strategy to use when firing a reload (`refresh`, `restart_context`, or `shutdown`)| +| `spring.cloud.kubernetes.reload.mode` | `Enum` | `event` | Specifies how to listen for changes in property sources (`event` or `polling`) | +| `spring.cloud.kubernetes.reload.period` |`Duration`| `15s` | The period for verifying changes when using the `polling` strategy | + +Notes: +\* You should not use properties under `spring.cloud.kubernetes.reload` in config maps or secrets. Changing such properties at runtime may lead to unexpected results. +\* Deleting a property or the whole config map does not restore the original state of the beans when you use the `refresh` level. + +## 6. Kubernetes Ecosystem Awareness + +All of the features described earlier in this guide work equally well, regardless of whether your application is running inside +Kubernetes. This is really helpful for development and troubleshooting. 
+From a development point of view, this lets you start your Spring Boot application and debug one +of the modules that is part of this project. You need not deploy it in Kubernetes, +as the code of the project relies on the[Fabric8 Kubernetes Java client](https://github.com/fabric8io/kubernetes-client), which is a fluent DSL that can +communicate by using `http` protocol to the REST API of the Kubernetes Server. + +To disable the integration with Kubernetes you can set `spring.cloud.kubernetes.enabled` to `false`. Please be aware that when `spring-cloud-kubernetes-config` is on the classpath,`spring.cloud.kubernetes.enabled` should be set in `bootstrap.{properties|yml}` (or the profile specific one), otherwise it should be in `application.{properties|yml}` (or the profile specific one). +Because of the way we set up a specific `EnvironmentPostProcessor` in `spring-cloud-kubernetes-config`, you also need to disable that processor via a system property (or an environment variable), for example you could start +your application via `-DSPRING_CLOUD_KUBERNETES_ENABLED=false` (any form of relaxed binding will work too). +Also note that these properties: `spring.cloud.kubernetes.config.enabled` and `spring.cloud.kubernetes.secrets.enabled` only take effect when set in `bootstrap.{properties|yml}` + +### 6.1. Kubernetes Profile Autoconfiguration + +When the application runs as a pod inside Kubernetes, a Spring profile named `kubernetes` automatically gets activated. +This lets you customize the configuration, to define beans that are applied when the Spring Boot application is deployed +within the Kubernetes platform (for example, different development and production configuration). + +### 6.2. Istio Awareness + +When you include the `spring-cloud-kubernetes-fabric8-istio` module in the application classpath, a new profile is added to the application, +provided the application is running inside a Kubernetes Cluster with [Istio](https://istio.io) installed. 
You can then use
+spring `@Profile("istio")` annotations in your Beans and `@Configuration` classes.
+
+The Istio awareness module uses `me.snowdrop:istio-client` to interact with Istio APIs, letting us discover traffic rules, circuit breakers, and so on,
+making it easy for our Spring Boot applications to consume this data to dynamically configure themselves according to the environment.
+
+## 7. Pod Health Indicator
+
+Spring Boot uses [`HealthIndicator`](https://github.com/spring-projects/spring-boot/blob/master/spring-boot-project/spring-boot-actuator/src/main/java/org/springframework/boot/actuate/health/HealthEndpoint.java) to expose info about the health of an application.
+That makes it really useful for exposing health-related information to the user and makes it a good fit for use as [readiness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/).
+
+The Kubernetes health indicator (which is part of the core module) exposes the following info:
+
+* Pod name, IP address, namespace, service account, node name, and its IP address
+
+* A flag that indicates whether the Spring Boot application is internal or external to Kubernetes
+
+You can disable this `HealthContributor` by setting `management.health.kubernetes.enabled` to `false` in `application.[properties | yaml]`.
+
+## 8. Info Contributor
+
+Spring Cloud Kubernetes includes an `InfoContributor` which adds Pod information to
+Spring Boot’s `/info` Actuator endpoint.
+
+You can disable this `InfoContributor` by setting `management.info.kubernetes.enabled` to `false` in `application.[properties | yaml]`.
+
+## 9. Leader Election
+
+The Spring Cloud Kubernetes leader election mechanism implements the leader election API of Spring Integration using a Kubernetes ConfigMap.
+
+Multiple application instances compete for leadership, but leadership will only be granted to one.
+When granted leadership, a leader application receives an `OnGrantedEvent` application event with leadership `Context`.
+Applications periodically attempt to gain leadership, with leadership granted to the first caller.
+A leader will remain a leader until either it is removed from the cluster, or it yields its leadership.
+When leadership removal occurs, the previous leader receives `OnRevokedEvent` application event.
+After removal, any instances in the cluster may become the new leader, including the old leader.
+
+To include it in your project, add the following dependency.
+
+Fabric8 Leader Implementation
+
+```
+
+    org.springframework.cloud
+    spring-cloud-kubernetes-fabric8-leader
+
+```
+
+To specify the name of the configmap used for leader election use the following property.
+
+```
+spring.cloud.kubernetes.leader.config-map-name=leader
+```
+
+## 10. LoadBalancer for Kubernetes
+
+This project includes Spring Cloud Load Balancer for load balancing based on Kubernetes Endpoints and provides an implementation of a load balancer based on Kubernetes Service.
+To include it in your project, add the following dependency.
+
+Fabric8 Implementation
+
+```
+
+    org.springframework.cloud
+    spring-cloud-starter-kubernetes-fabric8-loadbalancer
+
+```
+
+Kubernetes Java Client Implementation
+
+```
+
+    org.springframework.cloud
+    spring-cloud-starter-kubernetes-client-loadbalancer
+
+```
+
+To enable load balancing based on Kubernetes Service name use the following property. Then the load balancer would try to call the application using an address, for example `service-a.default.svc.cluster.local`
+
+```
+spring.cloud.kubernetes.loadbalancer.mode=SERVICE
+```
+
+To enable load balancing across all namespaces use the following property. Property from `spring-cloud-kubernetes-discovery` module is respected.
+ +``` +spring.cloud.kubernetes.discovery.all-namespaces=true +``` + +If a service needs to be accessed over HTTPS you need to add a label or annotation to your service definition with the name `secured` and the value `true` and the load balancer will then use HTTPS to make requests to the service. + +## 11. Security Configurations Inside Kubernetes + +### 11.1. Namespace + +Most of the components provided in this project need to know the namespace. For Kubernetes (1.3+), the namespace is made available to the pod as part of the service account secret and is automatically detected by the client. +For earlier versions, it needs to be specified as an environment variable to the pod. A quick way to do this is as follows: + +``` + env: + - name: "KUBERNETES_NAMESPACE" + valueFrom: + fieldRef: + fieldPath: "metadata.namespace" +``` + +### 11.2. Service Account + +For distributions of Kubernetes that support more fine-grained role-based access within the cluster, you need to make sure a pod that runs with `spring-cloud-kubernetes` has access to the Kubernetes API. +For any service accounts you assign to a deployment or pod, you need to make sure they have the correct roles. + +Depending on the requirements, you’ll need `get`, `list` and `watch` permission on the following resources: + +| Dependency | Resources | +|----------------------------------------------|-------------------------| +| spring-cloud-starter-kubernetes-fabric8 |pods, services, endpoints| +|spring-cloud-starter-kubernetes-fabric8-config| configmaps, secrets | +| spring-cloud-starter-kubernetes-client |pods, services, endpoints| +|spring-cloud-starter-kubernetes-client-config | configmaps, secrets | + +For development purposes, you can add `cluster-reader` permissions to your `default` service account. On a production system you’ll likely want to provide more granular permissions. 
+
+The following Role and RoleBinding are an example for namespaced permissions for the `default` account:
+
+```
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  namespace: YOUR-NAME-SPACE
+  name: namespace-reader
+rules:
+  - apiGroups: [""]
+    resources: ["configmaps", "pods", "services", "endpoints", "secrets"]
+    verbs: ["get", "list", "watch"]
+
+---
+
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: namespace-reader-binding
+  namespace: YOUR-NAME-SPACE
+subjects:
+- kind: ServiceAccount
+  name: default
+  apiGroup: ""
+roleRef:
+  kind: Role
+  name: namespace-reader
+  apiGroup: ""
+```
+
+## 12. Service Registry Implementation
+
+In Kubernetes service registration is controlled by the platform; the application itself does not control
+registration as it may do in other platforms. For this reason using `spring.cloud.service-registry.auto-registration.enabled` or setting `@EnableDiscoveryClient(autoRegister=false)` will have no effect in Spring Cloud Kubernetes.
+
+## 13. Spring Cloud Kubernetes Configuration Watcher
+
+Kubernetes provides the ability to [mount a ConfigMap or Secret as a volume](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#add-configmap-data-to-a-volume) in the container of your application. When the contents of the ConfigMap or Secret changes, the [mounted volume will be updated with those changes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#mounted-configmaps-are-updated-automatically).
+
+However, Spring Boot will not automatically update those changes unless you restart the application. Spring Cloud
+provides the ability to refresh the application context without restarting the application by either hitting the
+actuator endpoint `/refresh` or via publishing a `RefreshRemoteApplicationEvent` using Spring Cloud Bus.
+
+To achieve this configuration refresh of a Spring Cloud app running on Kubernetes, you can deploy the Spring Cloud
+Kubernetes Configuration Watcher controller into your Kubernetes cluster.
+
+The application is published as a container and is available on [Docker Hub](https://hub.docker.com/r/springcloud/spring-cloud-kubernetes-configuration-watcher).
+
+Spring Cloud Kubernetes Configuration Watcher can send refresh notifications to applications in two ways.
+
+1. Over HTTP in which case the application being notified must have the `/refresh` actuator endpoint exposed and accessible from within the cluster
+
+2. Using Spring Cloud Bus, in which case you will need a message broker deployed to your cluster for the application to use.
+
+### 13.1. Deployment YAML
+
+Below is a sample deployment YAML you can use to deploy the Kubernetes Configuration Watcher to Kubernetes.
+
+```
+---
+apiVersion: v1
+kind: List
+items:
+  - apiVersion: v1
+    kind: Service
+    metadata:
+      labels:
+        app: spring-cloud-kubernetes-configuration-watcher
+      name: spring-cloud-kubernetes-configuration-watcher
+    spec:
+      ports:
+        - name: http
+          port: 8888
+          targetPort: 8888
+      selector:
+        app: spring-cloud-kubernetes-configuration-watcher
+      type: ClusterIP
+  - apiVersion: v1
+    kind: ServiceAccount
+    metadata:
+      labels:
+        app: spring-cloud-kubernetes-configuration-watcher
+      name: spring-cloud-kubernetes-configuration-watcher
+  - apiVersion: rbac.authorization.k8s.io/v1
+    kind: RoleBinding
+    metadata:
+      labels:
+        app: spring-cloud-kubernetes-configuration-watcher
+      name: spring-cloud-kubernetes-configuration-watcher:view
+    roleRef:
+      kind: Role
+      apiGroup: rbac.authorization.k8s.io
+      name: namespace-reader
+    subjects:
+      - kind: ServiceAccount
+        name: spring-cloud-kubernetes-configuration-watcher
+  - apiVersion: rbac.authorization.k8s.io/v1
+    kind: Role
+    metadata:
+      namespace: default
+      name: namespace-reader
+    rules:
+      - apiGroups: ["", "extensions", "apps"]
+        resources: ["configmaps", "pods", 
"services", "endpoints", "secrets"] + verbs: ["get", "list", "watch"] + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: spring-cloud-kubernetes-configuration-watcher-deployment + spec: + selector: + matchLabels: + app: spring-cloud-kubernetes-configuration-watcher + template: + metadata: + labels: + app: spring-cloud-kubernetes-configuration-watcher + spec: + serviceAccount: spring-cloud-kubernetes-configuration-watcher + containers: + - name: spring-cloud-kubernetes-configuration-watcher + image: springcloud/spring-cloud-kubernetes-configuration-watcher:2.0.1-SNAPSHOT + imagePullPolicy: IfNotPresent + readinessProbe: + httpGet: + port: 8888 + path: /actuator/health/readiness + livenessProbe: + httpGet: + port: 8888 + path: /actuator/health/liveness + ports: + - containerPort: 8888 +``` + +The Service Account and associated Role Binding is important for Spring Cloud Kubernetes Configuration to work properly. +The controller needs access to read data about ConfigMaps, Pods, Services, Endpoints and Secrets in the Kubernetes cluster. + +### 13.2. Monitoring ConfigMaps and Secrets + +Spring Cloud Kubernetes Configuration Watcher will react to changes in ConfigMaps with a label of `spring.cloud.kubernetes.config` with the value `true`or any Secret with a label of `spring.cloud.kubernetes.secret` with the value `true`. If the ConfigMap or Secret does not have either of those labels +or the values of those labels is not `true` then any changes will be ignored. + +The labels Spring Cloud Kubernetes Configuration Watcher looks for on ConfigMaps and Secrets can be changed by setting`spring.cloud.kubernetes.configuration.watcher.configLabel` and `spring.cloud.kubernetes.configuration.watcher.secretLabel` respectively. + +If a change is made to a ConfigMap or Secret with valid labels then Spring Cloud Kubernetes Configuration Watcher will take the name of the ConfigMap or Secret +and send a notification to the application with that name. + +### 13.3. 
HTTP Implementation + +The HTTP implementation is what is used by default. When this implementation is used Spring Cloud Kubernetes Configuration Watcher and a +change to a ConfigMap or Secret occurs then the HTTP implementation will use the Spring Cloud Kubernetes Discovery Client to fetch all +instances of the application which match the name of the ConfigMap or Secret and send an HTTP POST request to the application’s actuator`/refresh` endpoint. By default it will send the post request to `/actuator/refresh` using the port registered in the discovery client. + +#### 13.3.1. Non-Default Management Port and Actuator Path + +If the application is using a non-default actuator path and/or using a different port for the management endpoints, the Kubernetes service for the application +can add an annotation called `boot.spring.io/actuator` and set its value to the path and port used by the application. For example + +``` +apiVersion: v1 +kind: Service +metadata: + labels: + app: config-map-demo + name: config-map-demo + annotations: + boot.spring.io/actuator: http://:9090/myactuator/home +spec: + ports: + - name: http + port: 8080 + targetPort: 8080 + selector: + app: config-map-demo +``` + +Another way you can choose to configure the actuator path and/or management port is by setting`spring.cloud.kubernetes.configuration.watcher.actuatorPath` and `spring.cloud.kubernetes.configuration.watcher.actuatorPort`. + +### 13.4. Messaging Implementation + +The messaging implementation can be enabled by setting profile to either `bus-amqp` (RabbitMQ) or `bus-kafka` (Kafka) when the Spring Cloud Kubernetes Configuration Watcher +application is deployed to Kubernetes. + +### 13.5. Configuring RabbitMQ + +When the `bus-amqp` profile is enabled you will need to configure Spring RabbitMQ to point it to the location of the RabbitMQ +instance you would like to use as well as any credentials necessary to authenticate. 
This can be done
+by setting the standard Spring RabbitMQ properties, for example
+
+```
+spring:
+  rabbitmq:
+    username: user
+    password: password
+    host: rabbitmq
+```
+
+### 13.6. Configuring Kafka
+
+When the `bus-kafka` profile is enabled you will need to configure Spring Kafka to point it to the location of the Kafka Broker
+instance you would like to use. This can be done by setting the standard Spring Kafka properties, for example
+
+```
+spring:
+  kafka:
+    producer:
+      bootstrap-servers: localhost:9092
+```
+
+## 14. Spring Cloud Kubernetes Config Server
+
+The Spring Cloud Kubernetes Config Server is based on [Spring Cloud Config Server](https://spring.io/projects/spring-cloud-config) and adds an [environment repository](https://docs.spring.io/spring-cloud-config/docs/current/reference/html/#_environment_repository) for Kubernetes [Config Maps](https://kubernetes.io/docs/concepts/configuration/configmap/) and [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/).
+
+This component is completely optional. However, it allows you to continue to leverage configuration
+you may have stored in existing environment repositories (Git, SVN, Vault, etc) with applications that you are running on Kubernetes.
+
+A default image is located on [Docker Hub](https://hub.docker.com/r/springcloud/spring-cloud-kubernetes-configserver) which will allow you to easily get a Config Server deployed on Kubernetes without building
+the code and image yourself. However, if you need to customize the config server behavior you can easily build your own
+image from the source code on GitHub and use that.
+
+### 14.1. Configuration
+
+#### 14.1.1. Enabling The Kubernetes Environment Repository
+
+To enable the Kubernetes environment repository the `kubernetes` profile must be included in the list of active profiles.
+You may activate other profiles as well to use other environment repository implementations.
+
+#### 14.1.2. 
Config Map and Secret PropertySources + +By default, only Config Map data will be fetched. To enable Secrets as well you will need to set `spring.cloud.kubernetes.secrets.enableApi=true`. +You can disable the Config Map `PropertySource` by setting `spring.cloud.kubernetes.config.enableApi=false`. + +#### 14.1.3. Fetching Config Map and Secret Data From Additional Namespaces + +By default, the Kubernetes environment repository will only fetch Config Map and Secrets from the namespace in which it is deployed. +If you want to include data from other namespaces you can set `spring.cloud.kubernetes.configserver.config-map-namespaces` and/or `spring.cloud.kubernetes.configserver.secrets-namespaces` to a comma separated +list of namespace values. + +| |If you set `spring.cloud.kubernetes.configserver.config-map-namespaces` and/or `spring.cloud.kubernetes.configserver.secrets-namespaces`you will need to include the namespace in which the Config Server is deployed in order to continue to fetch Config Map and Secret data from that namespace.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 14.1.4. Kubernetes Access Controls + +The Kubernetes Config Server uses the Kubernetes API server to fetch Config Map and Secret data. In order for it to do that +it needs ability to `get` and `list` Config Map and Secrets (depending on what you enable/disable). + +### 14.2. Deployment Yaml + +Below is a sample deployment, service and permissions configuration you can use to deploy a basic Config Server to Kubernetes. 
+ +``` +--- +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: Service + metadata: + labels: + app: spring-cloud-kubernetes-configserver + name: spring-cloud-kubernetes-configserver + spec: + ports: + - name: http + port: 8888 + targetPort: 8888 + selector: + app: spring-cloud-kubernetes-configserver + type: ClusterIP + - apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + app: spring-cloud-kubernetes-configserver + name: spring-cloud-kubernetes-configserver + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + app: spring-cloud-kubernetes-configserver + name: spring-cloud-kubernetes-configserver:view + roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: namespace-reader + subjects: + - kind: ServiceAccount + name: spring-cloud-kubernetes-configserver + - apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + namespace: default + name: namespace-reader + rules: + - apiGroups: ["", "extensions", "apps"] + resources: ["configmaps", "secrets"] + verbs: ["get", "list"] + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: spring-cloud-kubernetes-configserver-deployment + spec: + selector: + matchLabels: + app: spring-cloud-kubernetes-configserver + template: + metadata: + labels: + app: spring-cloud-kubernetes-configserver + spec: + serviceAccount: spring-cloud-kubernetes-configserver + containers: + - name: spring-cloud-kubernetes-configserver + image: springcloud/spring-cloud-kubernetes-configserver + imagePullPolicy: IfNotPresent + env: + - name: SPRING_PROFILES_INCLUDE + value: "kubernetes" + readinessProbe: + httpGet: + port: 8888 + path: /actuator/health/readiness + livenessProbe: + httpGet: + port: 8888 + path: /actuator/health/liveness + ports: + - containerPort: 8888 +``` + +## 15. 
Spring Cloud Kubernetes Discovery Server
+
+The Spring Cloud Kubernetes Discovery Server provides HTTP endpoints apps can use to gather information
+about services available within a Kubernetes cluster. The Spring Cloud Kubernetes Discovery Server
+can be used by apps using the `spring-cloud-starter-kubernetes-discoveryclient` to provide data to
+the `DiscoveryClient` implementation provided by that starter.
+
+### 15.1. Permissions
+
+The Spring Cloud Discovery server uses
+the Kubernetes API server to get data about Service and Endpoint resources so it needs list, watch, and
+get permissions to use those endpoints. See the below sample Kubernetes deployment YAML for an
+example of how to configure the Service Account on Kubernetes.
+
+### 15.2. Endpoints
+
+There are three endpoints exposed by the server.
+
+#### 15.2.1. `/apps`
+
+A `GET` request sent to `/apps` will return a JSON array of available services. Each item contains
+the name of the Kubernetes service and service instance information. Below is a sample response.
+ +``` +[ + { + "name":"spring-cloud-kubernetes-discoveryserver", + "serviceInstances":[ + { + "instanceId":"836a2f25-daee-4af2-a1be-aab9ce2b938f", + "serviceId":"spring-cloud-kubernetes-discoveryserver", + "host":"10.244.1.6", + "port":8761, + "uri":"http://10.244.1.6:8761", + "secure":false, + "metadata":{ + "app":"spring-cloud-kubernetes-discoveryserver", + "kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"spring-cloud-kubernetes-discoveryserver\"},\"name\":\"spring-cloud-kubernetes-discoveryserver\",\"namespace\":\"default\"},\"spec\":{\"ports\":[{\"name\":\"http\",\"port\":80,\"targetPort\":8761}],\"selector\":{\"app\":\"spring-cloud-kubernetes-discoveryserver\"},\"type\":\"ClusterIP\"}}\n", + "http":"8761" + }, + "namespace":"default", + "scheme":"http" + } + ] + }, + { + "name":"kubernetes", + "serviceInstances":[ + { + "instanceId":"1234", + "serviceId":"kubernetes", + "host":"172.18.0.3", + "port":6443, + "uri":"http://172.18.0.3:6443", + "secure":false, + "metadata":{ + "provider":"kubernetes", + "component":"apiserver", + "https":"6443" + }, + "namespace":"default", + "scheme":"http" + } + ] + } +] +``` + +#### 15.2.2. `/app/{name}` + +A `GET` request to `/app/{name}` can be used to get instance data for all instances of a given +service. Below is a sample response when a `GET` request is made to `/app/kubernetes`. + +``` +[ + { + "instanceId":"1234", + "serviceId":"kubernetes", + "host":"172.18.0.3", + "port":6443, + "uri":"http://172.18.0.3:6443", + "secure":false, + "metadata":{ + "provider":"kubernetes", + "component":"apiserver", + "https":"6443" + }, + "namespace":"default", + "scheme":"http" + } +] +``` + +#### 15.2.3. `/app/{name}/{instanceid}` + +A `GET` request made to `/app/{name}/{instanceid}` will return the instance data for a specific +instance of a given service. 
Below is a sample response when a `GET` request is made to `/app/kubernetes/1234`. + +``` + { + "instanceId":"1234", + "serviceId":"kubernetes", + "host":"172.18.0.3", + "port":6443, + "uri":"http://172.18.0.3:6443", + "secure":false, + "metadata":{ + "provider":"kubernetes", + "component":"apiserver", + "https":"6443" + }, + "namespace":"default", + "scheme":"http" + } +``` + +### 15.3. Deployment YAML + +An image of the Spring Cloud Discovery Server is hosted on [Docker Hub](https://hub.docker.com/r/springcloud/spring-cloud-kubernetes-discoveryserver). + +Below is a sample deployment YAML you can use to deploy the Kubernetes Configuration Watcher to Kubernetes. + +``` +--- +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: Service + metadata: + labels: + app: spring-cloud-kubernetes-discoveryserver + name: spring-cloud-kubernetes-discoveryserver + spec: + ports: + - name: http + port: 80 + targetPort: 8761 + selector: + app: spring-cloud-kubernetes-discoveryserver + type: ClusterIP + - apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + app: spring-cloud-kubernetes-discoveryserver + name: spring-cloud-kubernetes-discoveryserver + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + app: spring-cloud-kubernetes-discoveryserver + name: spring-cloud-kubernetes-discoveryserver:view + roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: namespace-reader + subjects: + - kind: ServiceAccount + name: spring-cloud-kubernetes-discoveryserver + - apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + namespace: default + name: namespace-reader + rules: + - apiGroups: ["", "extensions", "apps"] + resources: ["services", "endpoints"] + verbs: ["get", "list", "watch"] + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: spring-cloud-kubernetes-discoveryserver-deployment + spec: + selector: + matchLabels: + app: spring-cloud-kubernetes-discoveryserver + template: + metadata: + labels: 
+ app: spring-cloud-kubernetes-discoveryserver + spec: + serviceAccount: spring-cloud-kubernetes-discoveryserver + containers: + - name: spring-cloud-kubernetes-discoveryserver + image: springcloud/spring-cloud-kubernetes-discoveryserver:2.1.0-SNAPSHOT + imagePullPolicy: IfNotPresent + readinessProbe: + httpGet: + port: 8761 + path: /actuator/health/readiness + livenessProbe: + httpGet: + port: 8761 + path: /actuator/health/liveness + ports: + - containerPort: 8761 +``` + +## 16. Examples + +Spring Cloud Kubernetes tries to make it transparent for your applications to consume Kubernetes Native Services by +following the Spring Cloud interfaces. + +In your applications, you need to add the `spring-cloud-kubernetes-discovery` dependency to your classpath and remove any other dependency that contains a `DiscoveryClient` implementation (that is, a Eureka discovery client). +The same applies for `PropertySourceLocator`, where you need to add to the classpath the `spring-cloud-kubernetes-config` and remove any other dependency that contains a `PropertySourceLocator` implementation (that is, a configuration server client). + +The following projects highlight the usage of these dependencies and demonstrate how you can use these libraries from any Spring Boot application: + +* [Spring Cloud Kubernetes Examples](https://github.com/spring-cloud/spring-cloud-kubernetes/tree/master/spring-cloud-kubernetes-examples): the ones located inside this repository. 
+ +* Spring Cloud Kubernetes Full Example: Minions and Boss + + * [Minion](https://github.com/salaboy/spring-cloud-k8s-minion) + + * [Boss](https://github.com/salaboy/spring-cloud-k8s-boss) + +* Spring Cloud Kubernetes Full Example: [SpringOne Platform Tickets Service](https://github.com/salaboy/s1p_docs) + +* [Spring Cloud Gateway with Spring Cloud Kubernetes Discovery and Config](https://github.com/salaboy/s1p_gateway) + +* [Spring Boot Admin with Spring Cloud Kubernetes Discovery and Config](https://github.com/salaboy/showcase-admin-tool) + +## 17. Other Resources + +This section lists other resources, such as presentations (slides) and videos about Spring Cloud Kubernetes. + +* [S1P Spring Cloud on PKS](https://salaboy.com/2018/09/27/the-s1p-experience/) + +* [Spring Cloud, Docker, Kubernetes → London Java Community July 2018](https://salaboy.com/2018/07/18/ljc-july-18-spring-cloud-docker-k8s/) + +Please feel free to submit other resources through pull requests to [this repository](https://github.com/spring-cloud/spring-cloud-kubernetes). + +## 18. Configuration properties + +To see the list of all Kubernetes related configuration properties please check [the Appendix page](appendix.html). + +## 19. Building + +### 19.1. Basic Compile and Test + +To build the source you will need to install JDK 17. + +Spring Cloud uses Maven for most build-related activities, and you +should be able to get off the ground quite quickly by cloning the +project you are interested in and typing + +``` +$ ./mvnw install +``` + +| |You can also install Maven (\>=3.3.3) yourself and run the `mvn` command
in place of `./mvnw` in the examples below. If you do that you also
might need to add `-P spring` if your local Maven settings do not
contain repository declarations for spring pre-release artifacts.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Be aware that you might need to increase the amount of memory
available to Maven by setting a `MAVEN_OPTS` environment variable with
a value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in
the `.mvn` configuration, so if you find you have to do it to make a
build succeed, please raise a ticket to get the settings added to
source control.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The projects that require middleware (i.e. Redis) for testing generally +require that a local instance of [Docker]([www.docker.com/get-started](https://www.docker.com/get-started)) is installed and running. + +### 19.2. Documentation + +The spring-cloud-build module has a "docs" profile, and if you switch +that on it will try to build asciidoc sources from`src/main/asciidoc`. As part of that process it will look for a`README.adoc` and process it by loading all the includes, but not +parsing or rendering it, just copying it to `${main.basedir}`(defaults to `$/tmp/releaser-1645122597379-0/spring-cloud-kubernetes/docs`, i.e. the root of the project). If there are +any changes in the README it will then show up after a Maven build as +a modified file in the correct place. Just commit it and push the change. + +### 19.3. Working with the code + +If you don’t have an IDE preference we would recommend that you use[Spring Tools Suite](https://www.springsource.com/developer/sts) or[Eclipse](https://eclipse.org) when working with the code. We use the[m2eclipse](https://eclipse.org/m2e/) eclipse plugin for maven support. Other IDEs and tools +should also work without issue as long as they use Maven 3.3.3 or better. + +#### 19.3.1. Activate the Spring Maven profile + +Spring Cloud projects require the 'spring' Maven profile to be activated to resolve +the spring milestone and snapshot repositories. Use your preferred IDE to set this +profile to be active, or you may experience build errors. + +#### 19.3.2. 
Importing into eclipse with m2eclipse + +We recommend the [m2eclipse](https://eclipse.org/m2e/) eclipse plugin when working with +eclipse. If you don’t already have m2eclipse installed it is available from the "eclipse +marketplace". + +| |Older versions of m2e do not support Maven 3.3, so once the
projects are imported into Eclipse, you will also need to tell
m2eclipse to use the right profile for the projects. If you
see many different errors related to the POMs in the projects, check
that you have an up-to-date installation. If you can’t upgrade m2e,
add the "spring" profile to your `settings.xml`. Alternatively you can
copy the repository settings from the "spring" profile of the parent
pom into your `settings.xml`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 19.3.3. Importing into eclipse without m2eclipse + +If you prefer not to use m2eclipse you can generate eclipse project metadata using the +following command: + +``` +$ ./mvnw eclipse:eclipse +``` + +The generated eclipse projects can be imported by selecting `import existing projects`from the `file` menu. + +## 20. Contributing + +Spring Cloud is released under the non-restrictive Apache 2.0 license, +and follows a very standard Github development process, using Github +tracker for issues and merging pull requests into master. If you want +to contribute even something trivial please do not hesitate, but +follow the guidelines below. + +### 20.1. Sign the Contributor License Agreement + +Before we accept a non-trivial patch or pull request we will need you to sign the[Contributor License Agreement](https://cla.pivotal.io/sign/spring). +Signing the contributor’s agreement does not grant anyone commit rights to the main +repository, but it does mean that we can accept your contributions, and you will get an +author credit if we do. Active contributors might be asked to join the core team, and +given the ability to merge pull requests. + +### 20.2. Code of Conduct + +This project adheres to the Contributor Covenant [code of +conduct](https://github.com/spring-cloud/spring-cloud-build/blob/master/docs/src/main/asciidoc/code-of-conduct.adoc). By participating, you are expected to uphold this code. 
Please report +unacceptable behavior to [[email protected]](/cdn-cgi/l/email-protection#d4a7a4a6bdbab3f9b7bbb0b1f9bbb2f9b7bbbab0a1b7a094a4bda2bba0b5b8fabdbb). + +### 20.3. Code Conventions and Housekeeping + +None of these is essential for a pull request, but they will all help. They can also be +added after the original pull request but before a merge. + +* Use the Spring Framework code format conventions. If you use Eclipse + you can import formatter settings using the`eclipse-code-formatter.xml` file from the[Spring + Cloud Build](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-dependencies-parent/eclipse-code-formatter.xml) project. If using IntelliJ, you can use the[Eclipse Code Formatter + Plugin](https://plugins.jetbrains.com/plugin/6546) to import the same file. + +* Make sure all new `.java` files to have a simple Javadoc class comment with at least an`@author` tag identifying you, and preferably at least a paragraph on what the class is + for. + +* Add the ASF license header comment to all new `.java` files (copy from existing files + in the project) + +* Add yourself as an `@author` to the .java files that you modify substantially (more + than cosmetic changes). + +* Add some Javadocs and, if you change the namespace, some XSD doc elements. + +* A few unit tests would help a lot as well — someone has to do it. + +* If no-one else is using your branch, please rebase it against the current master (or + other target branch in the main project). + +* When writing a commit message please follow [these conventions](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), + if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit + message (where XXXX is the issue number). + +### 20.4. Checkstyle + +Spring Cloud Build comes with a set of checkstyle rules. You can find them in the `spring-cloud-build-tools` module. 
The most notable files under the module are: + +spring-cloud-build-tools/ + +``` +└── src +    ├── checkstyle +    │   └── checkstyle-suppressions.xml (3) +    └── main +    └── resources +    ├── checkstyle-header.txt (2) +    └── checkstyle.xml (1) +``` + +|**1**|Default Checkstyle rules | +|-----|-------------------------| +|**2**| File header setup | +|**3**|Default suppression rules| + +#### 20.4.1. Checkstyle configuration + +Checkstyle rules are **disabled by default**. To add checkstyle to your project just define the following properties and plugins. + +pom.xml + +``` + +true (1) + true + (2) + true + (3) + + + + + (4) + io.spring.javaformat + spring-javaformat-maven-plugin + + (5) + org.apache.maven.plugins + maven-checkstyle-plugin + + + + + + (5) + org.apache.maven.plugins + maven-checkstyle-plugin + + + + +``` + +|**1**| Fails the build upon Checkstyle errors | +|-----|--------------------------------------------------------------------------------------------------------------| +|**2**| Fails the build upon Checkstyle violations | +|**3**| Checkstyle analyzes also the test sources | +|**4**|Add the Spring Java Format plugin that will reformat your code to pass most of the Checkstyle formatting rules| +|**5**| Add checkstyle plugin to your build and reporting phases | + +If you need to suppress some rules (e.g. line length needs to be longer), then it’s enough for you to define a file under `${project.root}/src/checkstyle/checkstyle-suppressions.xml` with your suppressions. Example: + +projectRoot/src/checkstyle/checkstyle-suppresions.xml + +``` + + + + + + +``` + +It’s advisable to copy the `${spring-cloud-build.rootFolder}/.editorconfig` and `${spring-cloud-build.rootFolder}/.springformat` to your project. That way, some default formatting rules will be applied. 
You can do so by running this script: + +``` +$ curl https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/.editorconfig -o .editorconfig +$ touch .springformat +``` + +### 20.5. IDE setup + +#### 20.5.1. Intellij IDEA + +In order to setup Intellij you should import our coding conventions, inspection profiles and set up the checkstyle plugin. +The following files can be found in the [Spring Cloud Build](https://github.com/spring-cloud/spring-cloud-build/tree/master/spring-cloud-build-tools) project. + +spring-cloud-build-tools/ + +``` +└── src +    ├── checkstyle +    │   └── checkstyle-suppressions.xml (3) +    └── main +    └── resources +    ├── checkstyle-header.txt (2) +    ├── checkstyle.xml (1) +    └── intellij +       ├── Intellij_Project_Defaults.xml (4) +       └── Intellij_Spring_Boot_Java_Conventions.xml (5) +``` + +|**1**| Default Checkstyle rules | +|-----|--------------------------------------------------------------------------| +|**2**| File header setup | +|**3**| Default suppression rules | +|**4**| Project defaults for Intellij that apply most of Checkstyle rules | +|**5**|Project style conventions for Intellij that apply most of Checkstyle rules| + +![Code style](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/docs/src/main/asciidoc/images/intellij-code-style.png) + +Figure 1. Code style + +Go to `File` → `Settings` → `Editor` → `Code style`. There click on the icon next to the `Scheme` section. There, click on the `Import Scheme` value and pick the `Intellij IDEA code style XML` option. Import the `spring-cloud-build-tools/src/main/resources/intellij/Intellij_Spring_Boot_Java_Conventions.xml` file. + +![Code style](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/docs/src/main/asciidoc/images/intellij-inspections.png) + +Figure 2. Inspection profiles + +Go to `File` → `Settings` → `Editor` → `Inspections`. There click on the icon next to the `Profile` section. 
There, click on the `Import Profile` and import the `spring-cloud-build-tools/src/main/resources/intellij/Intellij_Project_Defaults.xml` file. + +Checkstyle + +To have Intellij work with Checkstyle, you have to install the `Checkstyle` plugin. It’s advisable to also install the `Assertions2Assertj` to automatically convert the JUnit assertions + +![Checkstyle](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/docs/src/main/asciidoc/images/intellij-checkstyle.png) + +Go to `File` → `Settings` → `Other settings` → `Checkstyle`. There click on the `+` icon in the `Configuration file` section. There, you’ll have to define where the checkstyle rules should be picked from. In the image above, we’ve picked the rules from the cloned Spring Cloud Build repository. However, you can point to the Spring Cloud Build’s GitHub repository (e.g. for the `checkstyle.xml` : `[raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle.xml](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle.xml)`). We need to provide the following variables: + +* `checkstyle.header.file` - please point it to the Spring Cloud Build’s, `spring-cloud-build-tools/src/main/resources/checkstyle-header.txt` file either in your cloned repo or via the `[raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle-header.txt](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/main/resources/checkstyle-header.txt)` URL. + +* `checkstyle.suppressions.file` - default suppressions. 
Please point it to the Spring Cloud Build’s, `spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml` file either in your cloned repo or via the `[raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml](https://raw.githubusercontent.com/spring-cloud/spring-cloud-build/master/spring-cloud-build-tools/src/checkstyle/checkstyle-suppressions.xml)` URL. + +* `checkstyle.additional.suppressions.file` - this variable corresponds to suppressions in your local project. E.g. you’re working on `spring-cloud-contract`. Then point to the `project-root/src/checkstyle/checkstyle-suppressions.xml` folder. Example for `spring-cloud-contract` would be: `/home/username/spring-cloud-contract/src/checkstyle/checkstyle-suppressions.xml`. + +| |Remember to set the `Scan Scope` to `All sources` since we apply checkstyle rules for production and test sources.| +|---|------------------------------------------------------------------------------------------------------------------| + +### 20.6. Duplicate Finder + +Spring Cloud Build brings along the `basepom:duplicate-finder-maven-plugin`, that enables flagging duplicate and conflicting classes and resources on the java classpath. + +#### 20.6.1. Duplicate Finder configuration + +Duplicate finder is **enabled by default** and will run in the `verify` phase of your Maven build, but it will only take effect in your project if you add the `duplicate-finder-maven-plugin` to the `build` section of the projecst’s `pom.xml`. + +pom.xml + +``` + + + + org.basepom.maven + duplicate-finder-maven-plugin + + + +``` + +For other properties, we have set defaults as listed in the [plugin documentation](https://github.com/basepom/duplicate-finder-maven-plugin/wiki). + +You can easily override them but setting the value of the selected property prefixed with `duplicate-finder-maven-plugin`. 
For example, set `duplicate-finder-maven-plugin.skip` to `true` in order to skip duplicates check in your build. + +If you need to add `ignoredClassPatterns` or `ignoredResourcePatterns` to your setup, make sure to add them in the plugin configuration section of your project: + +``` + + + + org.basepom.maven + duplicate-finder-maven-plugin + + + org.joda.time.base.BaseDateTime + .*module-info + + + changelog.txt + + + + + +``` + diff --git a/docs/en/spring-cloud/spring-cloud-netflix.md b/docs/en/spring-cloud/spring-cloud-netflix.md new file mode 100644 index 0000000000000000000000000000000000000000..6efa497eacb880fb8e399f4a7cb494e310f20464 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-netflix.md @@ -0,0 +1,544 @@ +# Spring Cloud Netflix + +## 1. Service Discovery: Eureka Clients + +Service Discovery is one of the key tenets of a microservice-based architecture. +Trying to hand-configure each client or some form of convention can be difficult to do and can be brittle. +Eureka is the Netflix Service Discovery Server and Client. +The server can be configured and deployed to be highly available, with each server replicating state about the registered services to the others. + +### 1.1. How to Include Eureka Client + +To include the Eureka Client in your project, use the starter with a group ID of `org.springframework.cloud` and an artifact ID of `spring-cloud-starter-netflix-eureka-client`. +See the [Spring Cloud Project page](https://projects.spring.io/spring-cloud/) for details on setting up your build system with the current Spring Cloud Release Train. + +### 1.2. Registering with Eureka + +When a client registers with Eureka, it provides meta-data about itself — such as host, port, health indicator URL, home page, and other details. +Eureka receives heartbeat messages from each instance belonging to a service. +If the heartbeat fails over a configurable timetable, the instance is normally removed from the registry. 
+ +The following example shows a minimal Eureka client application: + +``` +@SpringBootApplication +@RestController +public class Application { + + @RequestMapping("/") + public String home() { + return "Hello world"; + } + + public static void main(String[] args) { + new SpringApplicationBuilder(Application.class).web(true).run(args); + } + +} +``` + +Note that the preceding example shows a normal [Spring Boot](https://projects.spring.io/spring-boot/) application. +By having `spring-cloud-starter-netflix-eureka-client` on the classpath, your application automatically registers with the Eureka Server. Configuration is required to locate the Eureka server, as shown in the following example: + +application.yml + +``` +eureka: + client: + serviceUrl: + defaultZone: http://localhost:8761/eureka/ +``` + +In the preceding example, `defaultZone` is a magic string fallback value that provides the service URL for any client that does not express a preference (in other words, it is a useful default). + +| |The `defaultZone` property is case sensitive and requires camel case because the `serviceUrl` property is a `Map`. Therefore, the `defaultZone` property does not follow the normal Spring Boot snake-case convention of `default-zone`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The default application name (that is, the service ID), virtual host, and non-secure port (taken from the `Environment`) are `${spring.application.name}`, `${spring.application.name}` and `${server.port}`, respectively. + +Having `spring-cloud-starter-netflix-eureka-client` on the classpath makes the app into both a Eureka “instance” (that is, it registers itself) and a “client” (it can query the registry to locate other services). 
+The instance behaviour is driven by `eureka.instance.*` configuration keys, but the defaults are fine if you ensure that your application has a value for `spring.application.name` (this is the default for the Eureka service ID or VIP). + +See [EurekaInstanceConfigBean](https://github.com/spring-cloud/spring-cloud-netflix/tree/main/spring-cloud-netflix-eureka-client/src/main/java/org/springframework/cloud/netflix/eureka/EurekaInstanceConfigBean.java) and [EurekaClientConfigBean](https://github.com/spring-cloud/spring-cloud-netflix/tree/main/spring-cloud-netflix-eureka-client/src/main/java/org/springframework/cloud/netflix/eureka/EurekaClientConfigBean.java) for more details on the configurable options. + +To disable the Eureka Discovery Client, you can set `eureka.client.enabled` to `false`. Eureka Discovery Client will also be disabled when `spring.cloud.discovery.enabled` is set to `false`. + +### 1.3. Authenticating with the Eureka Server + +HTTP basic authentication is automatically added to your eureka client if one of the `eureka.client.serviceUrl.defaultZone` URLs has credentials embedded in it (curl style, as follows: `[user:[email protected]:8761/eureka](https://user:password@localhost:8761/eureka)`). +For more complex needs, you can create a `@Bean` of type `DiscoveryClientOptionalArgs` and inject `ClientFilter` instances into it, all of which is applied to the calls from the client to the server. + +When Eureka server requires client side certificate for authentication, the client side certificate and trust store can be configured via properties, as shown in following example: + +application.yml + +``` +eureka: + client: + tls: + enabled: true + key-store: + key-store-type: PKCS12 + key-store-password: + key-password: + trust-store: + trust-store-type: PKCS12 + trust-store-password: +``` + +The `eureka.client.tls.enabled` needs to be true to enable Eureka client side TLS. When `eureka.client.tls.trust-store` is omitted, a JVM default trust store is used. 
The default value for `eureka.client.tls.key-store-type` and `eureka.client.tls.trust-store-type` is PKCS12. When password properties are omitted, empty password is assumed. + +| |Because of a limitation in Eureka, it is not possible to support per-server basic auth credentials, so only the first set that are found is used.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------| + +If you want to customize the RestTemplate used by the Eureka HTTP Client you may want to create a bean of `EurekaClientHttpRequestFactorySupplier` and provide your own logic for generating a `ClientHttpRequestFactory` instance. + +### 1.4. Status Page and Health Indicator + +The status page and health indicators for a Eureka instance default to `/info` and `/health` respectively, which are the default locations of useful endpoints in a Spring Boot Actuator application. +You need to change these, even for an Actuator application if you use a non-default context path or servlet path (such as `server.servletPath=/custom`). The following example shows the default values for the two settings: + +application.yml + +``` +eureka: + instance: + statusPageUrlPath: ${server.servletPath}/info + healthCheckUrlPath: ${server.servletPath}/health +``` + +These links show up in the metadata that is consumed by clients and are used in some scenarios to decide whether to send requests to your application, so it is helpful if they are accurate. + +| |In Dalston it was also required to set the status and health check URLs when changing
that management context path. This requirement was removed beginning in Edgware.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.5. Registering a Secure Application + +If your app wants to be contacted over HTTPS, you can set two flags in the `EurekaInstanceConfigBean`: + +* `eureka.instance.[nonSecurePortEnabled]=[false]` + +* `eureka.instance.[securePortEnabled]=[true]` + +Doing so makes Eureka publish instance information that shows an explicit preference for secure communication. +The Spring Cloud `DiscoveryClient` always returns a URI starting with `https` for a service configured this way. +Similarly, when a service is configured this way, the Eureka (native) instance information has a secure health check URL. + +Because of the way Eureka works internally, it still publishes a non-secure URL for the status and home pages unless you also override those explicitly. +You can use placeholders to configure the eureka instance URLs, as shown in the following example: + +application.yml + +``` +eureka: + instance: + statusPageUrl: https://${eureka.hostname}/info + healthCheckUrl: https://${eureka.hostname}/health + homePageUrl: https://${eureka.hostname}/ +``` + +(Note that `${eureka.hostname}` is a native placeholder only available +in later versions of Eureka. You could achieve the same thing with +Spring placeholders as well — for example, by using `${eureka.instance.hostName}`.) + +| |If your application runs behind a proxy, and the SSL termination is in the proxy (for example, if you run in Cloud Foundry or other platforms as a service), then you need to ensure that the proxy “forwarded” headers are intercepted and handled by the application.
If the Tomcat container embedded in a Spring Boot application has explicit configuration for the `X-Forwarded-\*` headers, this happens automatically.<br/>
The links rendered by your app to itself being wrong (the wrong host, port, or protocol) is a sign that you got this configuration wrong.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.6. Eureka’s Health Checks + +By default, Eureka uses the client heartbeat to determine if a client is up. +Unless specified otherwise, the Discovery Client does not propagate the current health check status of the application, per the Spring Boot Actuator. +Consequently, after successful registration, Eureka always announces that the application is in 'UP' state. This behavior can be altered by enabling Eureka health checks, which results in propagating application status to Eureka. +As a consequence, every other application does not send traffic to applications in states other then 'UP'. +The following example shows how to enable health checks for the client: + +application.yml + +``` +eureka: + client: + healthcheck: + enabled: true +``` + +| |`eureka.client.healthcheck.enabled=true` should only be set in `application.yml`. 
Setting the value in `bootstrap.yml` causes undesirable side effects, such as registering in Eureka with an `UNKNOWN` status.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you require more control over the health checks, consider implementing your own `com.netflix.appinfo.HealthCheckHandler`. + +### 1.7. Eureka Metadata for Instances and Clients + +It is worth spending a bit of time understanding how the Eureka metadata works, so you can use it in a way that makes sense in your platform. +There is standard metadata for information such as hostname, IP address, port numbers, the status page, and health check. +These are published in the service registry and used by clients to contact the services in a straightforward way. +Additional metadata can be added to the instance registration in the `eureka.instance.metadataMap`, and this metadata is accessible in the remote clients. +In general, additional metadata does not change the behavior of the client, unless the client is made aware of the meaning of the metadata. +There are a couple of special cases, described later in this document, where Spring Cloud already assigns meaning to the metadata map. + +#### 1.7.1. Using Eureka on Cloud Foundry + +Cloud Foundry has a global router so that all instances of the same app have the same hostname (other PaaS solutions with a similar architecture have the same arrangement). +This is not necessarily a barrier to using Eureka. +However, if you use the router (recommended or even mandatory, depending on the way your platform was set up), you need to explicitly set the hostname and port numbers (secure or non-secure) so that they use the router. +You might also want to use instance metadata so that you can distinguish between the instances on the client (for example, in a custom load balancer). 
+By default, the `eureka.instance.instanceId` is `vcap.application.instance_id`, as shown in the following example: + +application.yml + +``` +eureka: + instance: + hostname: ${vcap.application.uris[0]} + nonSecurePort: 80 +``` + +Depending on the way the security rules are set up in your Cloud Foundry instance, you might be able to register and use the IP address of the host VM for direct service-to-service calls. +This feature is not yet available on Pivotal Web Services ([PWS](https://run.pivotal.io)). + +#### 1.7.2. Using Eureka on AWS + +If the application is planned to be deployed to an AWS cloud, the Eureka instance must be configured to be AWS-aware. You can do so by customizing the [EurekaInstanceConfigBean](https://github.com/spring-cloud/spring-cloud-netflix/tree/main/spring-cloud-netflix-eureka-client/src/main/java/org/springframework/cloud/netflix/eureka/EurekaInstanceConfigBean.java) as follows: + +``` +@Bean +@Profile("!default") +public EurekaInstanceConfigBean eurekaInstanceConfig(InetUtils inetUtils) { + EurekaInstanceConfigBean bean = new EurekaInstanceConfigBean(inetUtils); + AmazonInfo info = AmazonInfo.Builder.newBuilder().autoBuild("eureka"); + bean.setDataCenterInfo(info); + return bean; +} +``` + +#### 1.7.3. Changing the Eureka Instance ID + +A vanilla Netflix Eureka instance is registered with an ID that is equal to its host name (that is, there is only one service per host). +Spring Cloud Eureka provides a sensible default, which is defined as follows: + +`${spring.cloud.client.hostname}:${spring.application.name}:${spring.application.instance_id:${server.port}}` + +An example is `myhost:myappname:8080`. 
+ +By using Spring Cloud, you can override this value by providing a unique identifier in `eureka.instance.instanceId`, as shown in the following example: + +application.yml + +``` +eureka: + instance: + instanceId: ${spring.application.name}:${vcap.application.instance_id:${spring.application.instance_id:${random.value}}} +``` + +With the metadata shown in the preceding example and multiple service instances deployed on localhost, the random value is inserted there to make the instance unique. +In Cloud Foundry, the `vcap.application.instance_id` is populated automatically in a Spring Boot application, so the random value is not needed. + +### 1.8. Using the EurekaClient + +Once you have an application that is a discovery client, you can use it to discover service instances from the [Eureka Server](#spring-cloud-eureka-server). +One way to do so is to use the native `com.netflix.discovery.EurekaClient` (as opposed to the Spring Cloud `DiscoveryClient`), as shown in the following example: + +``` +@Autowired +private EurekaClient discoveryClient; + +public String serviceUrl() { + InstanceInfo instance = discoveryClient.getNextServerFromEureka("STORES", false); + return instance.getHomePageUrl(); +} +``` + +| |Do not use the `EurekaClient` in a `@PostConstruct` method or in a `@Scheduled` method (or anywhere where the `ApplicationContext` might not be started yet).
It is initialized in a `SmartLifecycle` (with `phase=0`), so the earliest you can rely on it being available is in another `SmartLifecycle` with a higher phase.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.8.1. EurekaClient with Jersey + +By default, EurekaClient uses Spring’s `RestTemplate` for HTTP communication. +If you wish to use Jersey instead, you need to add the Jersey dependencies to your classpath. +The following example shows the dependencies you need to add: + +``` + + com.sun.jersey + jersey-client + + + com.sun.jersey + jersey-core + + + com.sun.jersey.contribs + jersey-apache-client4 + +``` + +### 1.9. Alternatives to the Native Netflix EurekaClient + +You need not use the raw Netflix `EurekaClient`. +Also, it is usually more convenient to use it behind a wrapper of some sort. +Spring Cloud has support for [Feign](#spring-cloud-feign) (a REST client builder) and [Spring `RestTemplate`](#spring-cloud-ribbon) through the logical Eureka service identifiers (VIPs) instead of physical URLs. + +You can also use the `org.springframework.cloud.client.discovery.DiscoveryClient`, which provides a simple API (not specific to Netflix) for discovery clients, as shown in the following example: + +``` +@Autowired +private DiscoveryClient discoveryClient; + +public String serviceUrl() { + List list = discoveryClient.getInstances("STORES"); + if (list != null && list.size() > 0 ) { + return list.get(0).getUri(); + } + return null; +} +``` + +### 1.10. Why Is It so Slow to Register a Service? + +Being an instance also involves a periodic heartbeat to the registry +(through the client’s `serviceUrl`) with a default duration of 30 seconds. 
+A service is not available for discovery by clients until the instance, the server, and the client all have the same metadata in their local +cache (so it could take 3 heartbeats). +You can change the period by setting `eureka.instance.leaseRenewalIntervalInSeconds`. +Setting it to a value of less than 30 speeds up the process of getting clients connected to other services. +In production, it is probably better to stick with the default, because of internal computations in the server that make assumptions about the lease renewal period. + +### 1.11. Zones + +If you have deployed Eureka clients to multiple zones, you may prefer that those clients use services within the same zone before trying services in another zone. +To set that up, you need to configure your Eureka clients correctly. + +First, you need to make sure you have Eureka servers deployed to each zone and that +they are peers of each other. +See the section on [zones and regions](#spring-cloud-eureka-server-zones-and-regions)for more information. + +Next, you need to tell Eureka which zone your service is in. +You can do so by using the `metadataMap` property. +For example, if `service 1` is deployed to both `zone 1` and `zone 2`, you need to set the following Eureka properties in `service 1`: + +**Service 1 in Zone 1** + +``` +eureka.instance.metadataMap.zone = zone1 +eureka.client.preferSameZoneEureka = true +``` + +**Service 1 in Zone 2** + +``` +eureka.instance.metadataMap.zone = zone2 +eureka.client.preferSameZoneEureka = true +``` + +### 1.12. Refreshing Eureka Clients + +By default, the `EurekaClient` bean is refreshable, meaning the Eureka client properties can be changed and refreshed. +When a refresh occurs clients will be unregistered from the Eureka server and there might be a brief moment of time +where all instance of a given service are not available. One way to eliminate this from happening is to disable +the ability to refresh Eureka clients. 
To do this set `eureka.client.refresh.enable=false`. + +### 1.13. Using Eureka with Spring Cloud LoadBalancer + +We offer support for the Spring Cloud LoadBalancer `ZonePreferenceServiceInstanceListSupplier`. +The `zone` value from the Eureka instance metadata (`eureka.instance.metadataMap.zone`) is used for setting the +value of `spring-cloud-loadbalancer-zone` property that is used to filter service instances by zone. + +If that is missing and if the `spring.cloud.loadbalancer.eureka.approximateZoneFromHostname` flag is set to `true`, +it can use the domain name from the server hostname as a proxy for the zone. + +If there is no other source of zone data, then a guess is made, based on the client configuration (as opposed to the instance configuration). +We take `eureka.client.availabilityZones`, which is a map from region name to a list of zones, and pull out the first zone for the instance’s own region (that is, the `eureka.client.region`, which defaults to "us-east-1", for compatibility with native Netflix). + +## 2. Service Discovery: Eureka Server + +This section describes how to set up a Eureka server. + +### 2.1. How to Include Eureka Server + +To include Eureka Server in your project, use the starter with a group ID of `org.springframework.cloud` and an artifact ID of `spring-cloud-starter-netflix-eureka-server`. +See the [Spring Cloud Project page](https://projects.spring.io/spring-cloud/) for details on setting up your build system with the current Spring Cloud Release Train. + +| |If your project already uses Thymeleaf as its template engine, the Freemarker templates of the Eureka server may not be loaded correctly. 
In this case it is necessary to configure the template loader manually:| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +application.yml + +``` +spring: + freemarker: + template-loader-path: classpath:/templates/ + prefer-file-system-access: false +``` + +### 2.2. How to Run a Eureka Server + +The following example shows a minimal Eureka server: + +``` +@SpringBootApplication +@EnableEurekaServer +public class Application { + + public static void main(String[] args) { + new SpringApplicationBuilder(Application.class).web(true).run(args); + } + +} +``` + +The server has a home page with a UI and HTTP API endpoints for the normal Eureka functionality under `/eureka/*`. + +The following links have some Eureka background reading: [flux capacitor](https://github.com/cfregly/fluxcapacitor/wiki/NetflixOSS-FAQ#eureka-service-discovery-load-balancer) and [google group discussion](https://groups.google.com/forum/?fromgroups#!topic/eureka_netflix/g3p2r7gHnN0). + +| |Due to Gradle’s dependency resolution rules and the lack of a parent bom feature, depending on `spring-cloud-starter-netflix-eureka-server` can cause failures on application startup.
To remedy this issue, add the Spring Boot Gradle plugin and import the Spring Cloud starter parent BOM as follows:

build.gradle

```
buildscript {
dependencies {
classpath("org.springframework.boot:spring-boot-gradle-plugin:{spring-boot-docs-version}")
}
}

apply plugin: "spring-boot"

dependencyManagement {
imports {
mavenBom "org.springframework.cloud:spring-cloud-dependencies:{spring-cloud-version}"
}
}
```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.3. High Availability, Zones and Regions + +The Eureka server does not have a back end store, but the service instances in the registry all have to send heartbeats to keep their registrations up to date (so this can be done in memory). +Clients also have an in-memory cache of Eureka registrations (so they do not have to go to the registry for every request to a service). + +By default, every Eureka server is also a Eureka client and requires (at least one) service URL to locate a peer. +If you do not provide it, the service runs and works, but it fills your logs with a lot of noise about not being able to register with the peer. + +### 2.4. Standalone Mode + +The combination of the two caches (client and server) and the heartbeats make a standalone Eureka server fairly resilient to failure, as long as there is some sort of monitor or elastic runtime (such as Cloud Foundry) keeping it alive. +In standalone mode, you might prefer to switch off the client side behavior so that it does not keep trying and failing to reach its peers. 
+The following example shows how to switch off the client-side behavior: + +application.yml (Standalone Eureka Server) + +``` +server: + port: 8761 + +eureka: + instance: + hostname: localhost + client: + registerWithEureka: false + fetchRegistry: false + serviceUrl: + defaultZone: http://${eureka.instance.hostname}:${server.port}/eureka/ +``` + +Notice that the `serviceUrl` is pointing to the same host as the local instance. + +### 2.5. Peer Awareness + +Eureka can be made even more resilient and available by running multiple instances and asking them to register with each other. +In fact, this is the default behavior, so all you need to do to make it work is add a valid `serviceUrl` to a peer, as shown in the following example: + +application.yml (Two Peer Aware Eureka Servers) + +``` +--- +spring: + profiles: peer1 +eureka: + instance: + hostname: peer1 + client: + serviceUrl: + defaultZone: https://peer2/eureka/ + +--- +spring: + profiles: peer2 +eureka: + instance: + hostname: peer2 + client: + serviceUrl: + defaultZone: https://peer1/eureka/ +``` + +In the preceding example, we have a YAML file that can be used to run the same server on two hosts (`peer1` and `peer2`) by running it in different Spring profiles. +You could use this configuration to test the peer awareness on a single host (there is not much value in doing that in production) by manipulating `/etc/hosts` to resolve the host names. +In fact, the `eureka.instance.hostname` is not needed if you are running on a machine that knows its own hostname (by default, it is looked up by using `java.net.InetAddress`). + +You can add multiple peers to a system, and, as long as they are all connected to each other by at least one edge, they synchronize +the registrations amongst themselves. +If the peers are physically separated (inside a data center or between multiple data centers), then the system can, in principle, survive “split-brain” type failures. 
+You can add multiple peers to a system, and as long as they are all +directly connected to each other, they will synchronize +the registrations amongst themselves. + +application.yml (Three Peer Aware Eureka Servers) + +``` +eureka: + client: + serviceUrl: + defaultZone: https://peer1/eureka/,http://peer2/eureka/,http://peer3/eureka/ + +--- +spring: + profiles: peer1 +eureka: + instance: + hostname: peer1 + +--- +spring: + profiles: peer2 +eureka: + instance: + hostname: peer2 + +--- +spring: + profiles: peer3 +eureka: + instance: + hostname: peer3 +``` + +### 2.6. When to Prefer IP Address + +In some cases, it is preferable for Eureka to advertise the IP addresses of services rather than the hostname. +Set `eureka.instance.preferIpAddress` to `true` and, when the application registers with eureka, it uses its IP address rather than its hostname. + +| |If the hostname cannot be determined by Java, then the IP address is sent to Eureka.
The only explicit way of setting the hostname is by setting the `eureka.instance.hostname` property.
You can set your hostname at the run-time by using an environment variable — for example, `eureka.instance.hostname=${HOST_NAME}`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.7. Securing The Eureka Server + +You can secure your Eureka server simply by adding Spring Security to your +server’s classpath via `spring-boot-starter-security`. By default when Spring Security is on the classpath it will require that +a valid CSRF token be sent with every request to the app. Eureka clients will not generally possess a valid +cross site request forgery (CSRF) token you will need to disable this requirement for the `/eureka/**` endpoints. +For example: + +``` +@EnableWebSecurity +class WebSecurityConfig extends WebSecurityConfigurerAdapter { + + @Override + protected void configure(HttpSecurity http) throws Exception { + http.csrf().ignoringAntMatchers("/eureka/**"); + super.configure(http); + } +} +``` + +For more information on CSRF see the [Spring Security documentation](https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#csrf). + +A demo Eureka Server can be found in the Spring Cloud Samples [repo](https://github.com/spring-cloud-samples/eureka/tree/Eureka-With-Security). + +### 2.8. JDK 11 Support + +The JAXB modules which the Eureka server depends upon were removed in JDK 11. If you intend to use JDK 11 +when running a Eureka server you must include these dependencies in your POM or Gradle file. + +``` + + org.glassfish.jaxb + jaxb-runtime + +``` + +## 3. Configuration properties + +To see the list of all Spring Cloud Netflix related configuration properties please check [the Appendix page](appendix.html). 
+ diff --git a/docs/en/spring-cloud/spring-cloud-openfeign.md b/docs/en/spring-cloud/spring-cloud-openfeign.md new file mode 100644 index 0000000000000000000000000000000000000000..a6c34acaef631f4aab31a622cf4d7b6323f34243 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-openfeign.md @@ -0,0 +1,773 @@ +# Spring Cloud OpenFeign + +## 1. Declarative REST Client: Feign + +[Feign](https://github.com/OpenFeign/feign) is a declarative web service client. +It makes writing web service clients easier. +To use Feign create an interface and annotate it. +It has pluggable annotation support including Feign annotations and JAX-RS annotations. +Feign also supports pluggable encoders and decoders. +Spring Cloud adds support for Spring MVC annotations and for using the same `HttpMessageConverters` used by default in Spring Web. +Spring Cloud integrates Eureka, Spring Cloud CircuitBreaker, as well as Spring Cloud LoadBalancer to provide a load-balanced http client when using Feign. + +### 1.1. How to Include Feign + +To include Feign in your project use the starter with group `org.springframework.cloud`and artifact id `spring-cloud-starter-openfeign`. See the [Spring Cloud Project page](https://projects.spring.io/spring-cloud/)for details on setting up your build system with the current Spring Cloud Release Train. 
+ +Example spring boot app + +``` +@SpringBootApplication +@EnableFeignClients +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + +} +``` + +StoreClient.java + +``` +@FeignClient("stores") +public interface StoreClient { + @RequestMapping(method = RequestMethod.GET, value = "/stores") + List getStores(); + + @RequestMapping(method = RequestMethod.GET, value = "/stores") + Page getStores(Pageable pageable); + + @RequestMapping(method = RequestMethod.POST, value = "/stores/{storeId}", consumes = "application/json") + Store update(@PathVariable("storeId") Long storeId, Store store); + + @RequestMapping(method = RequestMethod.DELETE, value = "/stores/{storeId:\\d+}") + void delete(@PathVariable Long storeId); +} +``` + +In the `@FeignClient` annotation the String value ("stores" above) is an arbitrary client name, which is used to create a [Spring Cloud LoadBalancer client](https://github.com/spring-cloud/spring-cloud-commons/blob/main/spring-cloud-loadbalancer/src/main/java/org/springframework/cloud/loadbalancer/blocking/client/BlockingLoadBalancerClient.java). +You can also specify a URL using the `url` attribute +(absolute value or just a hostname). The name of the bean in the +application context is the fully qualified name of the interface. +To specify your own alias value you can use the `qualifiers` value +of the `@FeignClient` annotation. + +The load-balancer client above will want to discover the physical addresses +for the "stores" service. If your application is a Eureka client then +it will resolve the service in the Eureka service registry. If you +don’t want to use Eureka, you can configure a list of servers +in your external configuration using [`SimpleDiscoveryClient`](https://docs.spring.io/spring-cloud-commons/docs/current/reference/html/#simplediscoveryclient). + +Spring Cloud OpenFeign supports all the features available for the blocking mode of Spring Cloud LoadBalancer. 
You can read more about them in the [project documentation](https://docs.spring.io/spring-cloud-commons/docs/current/reference/html/#spring-cloud-loadbalancer). + +| |To use `@EnableFeignClients` annotation on `@Configuration`-annotated-classes, make sure to specify where the clients are located, for example:`@EnableFeignClients(basePackages = "com.example.clients")`or list them explicitly:`@EnableFeignClients(clients = InventoryServiceFeignClient.class)`| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.2. Overriding Feign Defaults + +A central concept in Spring Cloud’s Feign support is that of the named client. Each feign client is part of an ensemble of components that work together to contact a remote server on demand, and the ensemble has a name that you give it as an application developer using the `@FeignClient` annotation. Spring Cloud creates a new ensemble as an`ApplicationContext` on demand for each named client using `FeignClientsConfiguration`. This contains (amongst other things) an `feign.Decoder`, a `feign.Encoder`, and a `feign.Contract`. +It is possible to override the name of that ensemble by using the `contextId`attribute of the `@FeignClient` annotation. + +Spring Cloud lets you take full control of the feign client by declaring additional configuration (on top of the `FeignClientsConfiguration`) using `@FeignClient`. Example: + +``` +@FeignClient(name = "stores", configuration = FooConfiguration.class) +public interface StoreClient { + //.. +} +``` + +In this case the client is composed from the components already in `FeignClientsConfiguration` together with any in `FooConfiguration` (where the latter will override the former). 
+ +| |`FooConfiguration` does not need to be annotated with `@Configuration`. However, if it is, then take care to exclude it from any `@ComponentScan` that would otherwise include this configuration as it will become the default source for `feign.Decoder`, `feign.Encoder`, `feign.Contract`, etc., when specified. This can be avoided by putting it in a separate, non-overlapping package from any `@ComponentScan` or `@SpringBootApplication`, or it can be explicitly excluded in `@ComponentScan`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Using `contextId` attribute of the `@FeignClient` annotation in addition to changing the name of
the `ApplicationContext` ensemble, the `contextId` also overrides the alias of the client name
and is used as part of the name of the configuration bean created for that client.|
However, as is an optional dependency, you need to make sure it been added to your project if you want to use it.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The OkHttpClient and ApacheHttpClient and ApacheHC5 feign clients can be used by setting `feign.okhttp.enabled` or `feign.httpclient.enabled` or `feign.httpclient.hc5.enabled` to `true`, respectively, and having them on the classpath. +You can customize the HTTP client used by providing a bean of either `org.apache.http.impl.client.CloseableHttpClient` when using Apache or `okhttp3.OkHttpClient` when using OK HTTP or `org.apache.hc.client5.http.impl.classic.CloseableHttpClient` when using Apache HC5. + +Spring Cloud OpenFeign *does not* provide the following beans by default for feign, but still looks up beans of these types from the application context to create the feign client: + +* `Logger.Level` + +* `Retryer` + +* `ErrorDecoder` + +* `Request.Options` + +* `Collection` + +* `SetterFactory` + +* `QueryMapEncoder` + +* `Capability` (`MicrometerCapability` and `CachingCapability` are provided by default) + +A bean of `Retryer.NEVER_RETRY` with the type `Retryer` is created by default, which will disable retrying. +Notice this retrying behavior is different from the Feign default one, where it will automatically retry IOExceptions, +treating them as transient network related exceptions, and any RetryableException thrown from an ErrorDecoder. + +Creating a bean of one of those type and placing it in a `@FeignClient` configuration (such as `FooConfiguration` above) allows you to override each one of the beans described. 
Example: + +``` +@Configuration +public class FooConfiguration { + @Bean + public Contract feignContract() { + return new feign.Contract.Default(); + } + + @Bean + public BasicAuthRequestInterceptor basicAuthRequestInterceptor() { + return new BasicAuthRequestInterceptor("user", "password"); + } +} +``` + +This replaces the `SpringMvcContract` with `feign.Contract.Default` and adds a `RequestInterceptor` to the collection of `RequestInterceptor`. + +`@FeignClient` also can be configured using configuration properties. + +application.yml + +``` +feign: + client: + config: + feignName: + connectTimeout: 5000 + readTimeout: 5000 + loggerLevel: full + errorDecoder: com.example.SimpleErrorDecoder + retryer: com.example.SimpleRetryer + defaultQueryParameters: + query: queryValue + defaultRequestHeaders: + header: headerValue + requestInterceptors: + - com.example.FooRequestInterceptor + - com.example.BarRequestInterceptor + decode404: false + encoder: com.example.SimpleEncoder + decoder: com.example.SimpleDecoder + contract: com.example.SimpleContract + capabilities: + - com.example.FooCapability + - com.example.BarCapability + queryMapEncoder: com.example.SimpleQueryMapEncoder + metrics.enabled: false +``` + +Default configurations can be specified in the `@EnableFeignClients` attribute `defaultConfiguration` in a similar manner as described above. The difference is that this configuration will apply to *all* feign clients. + +If you prefer using configuration properties to configured all `@FeignClient`, you can create configuration properties with `default` feign name. + +You can use `feign.client.config.feignName.defaultQueryParameters` and `feign.client.config.feignName.defaultRequestHeaders` to specify query parameters and headers that will be sent with every request of the client named `feignName`. 
+ +application.yml + +``` +feign: + client: + config: + default: + connectTimeout: 5000 + readTimeout: 5000 + loggerLevel: basic +``` + +If we create both `@Configuration` bean and configuration properties, configuration properties will win. +It will override `@Configuration` values. But if you want to change the priority to `@Configuration`, +you can change `feign.client.default-to-properties` to `false`. + +If we want to create multiple feign clients with the same name or url +so that they would point to the same server but each with a different custom configuration then +we have to use `contextId` attribute of the `@FeignClient` in order to avoid name +collision of these configuration beans. + +``` +@FeignClient(contextId = "fooClient", name = "stores", configuration = FooConfiguration.class) +public interface FooClient { + //.. +} +``` + +``` +@FeignClient(contextId = "barClient", name = "stores", configuration = BarConfiguration.class) +public interface BarClient { + //.. +} +``` + +It is also possible to configure FeignClient not to inherit beans from the parent context. +You can do this by overriding the `inheritParentConfiguration()` in a `FeignClientConfigurer`bean to return `false`: + +``` +@Configuration +public class CustomConfiguration{ + +@Bean +public FeignClientConfigurer feignClientConfigurer() { + return new FeignClientConfigurer() { + + @Override + public boolean inheritParentConfiguration() { + return false; + } + }; + + } +} +``` + +| |By default, Feign clients do not encode slash `/` characters. You can change this behaviour, by setting the value of `feign.client.decodeSlash` to `false`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.2.1. `SpringEncoder` configuration + +In the `SpringEncoder` that we provide, we set `null` charset for binary content types and `UTF-8` for all the other ones. 
+ +You can modify this behaviour to derive the charset from the `Content-Type` header charset instead by setting the value of `feign.encoder.charset-from-content-type` to `true`. + +### 1.3. Timeout Handling + +We can configure timeouts on both the default and the named client. OpenFeign works with two timeout parameters: + +* `connectTimeout` prevents blocking the caller due to the long server processing time. + +* `readTimeout` is applied from the time of connection establishment and is triggered when returning the response takes too long. + +| |In case the server is not running or available a packet results in *connection refused*. The communication ends either with an error message or in a fallback. This can happen *before* the `connectTimeout` if it is set very low. The time taken to perform a lookup and to receive such a packet causes a significant part of this delay. It is subject to change based on the remote host that involves a DNS lookup.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.4. Creating Feign Clients Manually + +In some cases it might be necessary to customize your Feign Clients in a way that is not +possible using the methods above. In this case you can create Clients using the[Feign Builder API](https://github.com/OpenFeign/feign/#basics). Below is an example +which creates two Feign Clients with the same interface but configures each one with +a separate request interceptor. 
+ +``` +@Import(FeignClientsConfiguration.class) +class FooController { + + private FooClient fooClient; + + private FooClient adminClient; + + @Autowired + public FooController(Client client, Encoder encoder, Decoder decoder, Contract contract, MicrometerCapability micrometerCapability) { + this.fooClient = Feign.builder().client(client) + .encoder(encoder) + .decoder(decoder) + .contract(contract) + .addCapability(micrometerCapability) + .requestInterceptor(new BasicAuthRequestInterceptor("user", "user")) + .target(FooClient.class, "https://PROD-SVC"); + + this.adminClient = Feign.builder().client(client) + .encoder(encoder) + .decoder(decoder) + .contract(contract) + .addCapability(micrometerCapability) + .requestInterceptor(new BasicAuthRequestInterceptor("admin", "admin")) + .target(FooClient.class, "https://PROD-SVC"); + } +} +``` + +| |In the above example `FeignClientsConfiguration.class` is the default configuration
provided by Spring Cloud OpenFeign.| +|---|---------------------------------------------------------------------------------------------------------------------------| + +| |`PROD-SVC` is the name of the service the Clients will be making requests to.| +|---|-----------------------------------------------------------------------------| + +| |The Feign `Contract` object defines what annotations and values are valid on interfaces. The
autowired `Contract` bean provides support for SpringMVC annotations, instead of
the default Feign native annotations.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also use the `Builder`to configure FeignClient not to inherit beans from the parent context. +You can do this by overriding calling `inheritParentContext(false)` on the `Builder`. + +### 1.5. Feign Spring Cloud CircuitBreaker Support + +If Spring Cloud CircuitBreaker is on the classpath and `feign.circuitbreaker.enabled=true`, Feign will wrap all methods with a circuit breaker. + +To disable Spring Cloud CircuitBreaker support on a per-client basis create a vanilla `Feign.Builder` with the "prototype" scope, e.g.: + +``` +@Configuration +public class FooConfiguration { + @Bean + @Scope("prototype") + public Feign.Builder feignBuilder() { + return Feign.builder(); + } +} +``` + +The circuit breaker name follows this pattern `#()`. When calling a `@FeignClient` with `FooClient` interface and the called interface method that has no parameters is `bar` then the circuit breaker name will be `FooClient#bar()`. + +| |As of 2020.0.2, the circuit breaker name pattern has changed from `_`.
Using `CircuitBreakerNameResolver` introduced in 2020.0.4, circuit breaker names can retain the old pattern.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Providing a bean of `CircuitBreakerNameResolver`, you can change the circuit breaker name pattern. + +``` +@Configuration +public class FooConfiguration { + @Bean + public CircuitBreakerNameResolver circuitBreakerNameResolver() { + return (String feignClientName, Target target, Method method) -> feignClientName + "_" + method.getName(); + } +} +``` + +To enable Spring Cloud CircuitBreaker group set the `feign.circuitbreaker.group.enabled` property to `true` (by default `false`). + +### 1.6. Feign Spring Cloud CircuitBreaker Fallbacks + +Spring Cloud CircuitBreaker supports the notion of a fallback: a default code path that is executed when the circuit is open or there is an error. To enable fallbacks for a given `@FeignClient` set the `fallback` attribute to the class name that implements the fallback. You also need to declare your implementation as a Spring bean. + +``` +@FeignClient(name = "test", url = "http://localhost:${server.port}/", fallback = Fallback.class) + protected interface TestClient { + + @RequestMapping(method = RequestMethod.GET, value = "/hello") + Hello getHello(); + + @RequestMapping(method = RequestMethod.GET, value = "/hellonotfound") + String getException(); + + } + + @Component + static class Fallback implements TestClient { + + @Override + public Hello getHello() { + throw new NoFallbackAvailableException("Boom!", new RuntimeException()); + } + + @Override + public String getException() { + return "Fixed response"; + } + + } +``` + +If one needs access to the cause that made the fallback trigger, one can use the `fallbackFactory` attribute inside `@FeignClient`. 
+ +``` +@FeignClient(name = "testClientWithFactory", url = "http://localhost:${server.port}/", + fallbackFactory = TestFallbackFactory.class) + protected interface TestClientWithFactory { + + @RequestMapping(method = RequestMethod.GET, value = "/hello") + Hello getHello(); + + @RequestMapping(method = RequestMethod.GET, value = "/hellonotfound") + String getException(); + + } + + @Component + static class TestFallbackFactory implements FallbackFactory { + + @Override + public FallbackWithFactory create(Throwable cause) { + return new FallbackWithFactory(); + } + + } + + static class FallbackWithFactory implements TestClientWithFactory { + + @Override + public Hello getHello() { + throw new NoFallbackAvailableException("Boom!", new RuntimeException()); + } + + @Override + public String getException() { + return "Fixed response"; + } + + } +``` + +### 1.7. Feign and `@Primary` + +When using Feign with Spring Cloud CircuitBreaker fallbacks, there are multiple beans in the `ApplicationContext` of the same type. This will cause `@Autowired` to not work because there isn’t exactly one bean, or one marked as primary. To work around this, Spring Cloud OpenFeign marks all Feign instances as `@Primary`, so Spring Framework will know which bean to inject. In some cases, this may not be desirable. To turn off this behavior set the `primary` attribute of `@FeignClient` to false. + +``` +@FeignClient(name = "hello", primary = false) +public interface HelloClient { + // methods here +} +``` + +### 1.8. Feign Inheritance Support + +Feign supports boilerplate apis via single-inheritance interfaces. +This allows grouping common operations into convenient base interfaces. 
+ +UserService.java + +``` +public interface UserService { + + @RequestMapping(method = RequestMethod.GET, value ="/users/{id}") + User getUser(@PathVariable("id") long id); +} +``` + +UserResource.java + +``` +@RestController +public class UserResource implements UserService { + +} +``` + +UserClient.java + +``` +package project.user; + +@FeignClient("users") +public interface UserClient extends UserService { + +} +``` + +| |`@FeignClient` interfaces should not be shared between server and client and annotating `@FeignClient` interfaces with `@RequestMapping` on class level is no longer supported.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.9. Feign request/response compression + +You may consider enabling the request or response GZIP compression for your +Feign requests. You can do this by enabling one of the properties: + +``` +feign.compression.request.enabled=true +feign.compression.response.enabled=true +``` + +Feign request compression gives you settings similar to what you may set for your web server: + +``` +feign.compression.request.enabled=true +feign.compression.request.mime-types=text/xml,application/xml,application/json +feign.compression.request.min-request-size=2048 +``` + +These properties allow you to be selective about the compressed media types and minimum request threshold length. + +### 1.10. Feign logging + +A logger is created for each Feign client created. By default the name of the logger is the full class name of the interface used to create the Feign client. Feign logging only responds to the `DEBUG` level. + +application.yml + +``` +logging.level.project.user.UserClient: DEBUG +``` + +The `Logger.Level` object that you may configure per client, tells Feign how much to log. Choices are: + +* `NONE`, No logging (**DEFAULT**). 
+ +* `BASIC`, Log only the request method and URL and the response status code and execution time. + +* `HEADERS`, Log the basic information along with request and response headers. + +* `FULL`, Log the headers, body, and metadata for both requests and responses. + +For example, the following would set the `Logger.Level` to `FULL`: + +``` +@Configuration +public class FooConfiguration { + @Bean + Logger.Level feignLoggerLevel() { + return Logger.Level.FULL; + } +} +``` + +### 1.11. Feign Capability support + +The Feign capabilities expose core Feign components so that these components can be modified. For example, the capabilities can take the `Client`, *decorate* it, and give the decorated instance back to Feign. +The support for metrics libraries is a good real-life example for this. See [Feign metrics](#feign-metrics). + +Creating one or more `Capability` beans and placing them in a `@FeignClient` configuration lets you register them and modify the behavior of the involved client. + +``` +@Configuration +public class FooConfiguration { + @Bean + Capability customCapability() { + return new CustomCapability(); + } +} +``` + +### 1.12. 
Feign metrics + +If all of the following conditions are true, a `MicrometerCapability` bean is created and registered so that your Feign client publishes metrics to Micrometer: + +* `feign-micrometer` is on the classpath + +* A `MeterRegistry` bean is available + +* feign metrics properties are set to `true` (by default) + + * `feign.metrics.enabled=true` (for all clients) + + * `feign.client.config.feignName.metrics.enabled=true` (for a single client) + +| |If your application already uses Micrometer, enabling metrics is as simple as putting `feign-micrometer` onto your classpath.| +|---|-----------------------------------------------------------------------------------------------------------------------------| + +You can also disable the feature by either: + +* excluding `feign-micrometer` from your classpath + +* setting one of the feign metrics properties to `false` + + * `feign.metrics.enabled=false` + + * `feign.client.config.feignName.metrics.enabled=false` + +| |`feign.metrics.enabled=false` disables metrics support for **all** Feign clients regardless of the value of the client-level flags: `feign.client.config.feignName.metrics.enabled`.
If you want to enable or disable merics per client, don’t set `feign.metrics.enabled` and use `feign.client.config.feignName.metrics.enabled`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also customize the `MicrometerCapability` by registering your own bean: + +``` +@Configuration +public class FooConfiguration { + @Bean + public MicrometerCapability micrometerCapability(MeterRegistry meterRegistry) { + return new MicrometerCapability(meterRegistry); + } +} +``` + +### 1.13. Feign Caching + +If `@EnableCaching` annotation is used, a `CachingCapability` bean is created and registered so that your Feign client recognizes `@Cache*` annotations on its interface: + +``` +public interface DemoClient { + + @GetMapping("/demo/{filterParam}") + @Cacheable(cacheNames = "demo-cache", key = "#keyParam") + String demoEndpoint(String keyParam, @PathVariable String filterParam); +} +``` + +You can also disable the feature via property `feign.cache.enabled=false`. + +### 1.14. Feign @QueryMap support + +The OpenFeign `@QueryMap` annotation provides support for POJOs to be used as +GET parameter maps. Unfortunately, the default OpenFeign QueryMap annotation is +incompatible with Spring because it lacks a `value` property. + +Spring Cloud OpenFeign provides an equivalent `@SpringQueryMap` annotation, which +is used to annotate a POJO or Map parameter as a query parameter map. 
+ +For example, the `Params` class defines parameters `param1` and `param2`: + +``` +// Params.java +public class Params { + private String param1; + private String param2; + + // [Getters and setters omitted for brevity] +} +``` + +The following feign client uses the `Params` class by using the `@SpringQueryMap` annotation: + +``` +@FeignClient("demo") +public interface DemoTemplate { + + @GetMapping(path = "/demo") + String demoEndpoint(@SpringQueryMap Params params); +} +``` + +If you need more control over the generated query parameter map, you can implement a custom `QueryMapEncoder` bean. + +### 1.15. HATEOAS support + +Spring provides some APIs to create REST representations that follow the [HATEOAS](https://en.wikipedia.org/wiki/HATEOAS) principle, [Spring Hateoas](https://spring.io/projects/spring-hateoas) and [Spring Data REST](https://spring.io/projects/spring-data-rest). + +If your project use the `org.springframework.boot:spring-boot-starter-hateoas` starter +or the `org.springframework.boot:spring-boot-starter-data-rest` starter, Feign HATEOAS support is enabled by default. + +When HATEOAS support is enabled, Feign clients are allowed to serialize +and deserialize HATEOAS representation models: [EntityModel](https://docs.spring.io/spring-hateoas/docs/1.0.0.M1/apidocs/org/springframework/hateoas/EntityModel.html), [CollectionModel](https://docs.spring.io/spring-hateoas/docs/1.0.0.M1/apidocs/org/springframework/hateoas/CollectionModel.html) and [PagedModel](https://docs.spring.io/spring-hateoas/docs/1.0.0.M1/apidocs/org/springframework/hateoas/PagedModel.html). + +``` +@FeignClient("demo") +public interface DemoTemplate { + + @GetMapping(path = "/stores") + CollectionModel getStores(); +} +``` + +### 1.16. Spring @MatrixVariable Support + +Spring Cloud OpenFeign provides support for the Spring `@MatrixVariable` annotation. 
+ +If a map is passed as the method argument, the `@MatrixVariable` path segment is created by joining key-value pairs from the map with a `=`. + +If a different object is passed, either the `name` provided in the `@MatrixVariable` annotation (if defined) or the annotated variable name is +joined with the provided method argument using `=`. + +IMPORTANT + +Even though, on the server side, Spring does not require the users to name the path segment placeholder same as the matrix variable name, since it would be too ambiguous on the client side, Spring Cloud OpenFeign requires that you add a path segment placeholder with a name matching either the `name` provided in the `@MatrixVariable` annotation (if defined) or the annotated variable name. + +For example: + +``` +@GetMapping("/objects/links/{matrixVars}") +Map> getObjects(@MatrixVariable Map> matrixVars); +``` + +Note that both variable name and the path segment placeholder are called `matrixVars`. + +``` +@FeignClient("demo") +public interface DemoTemplate { + + @GetMapping(path = "/stores") + CollectionModel getStores(); +} +``` + +### 1.17. Feign `CollectionFormat` support + +We support `feign.CollectionFormat` by providing the `@CollectionFormat` annotation. +You can annotate a Feign client method (or the whole class to affect all methods) with it by passing the desired `feign.CollectionFormat` as annotation value. + +In the following example, the `CSV` format is used instead of the default `EXPLODED` to process the method. + +``` +@FeignClient(name = "demo") +protected interface PageableFeignClient { + + @CollectionFormat(feign.CollectionFormat.CSV) + @GetMapping(path = "/page") + ResponseEntity performRequest(Pageable page); + +} +``` + +| |Set the `CSV` format while sending `Pageable` as a query parameter in order for it to be encoded correctly.| +|---|-----------------------------------------------------------------------------------------------------------| + +### 1.18. 
Reactive Support + +As the [OpenFeign project](https://github.com/OpenFeign/feign) does not currently support reactive clients, such as [Spring WebClient](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/web/reactive/function/client/WebClient.html), neither does Spring Cloud OpenFeign.We will add support for it here as soon as it becomes available in the core project. + +Until that is done, we recommend using [feign-reactive](https://github.com/Playtika/feign-reactive) for Spring WebClient support. + +#### 1.18.1. Early Initialization Errors + +Depending on how you are using your Feign clients you may see initialization errors when starting your application. +To work around this problem you can use an `ObjectProvider` when autowiring your client. + +``` +@Autowired +ObjectProvider testFeignClient; +``` + +### 1.19. Spring Data Support + +You may consider enabling Jackson Modules for the support `org.springframework.data.domain.Page` and `org.springframework.data.domain.Sort` decoding. + +``` +feign.autoconfiguration.jackson.enabled=true +``` + +### 1.20. Spring `@RefreshScope` Support + +If Feign client refresh is enabled, each feign client is created with `feign.Request.Options` as a refresh-scoped bean. This means properties such as `connectTimeout` and `readTimeout` can be refreshed against any Feign client instance through `POST /actuator/refresh`. + +By default, refresh behavior in Feign clients is disabled. Use the following property to enable refresh behavior: + +``` +feign.client.refresh-enabled=true +``` + +| |DO NOT annotate the `@FeignClient` interface with the `@RefreshScope` annotation.| +|---|---------------------------------------------------------------------------------| + +### 1.21. 
OAuth2 Support + +OAuth2 support can be enabled by setting following flag: + +``` +feign.oauth2.enabled=true +``` + +When the flag is set to true, and the oauth2 client context resource details are present, a bean of class `OAuth2FeignRequestInterceptor` is created. Before each request, the interceptor resolves the required access token and includes it as a header. +Sometimes, when load balancing is enabled for Feign clients, you may want to use load balancing for fetching access tokens, too. To do so, you should ensure that the load balancer is on the classpath (spring-cloud-starter-loadbalancer) and explicitly enable load balancing for OAuth2FeignRequestInterceptor by setting the following flag: + +``` +feign.oauth2.load-balanced=true +``` + +## 2. Configuration properties + +To see the list of all Spring Cloud OpenFeign related configuration properties please check [the Appendix page](appendix.html). + diff --git a/docs/en/spring-cloud/spring-cloud-sleuth.md b/docs/en/spring-cloud/spring-cloud-sleuth.md new file mode 100644 index 0000000000000000000000000000000000000000..df9f313dc70c5e4c875fca8a525abb899d0232ce --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-sleuth.md @@ -0,0 +1,16 @@ +# Spring Cloud Sleuth Reference Documentation + +Adrian Cole, Spencer Gibb, Marcin Grzejszczak, Dave Syer, Jay Bryant + +The reference documentation consists of the following sections: + +| [Legal](legal.html#legal) | Legal information. | +|--------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------| +|[Documentation Overview](documentation-overview.html#sleuth-documentation-about)| About the Documentation, Getting Help, First Steps, and more. 
| +| [Getting Started](getting-started.html#getting-started) |Introducing Spring Cloud Sleuth, Developing Your First Spring Cloud Sleuth-based Application| +| [Using Spring Cloud Sleuth](using.html#using) | Spring Cloud Sleuth usage examples and workflows. | +| [Spring Cloud Sleuth Features](project-features.html#features) | Span creation, context propagation, and more. | +| [“How-to” Guides](howto.html#howto) | Add sampling, propagate remote tags, and more. | +| [Spring Cloud Sleuth Integrations](integrations.html#sleuth-integration) | Instrumentation configuration, context propagation, and more. | +| [Appendices](appendix.html#appendix) | Span definitions and configuration properties. | + diff --git a/docs/en/spring-cloud/spring-cloud-stream.md b/docs/en/spring-cloud/spring-cloud-stream.md new file mode 100644 index 0000000000000000000000000000000000000000..39c757931780b5ab1a0478ede68953dceb3a3dd2 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-stream.md @@ -0,0 +1,43 @@ +# Spring Cloud Stream Reference Documentation + +Sabby Anandan +Marius Bogoevici +Eric Bottard +Mark Fisher +Ilayaperumal Gopinathan +Mark Heckler +Gunnar Hillert +Mark Pollack +Patrick Peralta +Glenn Renfro +Thomas Risberg +Dave Syer +David Turanski +Janne Valkealahti +Benjamin Klein +Vinicius Carvalho +Gary Russell +Oleg Zhurakousky +Jay Bryant +Soby Chacko +Domenico Sibilio + +**3.2.2** + +The reference documentation consists of the following sections: + +| [Overview](spring-cloud-stream.html#spring-cloud-stream-reference) | History, Quick Start, Concepts, Architecture Overview, Binder Abstraction, and Core Features | +|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------| +| [Rabbit MQ 
Binder](https://docs.spring.io/spring-cloud-stream-binder-rabbit/docs/3.2.2/reference/html/spring-cloud-stream-binder-rabbit.html) | Spring Cloud Stream binder reference for Rabbit MQ | +| [Apache Kafka Binder](https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/3.2.2/reference/html/spring-cloud-stream-binder-kafka.html#_apache_kafka_binder) | Spring Cloud Stream binder reference for Apache Kafka | +|[Apache Kafka Streams Binder](https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/3.2.2/reference/html/spring-cloud-stream-binder-kafka.html#_kafka_streams_binder)| Spring Cloud Stream binder reference for Apache Kafka Streams | +| [Additional Binders](binders.html#binders) |A collection of Partner maintained binder implementations for Spring Cloud Stream (e.g., Azure Event Hubs, Google PubSub, Solace PubSub+)| +| [Spring Cloud Stream Samples](https://github.com/spring-cloud/spring-cloud-stream-samples/) | A curated collection of repeatable Spring Cloud Stream samples to walk through the features | + +Relevant Links: + +| [Spring Cloud Data Flow](https://cloud.spring.io/spring-cloud-dataflow/) | Spring Cloud Data Flow | +|--------------------------------------------------------------------------------|------------------------------------------------------| +|[Enterprise Integration Patterns](http://www.enterpriseintegrationpatterns.com/)|Patterns and Best Practices for Enterprise Integration| +| [Spring Integration](https://spring.io/projects/spring-integration) | Spring Integration framework | + diff --git a/docs/en/spring-cloud/spring-cloud-task.md b/docs/en/spring-cloud/spring-cloud-task.md new file mode 100644 index 0000000000000000000000000000000000000000..8ca189bca8337f0a0107b65808e707ec9c23db63 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-task.md @@ -0,0 +1,1318 @@ +# Spring Cloud Task Reference Guide + +# Preface + +This section provides a brief overview of the Spring Cloud Task reference documentation. 
+Think of it as a map for the rest of the document. You can read this reference guide in a +linear fashion or you can skip sections if something does not interest you. + +## 1. About the documentation + +The Spring Cloud Task reference guide is available in [html](https://docs.spring.io/spring-cloud-task/docs/current/reference)and [pdf](https://docs.spring.io/spring-cloud-task/docs/current/reference/index.pdf),[epub](https://docs.spring.io/spring-cloud-task/docs/current/reference/index.epub) . The +latest copy is available at [docs.spring.io/spring-cloud-task/docs/current-SNAPSHOT/reference/html/](https://docs.spring.io/spring-cloud-task/docs/current-SNAPSHOT/reference/html/). + +Copies of this document may be made for your own use and for distribution to others, +provided that you do not charge any fee for such copies and further provided that each +copy contains this Copyright Notice, whether distributed in print or electronically. + +## 2. Getting help + +Having trouble with Spring Cloud Task? We would like to help! + +* Ask a question. We monitor [stackoverflow.com](https://stackoverflow.com) for questions + tagged with [`spring-cloud-task`](https://stackoverflow.com/tags/spring-cloud-task). + +* Report bugs with Spring Cloud Task at[github.com/spring-cloud/spring-cloud-task/issues](https://github.com/spring-cloud/spring-cloud-task/issues). + +| |All of Spring Cloud Task is open source, including the documentation. If you find
a problem with the docs or if you just want to improve them, please [get
involved](https://github.com/spring-cloud/spring-cloud-task/tree/master).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 3. First Steps + +If you are just getting started with Spring Cloud Task or with 'Spring' in general, we +suggesting reading the [Getting started](#getting-started) chapter. + +To get started from scratch, read the following sections: + +* [Introducing Spring Cloud Task](#getting-started-introducing-spring-cloud-task) + +* [System Requirements](#getting-started-system-requirements) + +To follow the tutorial, read[Developing Your First Spring Cloud Task Application](#getting-started-developing-first-task) +To run your example, read[Running the Example](#getting-started-running-the-example) + +# Getting started + +If you are just getting started with Spring Cloud Task, you should read this section. +Here, we answer the basic “what?”, “how?”, and “why?” questions. We start with a +gentle introduction to Spring Cloud Task. We then build a Spring Cloud Task application, +discussing some core principles as we go. + +## 4. Introducing Spring Cloud Task + +Spring Cloud Task makes it easy to create short-lived microservices. It provides +capabilities that let short lived JVM processes be executed on demand in a production +environment. + +## 5. System Requirements + +You need to have Java installed (Java 8 or better). To build, you need to have Maven +installed as well. + +### 5.1. Database Requirements + +Spring Cloud Task uses a relational database to store the results of an executed task. +While you can begin developing a task without a database (the status of the task is logged +as part of the task repository’s updates), for production environments, you want to +use a supported database. 
Spring Cloud Task currently supports the following databases: + +* DB2 + +* H2 + +* HSQLDB + +* MySql + +* Oracle + +* Postgres + +* SqlServer + +## 6. Developing Your First Spring Cloud Task Application + +A good place to start is with a simple “Hello, World!” application, so we create the +Spring Cloud Task equivalent to highlight the features of the framework. Most IDEs have +good support for Apache Maven, so we use it as the build tool for this project. + +| |The spring.io web site contains many [“`Getting Started`”
guides](https://spring.io/guides) that use Spring Boot. If you need to solve a specific problem, check there first.
You can shortcut the following steps by going to the [Spring Initializr](https://start.spring.io/) and creating a new project. Doing so
automatically generates a new project structure so that you can start coding right away.
We recommend experimenting with the Spring Initializr to become familiar with it.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 6.1. Creating the Spring Task Project using Spring Initializr + +Now we can create and test an application that prints `Hello, World!` to the console. + +To do so: + +1. Visit the [Spring Initialzr](https://start.spring.io/) site. + + 1. Create a new Maven project with a **Group** name of `io.spring.demo` and an **Artifact** name of `helloworld`. + + 2. In the Dependencies text box, type `task` and then select the `Cloud Task` dependency. + + 3. In the Dependencies text box, type `jdbc` and then select the `JDBC` dependency. + + 4. In the Dependencies text box, type `h2` and then select the `H2`. (or your favorite database) + + 5. Click the **Generate Project** button + +2. Unzip the helloworld.zip file and import the project into your favorite IDE. + +### 6.2. Writing the Code + +To finish our application, we need to update the generated `HelloworldApplication` with the following contents so that it launches a Task. 
+ +``` +package io.spring.demo.helloworld; + +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.context.annotation.Bean; + +@SpringBootApplication +@EnableTask +public class HelloworldApplication { + + @Bean + public CommandLineRunner commandLineRunner() { + return new HelloWorldCommandLineRunner(); + } + + public static void main(String[] args) { + SpringApplication.run(HelloworldApplication.class, args); + } + + public static class HelloWorldCommandLineRunner implements CommandLineRunner { + + @Override + public void run(String... strings) throws Exception { + System.out.println("Hello, World!"); + } + } +} +``` + +While it may seem small, quite a bit is going on. For more about Spring +Boot specifics, see the[Spring Boot reference documentation](https://docs.spring.io/spring-boot/docs/current/reference/html/). + +Now we can open the `application.properties` file in `src/main/resources`. +We need to configure two properties in `application.properties`: + +* `application.name`: To set the application name (which is translated to the task name) + +* `logging.level`: To set the logging for Spring Cloud Task to `DEBUG` in order to + get a view of what is going on. + +The following example shows how to do both: + +``` +logging.level.org.springframework.cloud.task=DEBUG +spring.application.name=helloWorld +``` + +#### 6.2.1. Task Auto Configuration + +When including Spring Cloud Task Starter dependency, Task auto configures all beans to bootstrap it’s functionality. +Part of this configuration registers the `TaskRepository` and the infrastructure for its use. + +In our demo, the `TaskRepository` uses an embedded H2 database to record the results +of a task. This H2 embedded database is not a practical solution for a production environment, since +the H2 DB goes away once the task ends. 
However, for a quick getting-started +experience, we can use this in our example as well as echoing to the logs what is being updated +in that repository. In the [Configuration](#features-configuration) section (later in this +documentation), we cover how to customize the configuration of the pieces provided by +Spring Cloud Task. + +When our sample application runs, Spring Boot launches our `HelloWorldCommandLineRunner`and outputs our “Hello, World!” message to standard out. The `TaskLifecycleListener`records the start of the task and the end of the task in the repository. + +#### 6.2.2. The main method + +The main method serves as the entry point to any java application. Our main method +delegates to Spring Boot’s [SpringApplication](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-spring-application.html) class. + +#### 6.2.3. The CommandLineRunner + +Spring includes many ways to bootstrap an application’s logic. Spring Boot provides +a convenient method of doing so in an organized manner through its `*Runner` interfaces +(`CommandLineRunner` or `ApplicationRunner`). A well behaved task can bootstrap any +logic by using one of these two runners. + +The lifecycle of a task is considered from before the `*Runner#run` methods are executed +to once they are all complete. Spring Boot lets an application use multiple`*Runner` implementations, as does Spring Cloud Task. + +| |Any processing bootstrapped from mechanisms other than a `CommandLineRunner` or`ApplicationRunner` (by using `InitializingBean#afterPropertiesSet` for example) is not
recorded by Spring Cloud Task.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 6.3. Running the Example + +At this point, our application should work. Since this application is Spring Boot-based, +we can run it from the command line by using `$ mvn spring-boot:run` from the root +of our application, as shown (with its output) in the following example: + +``` +$ mvn clean spring-boot:run +....... . . . +....... . . . (Maven log output here) +....... . . . + + . ____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v2.0.3.RELEASE) + +2018-07-23 17:44:34.426 INFO 1978 --- [ main] i.s.d.helloworld.HelloworldApplication : Starting HelloworldApplication on Glenns-MBP-2.attlocal.net with PID 1978 (/Users/glennrenfro/project/helloworld/target/classes started by glennrenfro in /Users/glennrenfro/project/helloworld) +2018-07-23 17:44:34.430 INFO 1978 --- [ main] i.s.d.helloworld.HelloworldApplication : No active profile set, falling back to default profiles: default +2018-07-23 17:44:34.472 INFO 1978 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.spring[email protected]1d24f32d: startup date [Mon Jul 23 17:44:34 EDT 2018]; root of context hierarchy +2018-07-23 17:44:35.280 INFO 1978 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Starting... +2018-07-23 17:44:35.410 INFO 1978 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Start completed. 
+2018-07-23 17:44:35.419 DEBUG 1978 --- [ main] o.s.c.t.c.SimpleTaskConfiguration : Using org.springframework.cloud.task.configuration.DefaultTaskConfigurer TaskConfigurer +2018-07-23 17:44:35.420 DEBUG 1978 --- [ main] o.s.c.t.c.DefaultTaskConfigurer : No EntityManager was found, using DataSourceTransactionManager +2018-07-23 17:44:35.522 DEBUG 1978 --- [ main] o.s.c.t.r.s.TaskRepositoryInitializer : Initializing task schema for h2 database +2018-07-23 17:44:35.525 INFO 1978 --- [ main] o.s.jdbc.datasource.init.ScriptUtils : Executing SQL script from class path resource [org/springframework/cloud/task/schema-h2.sql] +2018-07-23 17:44:35.558 INFO 1978 --- [ main] o.s.jdbc.datasource.init.ScriptUtils : Executed SQL script from class path resource [org/springframework/cloud/task/schema-h2.sql] in 33 ms. +2018-07-23 17:44:35.728 INFO 1978 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Registering beans for JMX exposure on startup +2018-07-23 17:44:35.730 INFO 1978 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Bean with name 'dataSource' has been autodetected for JMX exposure +2018-07-23 17:44:35.733 INFO 1978 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Located MBean 'dataSource': registering with JMX server as MBean [com.zaxxer.hikari:name=dataSource,type=HikariDataSource] +2018-07-23 17:44:35.738 INFO 1978 --- [ main] o.s.c.support.DefaultLifecycleProcessor : Starting beans in phase 0 +2018-07-23 17:44:35.762 DEBUG 1978 --- [ main] o.s.c.t.r.support.SimpleTaskRepository : Creating: TaskExecution{executionId=0, parentExecutionId=null, exitCode=null, taskName='application', startTime=Mon Jul 23 17:44:35 EDT 2018, endTime=null, exitMessage='null', externalExecutionId='null', errorMessage='null', arguments=[]} +2018-07-23 17:44:35.772 INFO 1978 --- [ main] i.s.d.helloworld.HelloworldApplication : Started HelloworldApplication in 1.625 seconds (JVM running for 4.764) +Hello, World! 
+2018-07-23 17:44:35.782 DEBUG 1978 --- [ main] o.s.c.t.r.support.SimpleTaskRepository : Updating: TaskExecution with executionId=1 with the following {exitCode=0, endTime=Mon Jul 23 17:44:35 EDT 2018, exitMessage='null', errorMessage='null'} +``` + +The preceding output has three lines that are of interest to us here: + +* `SimpleTaskRepository` logged the creation of the entry in the `TaskRepository`. + +* The execution of our `CommandLineRunner`, demonstrated by the “Hello, World!” output. + +* `SimpleTaskRepository` logs the completion of the task in the `TaskRepository`. + +| |A simple task application can be found in the samples module of the Spring Cloud
Task Project[here](https://github.com/spring-cloud/spring-cloud-task/tree/master/spring-cloud-task-samples/timestamp).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +# Features + +This section goes into more detail about Spring Cloud Task, including how to use it, how +to configure it, and the appropriate extension points. + +## 7. The lifecycle of a Spring Cloud Task + +In most cases, the modern cloud environment is designed around the execution of processes +that are not expected to end. If they do end, they are typically restarted. While most +platforms do have some way to run a process that is not restarted when it ends, the +results of that run are typically not maintained in a consumable way. Spring Cloud +Task offers the ability to execute short-lived processes in an environment and record the +results. Doing so allows for a microservices architecture around short-lived processes as +well as longer running services through the integration of tasks by messages. + +While this functionality is useful in a cloud environment, the same issues can arise in a +traditional deployment model as well. When running Spring Boot applications with a +scheduler such as cron, it can be useful to be able to monitor the results of the +application after its completion. + +Spring Cloud Task takes the approach that a Spring Boot application can have a start and +an end and still be successful. Batch applications are one example of how processes that +are expected to end (and that are often short-lived) can be helpful. + +Spring Cloud Task records the lifecycle events of a given task. Most long-running +processes, typified by most web applications, do not save their lifecycle events. The +tasks at the heart of Spring Cloud Task do. + +The lifecycle consists of a single task execution. 
This is a physical execution of a +Spring Boot application configured to be a task (that is, it has the Spring Cloud Task dependencies). + +At the beginning of a task, before any `CommandLineRunner` or `ApplicationRunner`implementations have been run, an entry in the `TaskRepository` that records the start +event is created. This event is triggered through `SmartLifecycle#start` being triggered +by the Spring Framework. This indicates to the system that all beans are ready for use and +comes before running any of the `CommandLineRunner` or `ApplicationRunner` implementations +provided by Spring Boot. + +| |The recording of a task only occurs upon the successful bootstrapping of an`ApplicationContext`. If the context fails to bootstrap at all, the task’s run is not
recorded.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Upon completion of all of the `*Runner#run` calls from Spring Boot or the failure of an`ApplicationContext` (indicated by an `ApplicationFailedEvent`), the task execution is +updated in the repository with the results. + +| |If the application requires the `ApplicationContext` to be closed at the
completion of a task (all `*Runner#run` methods have been called and the task
repository has been updated), set the property `spring.cloud.task.closecontextEnabled`to true.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 7.1. The TaskExecution + +The information stored in the `TaskRepository` is modeled in the `TaskExecution` class and +consists of the following information: + +| Field | Description | +|--------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`executionid` | The unique ID for the task’s run. | +| `exitCode` |The exit code generated from an `ExitCodeExceptionMapper` implementation. If there is no
exit code generated but an `ApplicationFailedEvent` is thrown, 1 is set. Otherwise, it is
assumed to be 0.| +| `taskName` | The name for the task, as determined by the configured `TaskNameResolver`. | +| `startTime` | The time the task was started, as indicated by the `SmartLifecycle#start` call. | +| `endTime` | The time the task was completed, as indicated by the `ApplicationReadyEvent`. | +|`exitMessage` | Any information available at the time of exit. This can programmatically be set by a`TaskExecutionListener`. | +|`errorMessage`| If an exception is the cause of the end of the task (as indicated by an`ApplicationFailedEvent`), the stack trace for that exception is stored here. | +| `arguments` | A `List` of the string command line arguments as they were passed into the executable
boot application. | + +### 7.2. Mapping Exit Codes + +When a task completes, it tries to return an exit code to the OS. If we take a look +at our [original example](#getting-started-developing-first-task), we can see that we are +not controlling that aspect of our application. So, if an exception is thrown, the JVM +returns a code that may or may not be of any use to you in debugging. + +Consequently, Spring Boot provides an interface, `ExitCodeExceptionMapper`, that lets you +map uncaught exceptions to exit codes. Doing so lets you indicate, at the level of exit +codes, what went wrong. Also, by mapping exit codes in this manner, Spring Cloud Task +records the returned exit code. + +If the task terminates with a SIG-INT or a SIG-TERM, the exit code is zero unless +otherwise specified within the code. + +| |While the task is running, the exit code is stored as a null in the repository.
Once the task completes, the appropriate exit code is stored based on the guidelines described
earlier in this section.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 8. Configuration + +Spring Cloud Task provides a ready-to-use configuration, as defined in the`DefaultTaskConfigurer` and `SimpleTaskConfiguration` classes. This section walks through +the defaults and how to customize Spring Cloud Task for your needs. + +### 8.1. DataSource + +Spring Cloud Task uses a datasource for storing the results of task executions. By +default, we provide an in-memory instance of H2 to provide a simple method of +bootstrapping development. However, in a production environment, you probably want to +configure your own `DataSource`. + +If your application uses only a single `DataSource` and that serves as both your business +schema and the task repository, all you need to do is provide any `DataSource` (the +easiest way to do so is through Spring Boot’s configuration conventions). This`DataSource` is automatically used by Spring Cloud Task for the repository. + +If your application uses more than one `DataSource`, you need to configure the task +repository with the appropriate `DataSource`. This customization can be done through an +implementation of `TaskConfigurer`. + +### 8.2. Table Prefix + +One modifiable property of `TaskRepository` is the table prefix for the task tables. By +default, they are all prefaced with `TASK_`. `TASK_EXECUTION` and `TASK_EXECUTION_PARAMS`are two examples. However, there are potential reasons to modify this prefix. If the +schema name needs to be prepended to the table names or if more than one set of task +tables is needed within the same schema, you must change the table prefix. 
You can do so +by setting the `spring.cloud.task.tablePrefix` to the prefix you need, as follows: + +`spring.cloud.task.tablePrefix=yourPrefix` + +By using the `spring.cloud.task.tablePrefix`, a user assumes the responsibility to +create the task tables that meet both the criteria for the task table schema but +with modifications that are required for a user’s business needs. +You can utilize the Spring Cloud Task Schema DDL as a guide when creating your own Task DDL as seen[here](https://github.com/spring-cloud/spring-cloud-task/tree/master/spring-cloud-task-core/src/main/resources/org/springframework/cloud/task). + +### 8.3. Enable/Disable table initialization + +In cases where you are creating the task tables and do not wish for Spring Cloud Task to +create them at task startup, set the `spring.cloud.task.initialize-enabled` property to`false`, as follows: + +`spring.cloud.task.initialize-enabled=false` + +It defaults to `true`. + +| |The property `spring.cloud.task.initialize.enable` has been deprecated.| +|---|-----------------------------------------------------------------------| + +### 8.4. Externally Generated Task ID + +In some cases, you may want to allow for the time difference between when a task is +requested and when the infrastructure actually launches it. Spring Cloud Task lets you +create a `TaskExecution` when the task is requested. Then pass the execution ID of the +generated `TaskExecution` to the task so that it can update the `TaskExecution` through +the task’s lifecycle. + +A `TaskExecution` can be created by calling the `createTaskExecution` method on an +implementation of the `TaskRepository` that references the datastore that holds +the `TaskExecution` objects. + +In order to configure your Task to use a generated `TaskExecutionId`, add the +following property: + +`spring.cloud.task.executionid=yourtaskId` + +### 8.5. External Task Id + +Spring Cloud Task lets you store an external task ID for each`TaskExecution`. 
An example of this would be a task ID provided by +Cloud Foundry when a task is launched on the platform. +In order to configure your Task to use a generated `TaskExecutionId`, add the +following property: + +`spring.cloud.task.external-execution-id=` + +### 8.6. Parent Task Id + +Spring Cloud Task lets you store a parent task ID for each `TaskExecution`. An example of +this would be a task that executes another task or tasks and you want to record which task +launched each of the child tasks. In order to configure your Task to set a parent`TaskExecutionId` add the following property on the child task: + +`spring.cloud.task.parent-execution-id=` + +### 8.7. TaskConfigurer + +The `TaskConfigurer` is a strategy interface that lets you customize the way components of +Spring Cloud Task are configured. By default, we provide the `DefaultTaskConfigurer` that +provides logical defaults: `Map`-based in-memory components (useful for development if no`DataSource` is provided) and JDBC based components (useful if there is a `DataSource`available). + +The `TaskConfigurer` lets you configure three main components: + +| Component | Description | Default (provided by `DefaultTaskConfigurer`) | +|----------------------------|------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------| +| `TaskRepository` | The implementation of the `TaskRepository` to be used. | `SimpleTaskRepository` | +| `TaskExplorer` |The implementation of the `TaskExplorer` (a component for read-only access to the task
repository) to be used.| `SimpleTaskExplorer` | +|`PlatformTransactionManager`| A transaction manager to be used when running updates for tasks. |`DataSourceTransactionManager` if a `DataSource` is used.`ResourcelessTransactionManager` if it is not.| + +You can customize any of the components described in the preceding table by creating a +custom implementation of the `TaskConfigurer` interface. Typically, extending the`DefaultTaskConfigurer` (which is provided if a `TaskConfigurer` is not found) and +overriding the required getter is sufficient. However, implementing your own from scratch +may be required. + +| |Users should not directly use getter methods from a `TaskConfigurer` directly
unless they are using it to supply implementations to be exposed as Spring Beans.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.8. Task Name + +In most cases, the name of the task is the application name as configured in Spring +Boot. However, there are some cases where you may want to map the run of a task to a +different name. Spring Cloud Data Flow is an example of this (because you probably want +the task to be run with the name of the task definition). Because of this, we offer the +ability to customize how the task is named, through the `TaskNameResolver` interface. + +By default, Spring Cloud Task provides the `SimpleTaskNameResolver`, which uses the +following options (in order of precedence): + +1. A Spring Boot property (configured in any of the ways Spring Boot allows) called`spring.cloud.task.name`. + +2. The application name as resolved using Spring Boot’s rules (obtained through`ApplicationContext#getId`). + +### 8.9. Task Execution Listener + +`TaskExecutionListener` lets you register listeners for specific events that occur during +the task lifecycle. To do so, create a class that implements the`TaskExecutionListener` interface. The class that implements the `TaskExecutionListener`interface is notified of the following events: + +* `onTaskStartup`: Prior to storing the `TaskExecution` into the `TaskRepository`. + +* `onTaskEnd`: Prior to updating the `TaskExecution` entry in the `TaskRepository` and + marking the final state of the task. + +* `onTaskFailed`: Prior to the `onTaskEnd` method being invoked when an unhandled + exception is thrown by the task. 
+ +Spring Cloud Task also lets you add `TaskExecution` Listeners to methods within a bean +by using the following method annotations: + +* `@BeforeTask`: Prior to the storing the `TaskExecution` into the `TaskRepository` + +* `@AfterTask`: Prior to the updating of the `TaskExecution` entry in the `TaskRepository`marking the final state of the task. + +* `@FailedTask`: Prior to the `@AfterTask` method being invoked when an unhandled + exception is thrown by the task. + +The following example shows the three annotations in use: + +``` + public class MyBean { + + @BeforeTask + public void methodA(TaskExecution taskExecution) { + } + + @AfterTask + public void methodB(TaskExecution taskExecution) { + } + + @FailedTask + public void methodC(TaskExecution taskExecution, Throwable throwable) { + } +} +``` + +| |Inserting an `ApplicationListener` earlier in the chain than `TaskLifecycleListener` exists may cause unexpected effects.| +|---|-------------------------------------------------------------------------------------------------------------------------| + +#### 8.9.1. Exceptions Thrown by Task Execution Listener + +If an exception is thrown by a `TaskExecutionListener` event handler, all listener +processing for that event handler stops. For example, if three `onTaskStartup` listeners +have started and the first `onTaskStartup` event handler throws an exception, the other +two `onTaskStartup` methods are not called. However, the other event handlers (`onTaskEnd`and `onTaskFailed`) for the `TaskExecutionListeners` are called. + +The exit code returned when a exception is thrown by a `TaskExecutionListener`event handler is the exit code that was reported by the[ExitCodeEvent](https://docs.spring.io/spring-boot/docs/current/api/org/springframework/boot/ExitCodeEvent.html). 
+If no `ExitCodeEvent` is emitted, the Exception thrown is evaluated to see +if it is of type[ExitCodeGenerator](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-application-exit). +If so, it returns the exit code from the `ExitCodeGenerator`. Otherwise, `1`is returned. + +In the case that an exception is thrown in an `onTaskStartup` method, the exit code for the application will be `1`. +If an exception is thrown in either a `onTaskEnd` or `onTaskFailed`method, the exit code for the application will be the one established using the rules enumerated above. + +| |In the case of an exception being thrown in a `onTaskStartup`, `onTaskEnd`, or `onTaskFailed`you can not override the exit code for the application using `ExitCodeExceptionMapper`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.9.2. Exit Messages + +You can set the exit message for a task programmatically by using a`TaskExecutionListener`. This is done by setting the `TaskExecution’s` `exitMessage`, +which then gets passed into the `TaskExecutionListener`. The following example shows +a method that is annotated with the `@AfterTask` `ExecutionListener` : + +``` +@AfterTask +public void afterMe(TaskExecution taskExecution) { + taskExecution.setExitMessage("AFTER EXIT MESSAGE"); +} +``` + +An `ExitMessage` can be set at any of the listener events (`onTaskStartup`,`onTaskFailed`, and `onTaskEnd`). The order of precedence for the three listeners follows: + +1. `onTaskEnd` + +2. `onTaskFailed` + +3. `onTaskStartup` + +For example, if you set an `exitMessage` for the `onTaskStartup` and `onTaskFailed`listeners and the task ends without failing, the `exitMessage` from the `onTaskStartup`is stored in the repository. Otherwise, if a failure occurs, the `exitMessage` from +the `onTaskFailed` is stored. 
Also if you set the `exitMessage` with an`onTaskEnd` listener, the `exitMessage` from the `onTaskEnd` supersedes +the exit messages from both the `onTaskStartup` and `onTaskFailed`. + +### 8.10. Restricting Spring Cloud Task Instances + +Spring Cloud Task lets you establish that only one task with a given task name can be run +at a time. To do so, you need to establish the [task name](#features-task-name) and set`spring.cloud.task.single-instance-enabled=true` for each task execution. While the first +task execution is running, any other time you try to run a task with the same[task name](#features-task-name) and`spring.cloud.task.single-instance-enabled=true`, the +task fails with the following error message: `Task with name "application" is already +running.` The default value for `spring.cloud.task.single-instance-enabled` is `false`. The +following example shows how to set `spring.cloud.task.single-instance-enabled` to `true`: + +`spring.cloud.task.single-instance-enabled=true or false` + +To use this feature, you must add the following Spring Integration dependencies to your +application: + +``` + + org.springframework.integration + spring-integration-core + + + org.springframework.integration + spring-integration-jdbc + +``` + +| |The exit code for the application will be 1 if the task fails because this feature
is enabled and another task is running with the same task name.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.11. Disabling Spring Cloud Task Auto Configuration + +In cases where Spring Cloud Task should not be auto configured for an implementation, you can disable Task’s auto configuration. +This can be done either by adding the following annotation to your Task application: + +``` +@EnableAutoConfiguration(exclude={SimpleTaskAutoConfiguration.class}) +``` + +You may also disable Task auto configuration by setting the `spring.cloud.task.autoconfiguration.enabled` property to `false`. + +### 8.12. Closing the Context + +If the application requires the `ApplicationContext` to be closed at the +completion of a task (all `*Runner#run` methods have been called and the task +repository has been updated), set the property `spring.cloud.task.closecontextEnabled`to `true`. + +Another case to close the context is when the Task Execution completes however the application does not terminate. +In these cases the context is held open because a thread has been allocated +(for example: if you are using a TaskExecutor). In these cases +set the `spring.cloud.task.closecontextEnabled` property to `true` when launching your task. +This will close the application’s context once the task is complete. +Thus allowing the application to terminate. + +# Batch + +This section goes into more detail about Spring Cloud Task’s integration with Spring +Batch. Tracking the association between a job execution and the task in which it was +executed as well as remote partitioning through Spring Cloud Deployer are covered in +this section. + +## 9. Associating a Job Execution to the Task in which It Was Executed + +Spring Boot provides facilities for the execution of batch jobs within an über-jar. 
+Spring Boot’s support of this functionality lets a developer execute multiple batch jobs +within that execution. Spring Cloud Task provides the ability to associate the execution +of a job (a job execution) with a task’s execution so that one can be traced back to the +other. + +Spring Cloud Task achieves this functionality by using the `TaskBatchExecutionListener`. +By default, +this listener is auto configured in any context that has both a Spring Batch Job +configured (by having a bean of type `Job` defined in the context) and the`spring-cloud-task-batch` jar on the classpath. The listener is injected into all jobs +that meet those conditions. + +### 9.1. Overriding the TaskBatchExecutionListener + +To prevent the listener from being injected into any batch jobs within the current +context, you can disable the autoconfiguration by using standard Spring Boot mechanisms. + +To only have the listener injected into particular jobs within the context, override the`batchTaskExecutionListenerBeanPostProcessor` and provide a list of job bean IDs, as shown +in the following example: + +``` +public TaskBatchExecutionListenerBeanPostProcessor batchTaskExecutionListenerBeanPostProcessor() { + TaskBatchExecutionListenerBeanPostProcessor postProcessor = + new TaskBatchExecutionListenerBeanPostProcessor(); + + postProcessor.setJobNames(Arrays.asList(new String[] {"job1", "job2"})); + + return postProcessor; +} +``` + +| |You can find a sample batch application in the samples module of the Spring Cloud
Task Project,[here](https://github.com/spring-cloud/spring-cloud-task/tree/master/spring-cloud-task-samples/batch-job).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 10. Remote Partitioning + +Spring Cloud Deployer provides facilities for launching Spring Boot-based applications on +most cloud infrastructures. The `DeployerPartitionHandler` and`DeployerStepExecutionHandler` delegate the launching of worker step executions to Spring +Cloud Deployer. + +To configure the `DeployerStepExecutionHandler`, you must provide a `Resource`representing the Spring Boot über-jar to be executed, a `TaskLauncher`, and a`JobExplorer`. You can configure any environment properties as well as the max number of +workers to be executing at once, the interval to poll for the results (defaults to 10 +seconds), and a timeout (defaults to -1 or no timeout). 
The following example shows how +configuring this `PartitionHandler` might look: + +``` +@Bean +public PartitionHandler partitionHandler(TaskLauncher taskLauncher, + JobExplorer jobExplorer) throws Exception { + + MavenProperties mavenProperties = new MavenProperties(); + mavenProperties.setRemoteRepositories(new HashMap<>(Collections.singletonMap("springRepo", + new MavenProperties.RemoteRepository(repository)))); + + Resource resource = + MavenResource.parse(String.format("%s:%s:%s", + "io.spring.cloud", + "partitioned-batch-job", + "1.1.0.RELEASE"), mavenProperties); + + DeployerPartitionHandler partitionHandler = + new DeployerPartitionHandler(taskLauncher, jobExplorer, resource, "workerStep"); + + List commandLineArgs = new ArrayList<>(3); + commandLineArgs.add("--spring.profiles.active=worker"); + commandLineArgs.add("--spring.cloud.task.initialize.enable=false"); + commandLineArgs.add("--spring.batch.initializer.enabled=false"); + + partitionHandler.setCommandLineArgsProvider( + new PassThroughCommandLineArgsProvider(commandLineArgs)); + partitionHandler.setEnvironmentVariablesProvider(new NoOpEnvironmentVariablesProvider()); + partitionHandler.setMaxWorkers(2); + partitionHandler.setApplicationName("PartitionedBatchJobTask"); + + return partitionHandler; +} +``` + +| |When passing environment variables to partitions, each partition may
be on a different machine with different environment settings.
Consequently, you should pass only those environment variables that are required.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Notice in the example above that we have set the maximum number of workers to 2. +Setting the maximum of workers establishes the maximum number of +partitions that should be running at one time. + +The `Resource` to be executed is expected to be a Spring Boot über-jar with a`DeployerStepExecutionHandler` configured as a `CommandLineRunner` in the current context. +The repository enumerated in the preceding example should be the remote repository in +which the über-jar is located. Both the manager and worker are expected to have visibility +into the same data store being used as the job repository and task repository. Once the +underlying infrastructure has bootstrapped the Spring Boot jar and Spring Boot has +launched the `DeployerStepExecutionHandler`, the step handler executes the requested`Step`. The following example shows how to configure the `DeployerStepExecutionHandler`: + +``` +@Bean +public DeployerStepExecutionHandler stepExecutionHandler(JobExplorer jobExplorer) { + DeployerStepExecutionHandler handler = + new DeployerStepExecutionHandler(this.context, jobExplorer, this.jobRepository); + + return handler; +} +``` + +| |You can find a sample remote partition application in the samples module of the
Spring Cloud Task project,[here](https://github.com/spring-cloud/spring-cloud-task/tree/master/spring-cloud-task-samples/partitioned-batch-job).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 10.1. Notes on Developing a Batch-partitioned application for the Kubernetes Platform + +* When deploying partitioned apps on the Kubernetes platform, you must use the following + dependency for the Spring Cloud Kubernetes Deployer: + + ``` + + org.springframework.cloud + spring-cloud-starter-deployer-kubernetes + + ``` + +* The application name for the task application and its partitions need to follow + the following regex pattern: `[a-z0-9]([-a-z0-9]*[a-z0-9])`. + Otherwise, an exception is thrown. + +### 10.2. Notes on Developing a Batch-partitioned Application for the Cloud Foundry Platform + +* When deploying partitioned apps on the Cloud Foundry platform, you must use the + following dependencies for the Spring Cloud Foundry Deployer: + + ``` + + org.springframework.cloud + spring-cloud-deployer-cloudfoundry + + + io.projectreactor + reactor-core + 3.1.5.RELEASE + + + io.projectreactor.ipc + reactor-netty + 0.7.5.RELEASE + + ``` + +* When configuring the partition handler, Cloud Foundry Deployment + environment variables need to be established so that the partition handler + can start the partitions. 
The following list shows the required environment + variables: + + * `spring_cloud_deployer_cloudfoundry_url` + + * `spring_cloud_deployer_cloudfoundry_org` + + * `spring_cloud_deployer_cloudfoundry_space` + + * `spring_cloud_deployer_cloudfoundry_domain` + + * `spring_cloud_deployer_cloudfoundry_username` + + * `spring_cloud_deployer_cloudfoundry_password` + + * `spring_cloud_deployer_cloudfoundry_services` + + * `spring_cloud_deployer_cloudfoundry_taskTimeout` + +An example set of deployment environment variables for a partitioned task that +uses a `mysql` database service might resemble the following: + +``` +spring_cloud_deployer_cloudfoundry_url=https://api.local.pcfdev.io +spring_cloud_deployer_cloudfoundry_org=pcfdev-org +spring_cloud_deployer_cloudfoundry_space=pcfdev-space +spring_cloud_deployer_cloudfoundry_domain=local.pcfdev.io +spring_cloud_deployer_cloudfoundry_username=admin +spring_cloud_deployer_cloudfoundry_password=admin +spring_cloud_deployer_cloudfoundry_services=mysql +spring_cloud_deployer_cloudfoundry_taskTimeout=300 +``` + +| |When using PCF-Dev, the following environment variable is also required:`spring_cloud_deployer_cloudfoundry_skipSslValidation=true`| +|---|-----------------------------------------------------------------------------------------------------------------------------------| + +## 11. Batch Informational Messages + +Spring Cloud Task provides the ability for batch jobs to emit informational messages. The +“[Spring Batch Events](#stream-integration-batch-events)” section covers this feature in detail. + +## 12. Batch Job Exit Codes + +As discussed [earlier](#features-lifecycle-exit-codes), Spring Cloud Task +applications support the ability to record the exit code of a task execution. However, in +cases where you run a Spring Batch Job within a task, regardless of how the Batch Job +Execution completes, the result of the task is always zero when using the default +Batch/Boot behavior. 
Keep in mind that a task is a boot application and that the exit code +returned from the task is the same as a boot application. +To override this behavior and allow the task to return an exit code other than zero when a +batch job returns an[BatchStatus](https://docs.spring.io/spring-batch/4.0.x/reference/html/step.html#batchStatusVsExitStatus)of `FAILED`, set `spring.cloud.task.batch.fail-on-job-failure` to `true`. Then the exit code +can be 1 (the default) or be based on the[specified`ExitCodeGenerator`](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-spring-application.html#boot-features-application-exit)) + +This functionality uses a new `CommandLineRunner` that replaces the one provided by Spring +Boot. By default, it is configured with the same order. However, if you want to customize +the order in which the `CommandLineRunner` is run, you can set its order by setting the`spring.cloud.task.batch.commandLineRunnerOrder` property. To have your task return the +exit code based on the result of the batch job execution, you need to write your own`CommandLineRunner`. + +# Single Step Batch Job Starter + +This section goes into how to develop a Spring Batch `Job` with a single `Step` by using the +starter included in Spring Cloud Task. This starter lets you use configuration +to define an `ItemReader`, an `ItemWriter`, or a full single-step Spring Batch `Job`. +For more about Spring Batch and its capabilities, see the[Spring Batch documentation](https://spring.io/projects/spring-batch). + +To obtain the starter for Maven, add the following to your build: + +``` + + org.springframework.cloud + spring-cloud-starter-single-step-batch-job + 2.3.0 + +``` + +To obtain the starter for Gradle, add the following to your build: + +``` +compile "org.springframework.cloud:spring-cloud-starter-single-step-batch-job:2.3.0" +``` + +## 13. 
Defining a Job + +You can use the starter to define as little as an `ItemReader` or an `ItemWriter` or as much as a full `Job`. +In this section, we define which properties are required to be defined to configure a`Job`. + +### 13.1. Properties + +To begin, the starter provides a set of properties that let you configure the basics of a Job with one Step: + +| Property | Type |Default Value| Description | +|----------------------------|---------|-------------|----------------------------------------------------| +| `spring.batch.job.jobName` |`String` | `null` | The name of the job. | +|`spring.batch.job.stepName` |`String` | `null` | The name of the step. | +|`spring.batch.job.chunkSize`|`Integer`| `null` |The number of items to be processed per transaction.| + +With the above properties configured, you have a job with a single, chunk-based step. +This chunk-based step reads, processes, and writes `Map` instances as the +items. However, the step does not yet do anything. You need to configure an `ItemReader`, an +optional `ItemProcessor`, and an `ItemWriter` to give it something to do. To configure one +of these, you can either use properties and configure one of the options that has provided +autoconfiguration or you can configure your own with the standard Spring configuration +mechanisms. + +| |If you configure your own, the input and output types must match the others in the step.
The `ItemReader` implementations and `ItemWriter` implementations in this starter all use
a `Map` as the input and the output item.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 14. Autoconfiguration for ItemReader Implementations + +This starter provides autoconfiguration for four different `ItemReader` implementations:`AmqpItemReader`, `FlatFileItemReader`, `JdbcCursorItemReader`, and `KafkaItemReader`. +In this section, we outline how to configure each of these by using the provided +autoconfiguration. + +### 14.1. AmqpItemReader + +You can read from a queue or topic with AMQP by using the `AmqpItemReader`. The +autoconfiguration for this `ItemReader` implementation is dependent upon two sets of +configuration. The first is the configuration of an `AmqpTemplate`. You can either +configure this yourself or use the autoconfiguration provided by Spring Boot. See the[Spring Boot AMQP documentation](https://docs.spring.io/spring-boot/docs/2.4.x/reference/htmlsingle/#boot-features-amqp). +Once you have configured the `AmqpTemplate`, you can enable the batch capabilities to support it +by setting the following properties: + +| Property | Type |Default Value| Description | +|------------------------------------------------------|---------|-------------|---------------------------------------------------------------------------------------| +| `spring.batch.job.amqpitemreader.enabled` |`boolean`| `false` | If `true`, the autoconfiguration will execute. | +|`spring.batch.job.amqpitemreader.jsonConverterEnabled`|`boolean`| `true` |Indicates if the `Jackson2JsonMessageConverter` should be registered to parse messages.| + +For more information, see the [`AmqpItemReader` documentation](https://docs.spring.io/spring-batch/docs/4.3.x/api/org/springframework/batch/item/amqp/AmqpItemReader.html). + +### 14.2. 
FlatFileItemReader + +`FlatFileItemReader` lets you read from flat files (such as CSVs +and other file formats). To read from a file, you can provide some components +yourself through normal Spring configuration (`LineTokenizer`, `RecordSeparatorPolicy`,`FieldSetMapper`, `LineMapper`, or `SkippedLinesCallback`). You can also use the +following properties to configure the reader: + +| Property | Type | Default Value | Description | +|------------------------------------------------------|---------------|------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `spring.batch.job.flatfileitemreader.saveState` | `boolean` | `true` | Determines if the state should be saved for restarts. | +| `spring.batch.job.flatfileitemreader.name` | `String` | `null` | Name used to provide unique keys in the `ExecutionContext`. | +| `spring.batch.job.flatfileitemreader.maxItemcount` | `int` | `Integer.MAX_VALUE` | Maximum number of items to be read from the file. | +|`spring.batch.job.flatfileitemreader.currentItemCount`| `int` | 0 | Number of items that have already been read. Used on restarts. | +| `spring.batch.job.flatfileitemreader.comments` |`List` | empty List | A list of Strings that indicate commented lines (lines to be ignored) in the file. | +| `spring.batch.job.flatfileitemreader.resource` | `Resource` | `null` | The resource to be read. | +| `spring.batch.job.flatfileitemreader.strict` | `boolean` | `true` | If set to `true`, the reader throws an exception if the resource is not found. | +| `spring.batch.job.flatfileitemreader.encoding` | `String` | `FlatFileItemReader.DEFAULT_CHARSET` | Encoding to be used when reading the file. | +| `spring.batch.job.flatfileitemreader.linesToSkip` | `int` | 0 | Indicates the number of lines to skip at the start of a file. 
| +| `spring.batch.job.flatfileitemreader.delimited` | `boolean` | `false` | Indicates whether the file is a delimited file (CSV and other formats). Only one of this property or `spring.batch.job.flatfileitemreader.fixedLength` can be `true` at the same time. | +| `spring.batch.job.flatfileitemreader.delimiter` | `String` | `DelimitedLineTokenizer.DELIMITER_COMMA` | If reading a delimited file, indicates the delimiter to parse on. | +| `spring.batch.job.flatfileitemreader.quoteCharacter` | `char` |`DelimitedLineTokenizer.DEFAULT_QUOTE_CHARACTER`| Used to determine the character used to quote values. | +| `spring.batch.job.flatfileitemreader.includedFields` |`List`| empty list | A list of indices to determine which fields in a record to include in the item. | +| `spring.batch.job.flatfileitemreader.fixedLength` | `boolean` | `false` | Indicates if a file’s records are parsed by column numbers. Only one of this property or `spring.batch.job.flatfileitemreader.delimited` can be `true` at the same time. | +| `spring.batch.job.flatfileitemreader.ranges` | `List` | empty list |List of column ranges by which to parse a fixed width record. See the [Range documentation](https://docs.spring.io/spring-batch/docs/4.3.x/api/org/springframework/batch/item/file/transform/Range.html).| +| `spring.batch.job.flatfileitemreader.names` | `String []` | `null` | List of names for each field parsed from a record. These names are the keys in the `Map` in the items returned from this `ItemReader`. | +| `spring.batch.job.flatfileitemreader.parsingStrict` | `boolean` | `true` | If set to `true`, the mapping fails if the fields cannot be mapped. | + +See the [`FlatFileItemReader` documentation](https://docs.spring.io/spring-batch/docs/4.3.x/api/org/springframework/batch/item/file/FlatFileItemReader.html). + +### 14.3. 
JdbcCursorItemReader + +The `JdbcCursorItemReader` runs a query against a relational database and iterates over +the resulting cursor (`ResultSet`) to provide the resulting items. This autoconfiguration +lets you provide a `PreparedStatementSetter`, a `RowMapper`, or both. You +can also use the following properties to configure a `JdbcCursorItemReader`: + +| Property | Type | Default Value | Description | +|-------------------------------------------------------------------|---------|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `spring.batch.job.jdbccursoritemreader.saveState` |`boolean`| `true` | Determines whether the state should be saved for restarts. | +| `spring.batch.job.jdbccursoritemreader.name` |`String` | `null` | Name used to provide unique keys in the `ExecutionContext`. | +| `spring.batch.job.jdbccursoritemreader.maxItemcount` | `int` |`Integer.MAX_VALUE`| Maximum number of items to be read from the file. | +| `spring.batch.job.jdbccursoritemreader.currentItemCount` | `int` | 0 | Number of items that have already been read. Used on restarts. | +| `spring.batch.job.jdbccursoritemreader.fetchSize` | `int` | |A hint to the driver to indicate how many records to retrieve per call to the database system. For best performance, you usually want to set it to match the chunk size.| +| `spring.batch.job.jdbccursoritemreader.maxRows` | `int` | | Maximum number of items to read from the database. | +| `spring.batch.job.jdbccursoritemreader.queryTimeout` | `int` | | Number of milliseconds for the query to timeout. | +| `spring.batch.job.jdbccursoritemreader.ignoreWarnings` |`boolean`| `true` | Determines whether the reader should ignore SQL warnings when processing. 
| +| `spring.batch.job.jdbccursoritemreader.verifyCursorPosition` |`boolean`| `true` | Indicates whether the cursor’s position should be verified after each read to verify that the `RowMapper` did not advance the cursor. | +| `spring.batch.job.jdbccursoritemreader.driverSupportsAbsolute` |`boolean`| `false` | Indicates whether the driver supports absolute positioning of a cursor. | +|`spring.batch.job.jdbccursoritemreader.useSharedExtendedConnection`|`boolean`| `false` | Indicates whether the connection is shared with other processing (and is therefore part of a transaction). | +| `spring.batch.job.jdbccursoritemreader.sql` |`String` | `null` | SQL query from which to read. | + +See the [`JdbcCursorItemReader` documentation](https://docs.spring.io/spring-batch/docs/4.3.x/api/org/springframework/batch/item/database/JdbcCursorItemReader.html). + +### 14.4. KafkaItemReader + +Ingesting a partition of data from a Kafka topic is useful and exactly what the`KafkaItemReader` can do. To configure a `KafkaItemReader`, two pieces +of configuration are required. First, configuring Kafka with Spring Boot’s Kafka +autoconfiguration is required (see the[Spring Boot Kafka documentation](https://docs.spring.io/spring-boot/docs/2.4.x/reference/htmlsingle/#boot-features-kafka)). +Once you have configured the Kafka properties from Spring Boot, you can configure the `KafkaItemReader`itself by setting the following properties: + +| Property | Type |Default Value| Description | +|-------------------------------------------------------|---------------|-------------|-----------------------------------------------------------| +| `spring.batch.job.kafkaitemreader.name` | `String` | `null` |Name used to provide unique keys in the `ExecutionContext`.| +| `spring.batch.job.kafkaitemreader.topic` | `String` | `null` | Name of the topic from which to read. | +| `spring.batch.job.kafkaitemreader.partitions` |`List`| empty list | List of partition indices from which to read. 
| +|`spring.batch.job.kafkaitemreader.pollTimeOutInSeconds`| `long` | 30 | Timeout for the `poll()` operations. | +| `spring.batch.job.kafkaitemreader.saveState` | `boolean` | `true` |Determines whether the state should be saved for restarts. | + +See the [`KafkaItemReader` documentation](https://docs.spring.io/spring-batch/docs/4.3.x/api/org/springframework/batch/item/kafka/KafkaItemReader.html). + +## 15. ItemProcessor Configuration + +The single-step batch job autoconfiguration accepts an `ItemProcessor` if one +is available within the `ApplicationContext`. If one is found of the correct type +(`ItemProcessor, Map>`), it is autowired +into the step. + +## 16. Autoconfiguration for ItemWriter implementations + +This starter provides autoconfiguration for `ItemWriter` implementations that +match the supported `ItemReader` implementations: `AmqpItemWriter`,`FlatFileItemWriter`, `JdbcItemWriter`, and `KafkaItemWriter`. This section +covers how to use autoconfiguration to configure a supported `ItemWriter`. + +### 16.1. AmqpItemWriter + +To write to a RabbitMQ queue, you need two sets of configuration. First, you need an`AmqpTemplate`. The easiest way to get this is by using Spring Boot’s +RabbitMQ autoconfiguration. See the [Spring Boot RabbitMQ documentation](https://docs.spring.io/spring-boot/docs/2.4.x/reference/htmlsingle/#boot-features-amqp). +Once you have configured the `AmqpTemplate`, you can configure the `AmqpItemWriter` by setting the +following properties: + +| Property | Type |Default Value| Description | +|------------------------------------------------------|---------|-------------|------------------------------------------------------------------------------------------| +| `spring.batch.job.amqpitemwriter.enabled` |`boolean`| `false` | If `true`, the autoconfiguration runs. 
| +|`spring.batch.job.amqpitemwriter.jsonConverterEnabled`|`boolean`| `true` |Indicates whether `Jackson2JsonMessageConverter` should be registered to convert messages.| + +### 16.2. FlatFileItemWriter + +To write a file as the output of the step, you can configure `FlatFileItemWriter`. +Autoconfiguration accepts components that have been explicitly configured (such as `LineAggregator`,`FieldExtractor`, `FlatFileHeaderCallback`, or a `FlatFileFooterCallback`) and +components that have been configured by setting the following properties specified: + +| Property | Type | Default Value | Description | +|----------------------------------------------------------|-----------|-------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------| +| `spring.batch.job.flatfileitemwriter.resource` |`Resource` | `null` | The resource to be read. | +| `spring.batch.job.flatfileitemwriter.delimited` | `boolean` | `false` | Indicates whether the output file is a delimited file. If `true`, `spring.batch.job.flatfileitemwriter.formatted` must be `false`. | +| `spring.batch.job.flatfileitemwriter.formatted` | `boolean` | `false` | Indicates whether the output file a formatted file. If `true`, `spring.batch.job.flatfileitemwriter.delimited` must be `false`. | +| `spring.batch.job.flatfileitemwriter.format` | `String` | `null` | The format used to generate the output for a formatted file. The formatting is performed by using `String.format`. | +| `spring.batch.job.flatfileitemwriter.locale` | `Locale` | `Locale.getDefault()` | The `Locale` to be used when generating the file. | +| `spring.batch.job.flatfileitemwriter.maximumLength` | `int` | 0 | Max length of the record. If 0, the size is unbounded. | +| `spring.batch.job.flatfileitemwriter.minimumLength` | `int` | 0 | The minimum record length. 
| +| `spring.batch.job.flatfileitemwriter.delimiter` | `String` | `,` | The `String` used to delimit fields in a delimited file. | +| `spring.batch.job.flatfileitemwriter.encoding` | `String` | `FlatFileItemReader.DEFAULT_CHARSET` | Encoding to use when writing the file. | +| `spring.batch.job.flatfileitemwriter.forceSync` | `boolean` | `false` | Indicates whether a file should be force-synced to the disk on flush. | +| `spring.batch.job.flatfileitemwriter.names` |`String []`| `null` |List of names for each field parsed from a record. These names are the keys in the `Map` for the items received by this `ItemWriter`.| +| `spring.batch.job.flatfileitemwriter.append` | `boolean` | `false` | Indicates whether a file should be appended to if the output file is found. | +| `spring.batch.job.flatfileitemwriter.lineSeparator` | `String` |`FlatFileItemWriter.DEFAULT_LINE_SEPARATOR`| What `String` to use to separate lines in the output file. | +| `spring.batch.job.flatfileitemwriter.name` | `String` | `null` | Name used to provide unique keys in the `ExecutionContext`. | +| `spring.batch.job.flatfileitemwriter.saveState` | `boolean` | `true` | Determines whether the state should be saved for restarts. | +|`spring.batch.job.flatfileitemwriter.shouldDeleteIfEmpty` | `boolean` | `false` | If set to `true`, an empty file (there is no output) is deleted when the job completes. | +|`spring.batch.job.flatfileitemwriter.shouldDeleteIfExists`| `boolean` | `true` | If set to `true` and a file is found where the output file should be, it is deleted before the step begins. | +| `spring.batch.job.flatfileitemwriter.transactional` | `boolean` |`FlatFileItemWriter.DEFAULT_TRANSACTIONAL` | Indicates whether the reader is a transactional queue (indicating that the items read are returned to the queue upon a failure). | + +See the [`FlatFileItemWriter` documentation](https://docs.spring.io/spring-batch/docs/4.3.x/api/org/springframework/batch/item/file/FlatFileItemWriter.html). + +### 16.3. 
JdbcBatchItemWriter + +To write the output of a step to a relational database, this starter provides the ability +to autoconfigure a `JdbcBatchItemWriter`. The autoconfiguration lets you provide your +own `ItemPreparedStatementSetter` or `ItemSqlParameterSourceProvider` and +configuration options by setting the following properties: + +| Property | Type |Default Value| Description | +|----------------------------------------------------|---------|-------------|---------------------------------------------------------------------------------| +| `spring.batch.job.jdbcbatchitemwriter.name` |`String` | `null` | Name used to provide unique keys in the `ExecutionContext`. | +| `spring.batch.job.jdbcbatchitemwriter.sql` |`String` | `null` | The SQL used to insert each item. | +|`spring.batch.job.jdbcbatchitemwriter.assertUpdates`|`boolean`| `true` |Whether to verify that every insert results in the update of at least one record.| + +See the [`JdbcBatchItemWriter` documentation](https://docs.spring.io/spring-batch/docs/4.3.x/api/org/springframework/batch/item/database/JdbcBatchItemWriter.html). + +### 16.4. KafkaItemWriter + +To write step output to a Kafka topic, you need `KafkaItemWriter`. This starter +provides autoconfiguration for a `KafkaItemWriter` by using facilities from two places. +The first is Spring Boot’s Kafka autoconfiguration. (See the [Spring Boot Kafka documentation](https://docs.spring.io/spring-boot/docs/2.4.x/reference/htmlsingle/#boot-features-kafka).) +Second, this starter lets you configure two properties on the writer. + +| Property | Type |Default Value| Description | +|-----------------------------------------|---------|-------------|----------------------------------------------------------------------------------------------| +|`spring.batch.job.kafkaitemwriter.topic` |`String` | `null` | The Kafka topic to which to write. 
| +|`spring.batch.job.kafkaitemwriter.delete`|`boolean`| `false` |Whether the items being passed to the writer are all to be sent as delete events to the topic.| + +For more about the configuration options for the `KafkaItemWriter`, see the [`KafkaItemWiter` documentation](https://docs.spring.io/spring-batch/docs/4.3.x/api/org/springframework/batch/item/kafka/KafkaItemWriter.html). + +# Spring Cloud Stream Integration + +A task by itself can be useful, but integration of a task into a larger ecosystem lets it +be useful for more complex processing and orchestration. This section +covers the integration options for Spring Cloud Task with Spring Cloud Stream. + +## 17. Launching a Task from a Spring Cloud Stream + +You can launch tasks from a stream. To do so, create a sink that listens for a message +that contains a `TaskLaunchRequest` as its payload. The `TaskLaunchRequest` contains: + +* `uri`: To the task artifact that is to be executed. + +* `applicationName`: The name that is associated with the task. If no + applicationName is set, the `TaskLaunchRequest` generates a task name + comprised of the following: `Task-`. + +* `commandLineArguments`: A list containing the command line arguments for the task. + +* `environmentProperties`: A map containing the environment variables to be used by the + task. + +* `deploymentProperties`: A map containing the properties that are used by the deployer to + deploy the task. + +| |If the payload is of a different type, the sink throws an exception.| +|---|--------------------------------------------------------------------| + +For example, a stream can be created that has a processor that takes in data from an +HTTP source and creates a `GenericMessage` that contains the `TaskLaunchRequest` and sends +the message to its output channel. The task sink would then receive the message from its +input channnel and then launch the task. 
+ +To create a taskSink, you need only create a Spring Boot application that includes the`EnableTaskLauncher` annotation, as shown in the following example: + +``` +@SpringBootApplication +@EnableTaskLauncher +public class TaskSinkApplication { + public static void main(String[] args) { + SpringApplication.run(TaskSinkApplication.class, args); + } +} +``` + +The [samples +module](https://github.com/spring-cloud/spring-cloud-task/tree/master/spring-cloud-task-samples) of the Spring Cloud Task project contains a sample Sink and Processor. To install +these samples into your local maven repository, run a maven build from the`spring-cloud-task-samples` directory with the `skipInstall` property set to `false`, as +shown in the following example: + +`mvn clean install` + +| |The `maven.remoteRepositories.springRepo.url` property must be set to the location
of the remote repository in which the über-jar is located. If not set, there is no remote
repository, so it relies upon the local repository only.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 17.1. Spring Cloud Data Flow + +To create a stream in Spring Cloud Data Flow, you must first register the Task Sink +Application we created. In the following example, we are registering the Processor and +Sink sample applications by using the Spring Cloud Data Flow shell: + +``` +app register --name taskSink --type sink --uri maven://io.spring.cloud:tasksink: +app register --name taskProcessor --type processor --uri maven:io.spring.cloud:taskprocessor: +``` + +The following example shows how to create a stream from the Spring Cloud Data Flow shell: + +``` +stream create foo --definition "http --server.port=9000|taskProcessor|taskSink" --deploy +``` + +## 18. Spring Cloud Task Events + +Spring Cloud Task provides the ability to emit events through a Spring Cloud Stream +channel when the task is run through a Spring Cloud Stream channel. A task listener is +used to publish the `TaskExecution` on a message channel named `task-events`. This feature +is autowired into any task that has `spring-cloud-stream`, `spring-cloud-stream-`, +and a defined task on its classpath. 
+ +| |To disable the event emitting listener, set the `spring.cloud.task.events.enabled`property to `false`.| +|---|------------------------------------------------------------------------------------------------------| + +With the appropriate classpath defined, the following task emits the `TaskExecution` as an +event on the `task-events` channel (at both the start and the end of the task): + +``` +@SpringBootApplication +public class TaskEventsApplication { + + public static void main(String[] args) { + SpringApplication.run(TaskEventsApplication.class, args); + } + + @Configuration + public static class TaskConfiguration { + + @Bean + public CommandLineRunner commandLineRunner() { + return new CommandLineRunner() { + @Override + public void run(String... args) throws Exception { + System.out.println("The CommandLineRunner was executed"); + } + }; + } + } +} +``` + +| |A binder implementation is also required to be on the classpath.| +|---|----------------------------------------------------------------| + +| |A sample task event application can be found in the samples module
of the Spring Cloud Task Project,[here](https://github.com/spring-cloud/spring-cloud-task/tree/master/spring-cloud-task-samples/task-events).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 18.1. Disabling Specific Task Events + +To disable task events, you can set the `spring.cloud.task.events.enabled` property to`false`. + +## 19. Spring Batch Events + +When executing a Spring Batch job through a task, Spring Cloud Task can be configured to +emit informational messages based on the Spring Batch listeners available in Spring Batch. +Specifically, the following Spring Batch listeners are autoconfigured into each batch job +and emit messages on the associated Spring Cloud Stream channels when run through Spring +Cloud Task: + +* `JobExecutionListener` listens for `job-execution-events` + +* `StepExecutionListener` listens for `step-execution-events` + +* `ChunkListener` listens for `chunk-events` + +* `ItemReadListener` listens for `item-read-events` + +* `ItemProcessListener` listens for `item-process-events` + +* `ItemWriteListener` listens for `item-write-events` + +* `SkipListener` listens for `skip-events` + +These listeners are autoconfigured into any `AbstractJob` when the appropriate +beans (a `Job` and a `TaskLifecycleListener`) exist in the context. Configuration to +listen to these events is handled the same way binding to any other Spring +Cloud Stream channel is done. Our task (the one running the batch job) serves as a`Source`, with the listening applications serving as either a `Processor` or a `Sink`. + +An example could be to have an application listening to the `job-execution-events` channel +for the start and stop of a job. 
To configure the listening application, you would +configure the input to be `job-execution-events` as follows: + +`spring.cloud.stream.bindings.input.destination=job-execution-events` + +| |A binder implementation is also required to be on the classpath.| +|---|----------------------------------------------------------------| + +| |A sample batch event application can be found in the samples module
of the Spring Cloud Task Project,[here](https://github.com/spring-cloud/spring-cloud-task/tree/master/spring-cloud-task-samples/batch-events).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 19.1. Sending Batch Events to Different Channels + +One of the options that Spring Cloud Task offers for batch events is the ability to alter +the channel to which a specific listener can emit its messages. To do so, use the +following configuration:`spring.cloud.stream.bindings..destination=`. For example, +if `StepExecutionListener` needs to emit its messages to another channel called`my-step-execution-events` instead of the default `step-execution-events`, you can add the +following configuration: + +`spring.cloud.stream.bindings.step-execution-events.destination=my-step-execution-events` + +### 19.2. Disabling Batch Events + +To disable the listener functionality for all batch events, use the following +configuration: + +`spring.cloud.task.batch.events.enabled=false` + +To disable a specific batch event, use the following configuration: + +`spring.cloud.task.batch.events..enabled=false`: + +The following listing shows individual listeners that you can disable: + +``` +spring.cloud.task.batch.events.job-execution.enabled=false +spring.cloud.task.batch.events.step-execution.enabled=false +spring.cloud.task.batch.events.chunk.enabled=false +spring.cloud.task.batch.events.item-read.enabled=false +spring.cloud.task.batch.events.item-process.enabled=false +spring.cloud.task.batch.events.item-write.enabled=false +spring.cloud.task.batch.events.skip.enabled=false +``` + +### 19.3. Emit Order for Batch Events + +By default, batch events have `Ordered.LOWEST_PRECEDENCE`. 
To change this value (for +example, to 5 ), use the following configuration: + +``` +spring.cloud.task.batch.events.job-execution-order=5 +spring.cloud.task.batch.events.step-execution-order=5 +spring.cloud.task.batch.events.chunk-order=5 +spring.cloud.task.batch.events.item-read-order=5 +spring.cloud.task.batch.events.item-process-order=5 +spring.cloud.task.batch.events.item-write-order=5 +spring.cloud.task.batch.events.skip-order=5 +``` + +# Appendices + +## 20. Task Repository Schema + +This appendix provides an ERD for the database schema used in the task repository. + +![task schema](https://docs.spring.io/spring-cloud-task/docs/2.4.1/reference/html/images/task_schema.png) + +### 20.1. Table Information + +TASK\_EXECUTION + +Stores the task execution information. + +| Column Name |Required| Type |Field Length| Notes | +|---------------------------|--------|--------|------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| TASK\_EXECUTION\_ID | TRUE | BIGINT | X | Spring Cloud Task Framework at app startup establishes the next available id as obtained from the `TASK_SEQ`. Or if the record is created outside of task then the value must be populated at record creation time. | +| START\_TIME | FALSE |DATETIME| X | Spring Cloud Task Framework at app startup establishes the value. | +| END\_TIME | FALSE |DATETIME| X | Spring Cloud Task Framework at app exit establishes the value. 
| +| TASK\_NAME | FALSE |VARCHAR | 100 | Spring Cloud Task Framework at app startup will set this to "Application" unless user establish the name using the spring.cloud.task.name as discussed [here](#features-task-name) | +| EXIT\_CODE | FALSE |INTEGER | X | Follows Spring Boot defaults unless overridden by the user as discussed [here](https://docs.spring.io/spring-cloud-task/docs/current/reference/#features-lifecycle-exit-codes). | +| EXIT\_MESSAGE | FALSE |VARCHAR | 2500 | User Defined as discussed [here](https://docs.spring.io/spring-cloud-task/docs/current/reference/#features-task-execution-listener-exit-messages). | +| ERROR\_MESSAGE | FALSE |VARCHAR | 2500 | Spring Cloud Task Framework at app exit establishes the value. | +| LAST\_UPDATED | TRUE |DATETIME| X | Spring Cloud Task Framework at app startup establishes the value. Or if the record is created outside of task then the value must be populated at record creation time. | +| EXTERNAL\_EXECUTION\_ID | FALSE |VARCHAR | 250 |If the `spring.cloud.task.external-execution-id` property is set then Spring Cloud Task Framework at app startup will set this to the value specified. More information can be found [here](#features-external_task_id)| +|PARENT\_TASK\_EXECUTION\_ID| FALSE | BIGINT | X | If the `spring.cloud.task.parent-execution-id` property is set then Spring Cloud Task Framework at app startup will set this to the value specified. More information can be found [here](#features-parent_task_id) | + +TASK\_EXECUTION\_PARAMS + +Stores the parameters used for a task execution + +| Column Name |Required| Type |Field Length| +|-------------------|--------|-------|------------| +|TASK\_EXECUTION\_ID| TRUE |BIGINT | X | +| TASK\_PARAM | FALSE |VARCHAR| 2500 | + +TASK\_TASK\_BATCH + +Used to link the task execution to the batch execution. 
+ +| Column Name |Required| Type |Field Length| +|-------------------|--------|------|------------| +|TASK\_EXECUTION\_ID| TRUE |BIGINT| X | +|JOB\_EXECUTION\_ID | TRUE |BIGINT| X | + +TASK\_LOCK + +Used for the `single-instance-enabled` feature discussed [here](#features-single-instance-enabled). + +| Column Name |Required| Type |Field Length| Notes | +|-------------|--------|--------|------------|----------------------------------------------------------------| +| LOCK\_KEY | TRUE | CHAR | 36 | UUID for the this lock | +| REGION | TRUE |VARCHAR | 100 | User can establish a group of locks using this field. | +| CLIENT\_ID | TRUE | CHAR | 36 |The task execution id that contains the name of the app to lock.| +|CREATED\_DATE| TRUE |DATETIME| X | The date that the entry was created | + +| |The DDL for setting up tables for each database type can be found [here](https://github.com/spring-cloud/spring-cloud-task/tree/master/spring-cloud-task-core/src/main/resources/org/springframework/cloud/task).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 20.2. SQL Server + +By default Spring Cloud Task uses a sequence table for determining the `TASK_EXECUTION_ID` for the `TASK_EXECUTION` table. +However, when launching multiple tasks simultaneously while using SQL Server, this can cause a deadlock to occur on the `TASK_SEQ` table. +The resolution is to drop the `TASK_EXECUTION_SEQ` table and create a sequence using the same name. For example: + +``` +DROP TABLE TASK_SEQ; + +CREATE SEQUENCE [DBO].[TASK_SEQ] AS BIGINT + START WITH 1 + INCREMENT BY 1; +``` + +| |Set the `START WITH` to a higher value than your current execution id.| +|---|----------------------------------------------------------------------| + +## 21. Building This Documentation + +This project uses Maven to generate this documentation. 
To generate it for
+yourself,
+run the following command: `$ ./mvnw clean package -P full`.
+
+## 22. Running a Task App on Cloud Foundry
+
+The simplest way to launch a Spring Cloud Task application as a task on Cloud Foundry
+is to use Spring Cloud Data Flow. Via Spring Cloud Data Flow you can register your task application,
+create a definition for it and then launch it. You then can track the task execution(s)
+via a RESTful API, the Spring Cloud Data Flow Shell, or the UI. To learn how to get started installing Data Flow
+follow the instructions in the [Getting Started](https://docs.spring.io/spring-cloud-dataflow/docs/current/reference/htmlsingle/#getting-started) section of the reference documentation. For info on how to register and launch tasks, see the [Lifecycle of a Task](https://docs.spring.io/spring-cloud-dataflow/docs/current/reference/htmlsingle/#_the_lifecycle_of_a_task) documentation.
+
diff --git a/docs/en/spring-cloud/spring-cloud-vault.md b/docs/en/spring-cloud/spring-cloud-vault.md
new file mode 100644
index 0000000000000000000000000000000000000000..0d911a357ecbfd2b3902fe4cb26a3811ac59a8cc
--- /dev/null
+++ b/docs/en/spring-cloud/spring-cloud-vault.md
@@ -0,0 +1,1901 @@
+# Spring Cloud Vault
+
+## 1. New & Noteworthy
+
+This section briefly covers items that are new and noteworthy in the latest releases.
+
+### 1.1. New in Spring Cloud Vault 3.0
+
+* Migration of `PropertySource` initialization from Spring Cloud’s Bootstrap Context to Spring Boot’s [ConfigData API](#vault.configdata).
+
+* Support for the [Couchbase Database](#vault.config.backends.couchbase) backend.
+
+* Configuration of keystore/truststore types through `spring.cloud.vault.ssl.key-store-type=…`/`spring.cloud.vault.ssl.trust-store-type=…` including PEM support.
+
+* Support for `ReactiveDiscoveryClient` by configuring a `ReactiveVaultEndpointProvider`.
+
+* Support to configure [Multiple Databases](#vault.config.backends.databases).
+
+## 2. 
Quick Start
+
+**Prerequisites**
+
+To get started with Vault and this guide you need a \*NIX-like operating system that provides:
+
+* `wget`, `openssl` and `unzip`
+
+* at least Java 8 and a properly configured `JAVA_HOME` environment variable
+
+| |This guide explains Vault setup from a Spring Cloud Vault perspective for integration testing.
You can find a getting started guide directly on the Vault project site: [learn.hashicorp.com/vault](https://learn.hashicorp.com/vault)|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+**Install Vault**
+
+```
+$ wget https://releases.hashicorp.com/vault/${vault_version}/vault_${vault_version}_${platform}.zip
+$ unzip vault_${vault_version}_${platform}.zip
+```
+
+| |These steps can be achieved by downloading and running [`install_vault.sh`](https://github.com/spring-cloud/spring-cloud-vault/blob/master/src/test/bash/install_vault.sh).|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+**Create SSL certificates for Vault**
+
+Next, you’re required to generate a set of certificates:
+
+* Root CA
+
+* Vault Certificate (decrypted key `work/ca/private/localhost.decrypted.key.pem` and certificate `work/ca/certs/localhost.cert.pem`)
+
+Make sure to import the Root Certificate into a Java-compliant truststore.
+
+The easiest way to achieve this is by using OpenSSL.
+
+| |[`create_certificates.sh`](https://github.com/spring-cloud/spring-cloud-vault/blob/master/src/test/bash/) creates certificates in `work/ca` and a JKS truststore `work/keystore.jks`.
If you want to run Spring Cloud Vault using this quickstart guide you need to configure the truststore via the `spring.cloud.vault.ssl.trust-store` property, setting it to `file:work/keystore.jks`.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+**Start Vault server**
+
+Next create a config file along the lines of:
+
+```
+backend "inmem" {
+}
+
+listener "tcp" {
+  address = "0.0.0.0:8200"
+  tls_cert_file = "work/ca/certs/localhost.cert.pem"
+  tls_key_file = "work/ca/private/localhost.decrypted.key.pem"
+}
+
+disable_mlock = true
+```
+
+| |You can find an example config file at [`vault.conf`](https://github.com/spring-cloud/spring-cloud-vault/blob/master/src/test/bash/vault.conf).|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------|
+
+```
+$ vault server -config=vault.conf
+```
+
+Vault is started listening on `0.0.0.0:8200` using the `inmem` storage and `https`.
+Vault is sealed and not initialized when starting up.
+
+| |If you want to run tests, leave Vault uninitialized.
The tests will initialize Vault and create a root token `00000000-0000-0000-0000-000000000000`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you want to use Vault for your application or give it a try then you need to initialize it first. + +``` +$ export VAULT_ADDR="https://localhost:8200" +$ export VAULT_SKIP_VERIFY=true # Don't do this for production +$ vault operator init +``` + +You should see something like: + +``` +Key 1: 7149c6a2e16b8833f6eb1e76df03e47f6113a3288b3093faf5033d44f0e70fe701 +Key 2: 901c534c7988c18c20435a85213c683bdcf0efcd82e38e2893779f152978c18c02 +Key 3: 03ff3948575b1165a20c20ee7c3e6edf04f4cdbe0e82dbff5be49c63f98bc03a03 +Key 4: 216ae5cc3ddaf93ceb8e1d15bb9fc3176653f5b738f5f3d1ee00cd7dccbe926e04 +Key 5: b2898fc8130929d569c1677ee69dc5f3be57d7c4b494a6062693ce0b1c4d93d805 +Initial Root Token: 19aefa97-cccc-bbbb-aaaa-225940e63d76 + +Vault initialized with 5 keys and a key threshold of 3. Please +securely distribute the above keys. When the Vault is re-sealed, +restarted, or stopped, you must provide at least 3 of these keys +to unseal it again. + +Vault does not store the master key. Without at least 3 keys, +your Vault will remain permanently sealed. +``` + +Vault will initialize and return a set of unsealing keys and the root token. +Pick 3 keys and unseal Vault. +Store the Vault token in the `VAULT_TOKEN`environment variable. + +``` +$ vault operator unseal (Key 1) +$ vault operator unseal (Key 2) +$ vault operator unseal (Key 3) +$ export VAULT_TOKEN=(Root token) +# Required to run Spring Cloud Vault tests after manual initialization +$ vault token create -id="00000000-0000-0000-0000-000000000000" -policy="root" +``` + +Spring Cloud Vault accesses different resources. +By default, the secret backend is enabled which accesses secret config settings via JSON endpoints. 
+ +The HTTP service has resources in the form: + +``` +/secret/{application}/{profile} +/secret/{application} +/secret/{defaultContext}/{profile} +/secret/{defaultContext} +``` + +where the "application" is injected as the `spring.application.name` in the`SpringApplication` (i.e. what is normally "application" in a regular Spring Boot app), "profile" is an active profile (or comma-separated list of properties). +Properties retrieved from Vault will be used "as-is" without further prefixing of the property names. + +## 3. Client Side Usage + +To use these features in an application, just build it as a Spring Boot application that depends on `spring-cloud-vault-config` (e.g. see the test cases). +Example Maven configuration: + +Example 1. pom.xml + +``` + + org.springframework.boot + spring-boot-starter-parent + 2.4.0.RELEASE + + + + + + org.springframework.cloud + spring-cloud-starter-vault-config + 3.1.0 + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + + +``` + +Then you can create a standard Spring Boot application, like this simple HTTP server: + +``` +@SpringBootApplication +@RestController +public class Application { + + @RequestMapping("/") + public String home() { + return "Hello World!"; + } + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } +} +``` + +When it runs it will pick up the external configuration from the default local Vault server on port `8200` if it is running. +To modify the startup behavior you can change the location of the Vault server using `application.properties`, for example + +Example 2. application.yml + +``` +spring.cloud.vault: + host: localhost + port: 8200 + scheme: https + uri: https://localhost:8200 + connection-timeout: 5000 + read-timeout: 15000 + config: +spring.config.import: vault:// +``` + +* `host` sets the hostname of the Vault host. 
+ The host name will be used for SSL certificate validation + +* `port` sets the Vault port + +* `scheme` setting the scheme to `http` will use plain HTTP. + Supported schemes are `http` and `https`. + +* `uri` configure the Vault endpoint with an URI. Takes precedence over host/port/scheme configuration + +* `connection-timeout` sets the connection timeout in milliseconds + +* `read-timeout` sets the read timeout in milliseconds + +* `spring.config.import` mounts Vault as `PropertySource` using all enabled secret backends (key-value enabled by default) + +Enabling further integrations requires additional dependencies and configuration. +Depending on how you have set up Vault you might need additional configuration like[SSL](https://cloud.spring.io/spring-cloud-vault/reference/html/#vault.config.ssl) and[authentication](https://cloud.spring.io/spring-cloud-vault/reference/html/#vault.config.authentication). + +If the application imports the `spring-boot-starter-actuator` project, the status of the vault server will be available via the `/health` endpoint. + +The vault health indicator can be enabled or disabled through the property `management.health.vault.enabled` (default to `true`). + +| |With Spring Cloud Vault 3.0 and Spring Boot 2.4, the bootstrap context initialization (`bootstrap.yml`, `bootstrap.properties`) of property sources was deprecated.
Instead, Spring Cloud Vault favors Spring Boot’s Config Data API which allows importing configuration from Vault. With Spring Boot Config Data approach, you need to set the `spring.config.import` property in order to bind to Vault. You can read more about it in the [Config Data Locations section](#vault.configdata.locations).
You can enable the bootstrap context either by setting the configuration property `spring.cloud.bootstrap.enabled=true` or by including the dependency `org.springframework.cloud:spring-cloud-starter-bootstrap`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.1. Authentication + +Vault requires an [authentication mechanism](https://www.vaultproject.io/docs/concepts/auth.html) to [authorize client requests](https://www.vaultproject.io/docs/concepts/tokens.html). + +Spring Cloud Vault supports multiple [authentication mechanisms](https://cloud.spring.io/spring-cloud-vault/reference/html/#vault.config.authentication) to authenticate applications with Vault. + +For a quickstart, use the root token printed by the [Vault initialization](#quickstart.vault.start). + +Example 3. application.yml + +``` +spring.cloud.vault: + token: 19aefa97-cccc-bbbb-aaaa-225940e63d76 +spring.config.import: vault:// +``` + +| |Consider carefully your security requirements.
Static token authentication is fine if you want to quickly get started with Vault, but a static token is not protected any further.
Any disclosure to unintended parties allows Vault use with the associated token roles.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 4. ConfigData API + +Spring Boot provides since version 2.4 a ConfigData API that allows the declaration of configuration sources and importing these as property sources. + +Spring Cloud Vault uses as of version 3.0 the ConfigData API to mount Vault’s secret backends as property sources. +In previous versions, the Bootstrap context was used. +The ConfigData API is much more flexible as it allows specifying which configuration systems to import and in which order. + +| |You can enable the deprecated bootstrap context either by setting the configuration property `spring.cloud.bootstrap.enabled=true` or by including the dependency `org.springframework.cloud:spring-cloud-starter-bootstrap`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.1. ConfigData Locations + +You can mount Vault configuration through one or more `PropertySource` that are materialized from Vault. +Spring Cloud Vault supports two config locations: + +* `vault://` (default location) + +* `vault:///` (contextual location) + +Using the default location mounts property sources for all enabled [Secret Backends](#vault.config.backends). +Without further configuration, Spring Cloud Vault mounts the key-value backend at `/secret/${spring.application.name}`. +Each activated profile adds another context path following the form `/secret/${spring.application.name}/${profile}`. 
+Adding further modules to the classpath, such as `spring-cloud-config-databases`, provides additional secret backend configuration options which get mounted as property sources if enabled. + +If you want to control which context paths are mounted from Vault as `PropertySource`, you can either use a contextual location (`vault:///my/context/path`) or configure a [`VaultConfigurer`](#vault.config.backends.configurer). + +Contextual locations are specified and mounted individually. +Spring Cloud Vault mounts each location as a unique `PropertySource`. +You can mix the default locations with contextual locations (or other config systems) to control the order of property sources. +This approach is useful in particular if you want to disable the default key-value path computation and mount each key-value backend yourself instead. + +Example 4. application.yml + +``` +spring.config.import: vault://first/context/path, vault://other/path, vault:// +``` + +Property names within a Spring `Environment` must be unique to avoid shadowing. +If you use the same secret names in different context paths and you want to expose these as individual properties you can distinguish them by adding a `prefix` query parameter to the location. + +Example 5. application.yml + +``` +spring.config.import: vault://my/path?prefix=foo., vault://my/other/path?prefix=bar. +secret: ${foo.secret} +other.secret: ${bar.secret} +``` + +| |Prefixes are added as-is to all property names returned by Vault. If you want key names to be separated with a dot between the prefix and key name, make sure to add a trailing dot to the prefix.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.2. Conditionally enable/disable Vault Configuration + +In some cases, it can be required to launch an application without Vault. 
You can express whether a Vault config location should be optional or mandatory (default) through the location string: + +* `optional:vault://` (default location) + +* `optional:vault:///` (contextual location) + +Optional locations are skipped during application startup if Vault support was disabled through `spring.cloud.vault.enabled=false`. + +| |Vault context paths that cannot be found (HTTP Status 404) are skipped regardless of whether the config location is marked optional. [Vault Client Fail Fast](#vault.config.fail-fast) allows failing on start if a Vault context path cannot be found because of HTTP Status 404.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.3. Infrastructure Customization + +Spring Cloud Vault requires infrastructure classes to interact with Vault. When not using the ConfigData API (meaning that you haven’t specified `spring.config.import=vault://` or a contextual Vault path), Spring Cloud Vault defines its beans through `VaultAutoConfiguration` and `VaultReactiveAutoConfiguration`. +Spring Boot bootstraps the application before a Spring Context is available. Therefore `VaultConfigDataLoader` registers beans itself to propagate these later on into the application context. 
+ +You can customize the infrastructure used by Spring Cloud Vault by registering custom instances using the `Bootstrapper` API: + +``` +InstanceSupplier builderSupplier = ctx -> RestTemplateBuilder + .builder() + .requestFactory(ctx.get(ClientFactoryWrapper.class).getClientHttpRequestFactory()) + .defaultHeader("X-Vault-Namespace", "my-namespace"); + +SpringApplication application = new SpringApplication(MyApplication.class); +application.addBootstrapper(registry -> registry.register(RestTemplateBuilder.class, builderSupplier)); +``` + +See also [Customize which secret backends to expose as PropertySource](#vault.config.backends.configurer) and the source of `VaultConfigDataLoader` for customization hooks. + +## 5. Authentication methods + +Different organizations have different requirements for security and authentication. +Vault reflects that need by shipping multiple authentication methods. +Spring Cloud Vault supports token and AppId authentication. + +### 5.1. Token authentication + +Tokens are the core method for authentication within Vault. +Token authentication requires a static token to be provided using the configuration. +As a fallback, the token may also be retrieved from `~/.vault-token` which is the default location used by the Vault CLI to cache tokens. + +| |Token authentication is the default authentication method.
If a token is disclosed an unintended party gains access to Vault and can access secrets for the intended client.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Example 6. application.yml + +``` +spring.cloud.vault: + authentication: TOKEN + token: 00000000-0000-0000-0000-000000000000 +``` + +* `authentication` setting this value to `TOKEN` selects the Token authentication method + +* `token` sets the static token to use. If missing or empty, then an attempt will be made to retrieve a token from \~/.vault-token. + +See also: + +* [Vault Documentation: Tokens](https://www.vaultproject.io/docs/concepts/tokens.html) + +* [Vault Documentation: CLI login](https://www.vaultproject.io/docs/commands/login) + +* [Vault Documentation: CLI default to \~/.vault-token](https://www.vaultproject.io/docs/commands/token-helper) + +### 5.2. Vault Agent authentication + +Vault ships a sidecar utility with Vault Agent since version 0.11.0. Vault Agent implements the functionality of Spring Vault’s `SessionManager`with its Auto-Auth feature. +Applications can reuse cached session credentials by relying on Vault Agent running on `localhost`. +Spring Vault can send requests without the`X-Vault-Token` header. +Disable Spring Vault’s authentication infrastructure to disable client authentication and session management. + +Example 7. application.yml + +``` +spring.cloud.vault: + authentication: NONE +``` + +* `authentication` setting this value to `NONE` disables `ClientAuthentication`and `SessionManager`. + +See also: [Vault Documentation: Agent](https://www.vaultproject.io/docs/agent/index.html) + +### 5.3. AppId authentication + +Vault supports [AppId](https://www.vaultproject.io/docs/auth/app-id.html)authentication that consists of two hard to guess tokens. +The AppId defaults to `spring.application.name` that is statically configured. 
+The second token is the UserId which is a part determined by the application, usually related to the runtime environment. +IP address, Mac address or a Docker container name are good examples. +Spring Cloud Vault Config supports IP address, Mac address and static UserId’s (e.g. supplied via System properties). +The IP and Mac address are represented as Hex-encoded SHA256 hash. + +IP address-based UserId’s use the local host’s IP address. + +Example 8. application.yml using SHA256 IP-Address UserId’s + +``` +spring.cloud.vault: + authentication: APPID + app-id: + user-id: IP_ADDRESS +``` + +* `authentication` setting this value to `APPID` selects the AppId authentication method + +* `app-id-path` sets the path of the AppId mount to use + +* `user-id` sets the UserId method. + Possible values are `IP_ADDRESS`,`MAC_ADDRESS` or a class name implementing a custom `AppIdUserIdMechanism` + +The corresponding command to generate the IP address UserId from a command line is: + +``` +$ echo -n 192.168.99.1 | sha256sum +``` + +| |Including the line break of `echo` leads to a different hash value so make sure to include the `-n` flag.| +|---|---------------------------------------------------------------------------------------------------------| + +Mac address-based UserId’s obtain their network device from the localhost-bound device. +The configuration also allows specifying a `network-interface` hint to pick the right device. +The value of`network-interface` is optional and can be either an interface name or interface index (0-based). + +Example 9. 
application.yml using SHA256 Mac-Address UserId’s + +``` +spring.cloud.vault: + authentication: APPID + app-id: + user-id: MAC_ADDRESS + network-interface: eth0 +``` + +* `network-interface` sets network interface to obtain the physical address + +The corresponding command to generate the IP address UserId from a command line is: + +``` +$ echo -n 0AFEDE1234AC | sha256sum +``` + +| |The Mac address is specified uppercase and without colons.
Including the line break of `echo` leads to a different hash value so make sure to include the `-n` flag.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 5.3.1. Custom UserId
+
+The UserId generation is an open mechanism.
+You can set `spring.cloud.vault.app-id.user-id` to any string and the configured value will be used as static UserId.
+
+A more advanced approach lets you set `spring.cloud.vault.app-id.user-id` to a classname.
+This class must be on your classpath and must implement the `org.springframework.cloud.vault.AppIdUserIdMechanism` interface and the `createUserId` method.
+Spring Cloud Vault will obtain the UserId by calling `createUserId` each time it authenticates using AppId to obtain a token.
+
+Example 10. application.yml
+
+```
+spring.cloud.vault:
+    authentication: APPID
+    app-id:
+        user-id: com.example.MyUserIdMechanism
+```
+
+Example 11. MyUserIdMechanism.java
+
+```
+public class MyUserIdMechanism implements AppIdUserIdMechanism {
+
+  @Override
+  public String createUserId() {
+    String userId = ...
+    return userId;
+  }
+}
+```
+
+See also: [Vault Documentation: Using the App ID auth backend](https://www.vaultproject.io/docs/auth/app-id.html)
+
+### 5.4. AppRole authentication
+
+[AppRole](https://www.vaultproject.io/docs/auth/app-id.html) is intended for machine authentication, like the deprecated (since Vault 0.6.1) [AppId authentication](#vault.config.authentication.appid).
+AppRole authentication consists of two hard to guess (secret) tokens: RoleId and SecretId.
+
+Spring Vault supports various AppRole scenarios (push/pull mode and wrapped).
+
+RoleId and optionally SecretId must be provided by configuration, Spring Vault will not look up these or create a custom SecretId.
+
+Example 12. 
application.yml with AppRole authentication properties + +``` +spring.cloud.vault: + authentication: APPROLE + app-role: + role-id: bde2076b-cccb-3cf0-d57e-bca7b1e83a52 +``` + +The following scenarios are supported along the required configuration details: + +| **Method** |**RoleId**|**SecretId**|**RoleName**|**Token**| +|---------------------------------|----------|------------|------------|---------| +| Provided RoleId/SecretId | Provided | Provided | | | +|Provided RoleId without SecretId | Provided | | | | +| Provided RoleId, Pull SecretId | Provided | Provided | Provided |Provided | +| Pull RoleId, provided SecretId | | Provided | Provided |Provided | +| Full Pull Mode | | | Provided |Provided | +| Wrapped | | | |Provided | +|Wrapped RoleId, provided SecretId| Provided | | |Provided | +|Provided RoleId, wrapped SecretId| | Provided | |Provided | + +|**RoleId**|**SecretId**|**Supported**| +|----------|------------|-------------| +| Provided | Provided | ✅ | +| Provided | Pull | ✅ | +| Provided | Wrapped | ✅ | +| Provided | Absent | ✅ | +| Pull | Provided | ✅ | +| Pull | Pull | ✅ | +| Pull | Wrapped | ❌ | +| Pull | Absent | ❌ | +| Wrapped | Provided | ✅ | +| Wrapped | Pull | ❌ | +| Wrapped | Wrapped | ✅ | +| Wrapped | Absent | ❌ | + +| |You can use still all combinations of push/pull/wrapped modes by providing a configured `AppRoleAuthentication` bean within the context.
Spring Cloud Vault cannot derive all possible AppRole combinations from the configuration properties.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |AppRole authentication is limited to simple pull mode using reactive infrastructure.
Full pull mode is not yet supported.
Using Spring Cloud Vault with the Spring WebFlux stack enables Vault’s reactive auto-configuration which can be disabled by setting `spring.cloud.vault.reactive.enabled=false`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Example 13. application.yml with all AppRole authentication properties + +``` +spring.cloud.vault: + authentication: APPROLE + app-role: + role-id: bde2076b-cccb-3cf0-d57e-bca7b1e83a52 + secret-id: 1696536f-1976-73b1-b241-0b4213908d39 + role: my-role + app-role-path: approle +``` + +* `role-id` sets the RoleId. + +* `secret-id` sets the SecretId. + SecretId can be omitted if AppRole is configured without requiring SecretId (See `bind_secret_id`). + +* `role`: sets the AppRole name for pull mode. + +* `app-role-path` sets the path of the approle authentication mount to use. + +See also: [Vault Documentation: Using the AppRole auth backend](https://www.vaultproject.io/docs/auth/approle.html) + +### 5.5. AWS-EC2 authentication + +The [aws-ec2](https://www.vaultproject.io/docs/auth/aws-ec2.html)auth backend provides a secure introduction mechanism for AWS EC2 instances, allowing automated retrieval of a Vault token. +Unlike most Vault authentication backends, this backend does not require first-deploying, or provisioning security-sensitive credentials (tokens, username/password, client certificates, etc.). +Instead, it treats AWS as a Trusted Third Party and uses the cryptographically signed dynamic metadata information that uniquely represents each EC2 instance. + +Example 14. application.yml using AWS-EC2 Authentication + +``` +spring.cloud.vault: + authentication: AWS_EC2 +``` + +AWS-EC2 authentication enables nonce by default to follow the Trust On First Use (TOFU) principle. 
+Any unintended party that gains access to the PKCS#7 identity metadata can authenticate against Vault. + +During the first login, Spring Cloud Vault generates a nonce that is stored in the auth backend aside the instance Id. +Re-authentication requires the same nonce to be sent. +Any other party does not have the nonce and can raise an alert in Vault for further investigation. + +The nonce is kept in memory and is lost during application restart. +You can configure a static nonce with `spring.cloud.vault.aws-ec2.nonce`. + +AWS-EC2 authentication roles are optional and default to the AMI. +You can configure the authentication role by setting the`spring.cloud.vault.aws-ec2.role` property. + +Example 15. application.yml with configured role + +``` +spring.cloud.vault: + authentication: AWS_EC2 + aws-ec2: + role: application-server +``` + +Example 16. application.yml with all AWS EC2 authentication properties + +``` +spring.cloud.vault: + authentication: AWS_EC2 + aws-ec2: + role: application-server + aws-ec2-path: aws-ec2 + identity-document: http://... + nonce: my-static-nonce +``` + +* `authentication` setting this value to `AWS_EC2` selects the AWS EC2 authentication method + +* `role` sets the name of the role against which the login is being attempted. + +* `aws-ec2-path` sets the path of the AWS EC2 mount to use + +* `identity-document` sets URL of the PKCS#7 AWS EC2 identity document + +* `nonce` used for AWS-EC2 authentication. + An empty nonce defaults to nonce generation + +See also: [Vault Documentation: Using the aws auth backend](https://www.vaultproject.io/docs/auth/aws.html) + +### 5.6. AWS-IAM authentication + +The [aws](https://www.vaultproject.io/docs/auth/aws-ec2.html) backend provides a secure authentication mechanism for AWS IAM roles, allowing the automatic authentication with vault based on the current IAM role of the running application. 
+Unlike most Vault authentication backends, this backend does not require first-deploying, or provisioning security-sensitive credentials (tokens, username/password, client certificates, etc.).
+Instead, it treats AWS as a Trusted Third Party and uses the 4 pieces of information signed by the caller with their IAM credentials to verify that the caller is indeed using that IAM role.
+
+The current IAM role the application is running in is automatically calculated.
+If you are running your application on AWS ECS then the application will use the IAM role assigned to the ECS task of the running container.
+If you are running your application naked on top of an EC2 instance then the IAM role used will be the one assigned to the EC2 instance.
+
+When using the AWS-IAM authentication you must create a role in Vault and assign it to your IAM role.
+An empty `role` defaults to the friendly name of the current IAM role.
+
+Example 17. application.yml with required AWS-IAM Authentication properties
+
+```
+spring.cloud.vault:
+    authentication: AWS_IAM
+```
+
+Example 18. application.yml with all AWS-IAM Authentication properties
+
+```
+spring.cloud.vault:
+    authentication: AWS_IAM
+    aws-iam:
+        role: my-dev-role
+        aws-path: aws
+        server-name: some.server.name
+        endpoint-uri: https://sts.eu-central-1.amazonaws.com
+```
+
+* `role` sets the name of the role against which the login is being attempted.
+  This should be bound to your IAM role.
+  If one is not supplied then the friendly name of the current IAM user will be used as the vault role.
+
+* `aws-path` sets the path of the AWS mount to use
+
+* `server-name` sets the value to use for the `X-Vault-AWS-IAM-Server-ID` header preventing certain types of replay attacks.
+
+* `endpoint-uri` sets the value to use for the AWS STS API used for the `iam_request_url` parameter. 
+
+AWS-IAM requires the AWS Java SDK dependency (`com.amazonaws:aws-java-sdk-core`) as the authentication implementation uses AWS SDK types for credentials and request signing.
+
+See also: [Vault Documentation: Using the aws auth backend](https://www.vaultproject.io/docs/auth/aws.html)
+
+### 5.7. Azure MSI authentication
+
+The [azure](https://www.vaultproject.io/docs/auth/azure.html) auth backend provides a secure introduction mechanism for Azure VM instances, allowing automated retrieval of a Vault token.
+Unlike most Vault authentication backends, this backend does not require first-deploying, or provisioning security-sensitive credentials (tokens, username/password, client certificates, etc.).
+Instead, it treats Azure as a Trusted Third Party and uses the managed service identity and instance metadata information that can be bound to a VM instance.
+
+Example 19. application.yml with required Azure Authentication properties
+
+```
+spring.cloud.vault:
+    authentication: AZURE_MSI
+    azure-msi:
+        role: my-dev-role
+```
+
+Example 20. application.yml with all Azure Authentication properties
+
+```
+spring.cloud.vault:
+    authentication: AZURE_MSI
+    azure-msi:
+        role: my-dev-role
+        azure-path: azure
+        metadata-service: http://169.254.169.254/metadata/instance…
+        identity-token-service: http://169.254.169.254/metadata/identity…
+```
+
+* `role` sets the name of the role against which the login is being attempted.
+
+* `azure-path` sets the path of the Azure mount to use
+
+* `metadata-service` sets the URI at which to access the instance metadata service
+
+* `identity-token-service` sets the URI at which to access the identity token service
+
+Azure MSI authentication obtains environmental details about the virtual machine (subscription Id, resource group, VM name) from the instance metadata service.
+The Vault server Resource Id defaults to `[vault.hashicorp.com](https://vault.hashicorp.com)`. 
+To change this, set `spring.cloud.vault.azure-msi.identity-token-service` accordingly. + +See also: + +* [Vault Documentation: Using the azure auth backend](https://www.vaultproject.io/docs/auth/azure.html) + +* [Azure Documentation: Azure Instance Metadata Service](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service) + +### 5.8. TLS certificate authentication + +The `cert` auth backend allows authentication using SSL/TLS client certificates that are either signed by a CA or self-signed. + +To enable `cert` authentication you need to: + +1. Use SSL, see [Vault Client SSL configuration](#vault.config.ssl) + +2. Configure a Java `Keystore` that contains the client certificate and the private key + +3. Set the `spring.cloud.vault.authentication` to `CERT` + +Example 21. application.yml + +``` +spring.cloud.vault: + authentication: CERT + ssl: + key-store: classpath:keystore.jks + key-store-password: changeit + key-store-type: JKS + cert-auth-path: cert +``` + +See also: [Vault Documentation: Using the Cert auth backend](https://www.vaultproject.io/docs/auth/cert.html) + +### 5.9. Cubbyhole authentication + +Cubbyhole authentication uses Vault primitives to provide a secured authentication workflow. +Cubbyhole authentication uses tokens as primary login method. +An ephemeral token is used to obtain a second, login VaultToken from Vault’s Cubbyhole secret backend. +The login token is usually longer-lived and used to interact with Vault. +The login token will be retrieved from a wrapped response stored at `/cubbyhole/response`. + +**Creating a wrapped token** + +| |Response Wrapping for token creation requires Vault 0.6.0 or higher.| +|---|--------------------------------------------------------------------| + +Example 22. 
Creating and storing tokens + +``` +$ vault token-create -wrap-ttl="10m" +Key Value +--- ----- +wrapping_token: 397ccb93-ff6c-b17b-9389-380b01ca2645 +wrapping_token_ttl: 0h10m0s +wrapping_token_creation_time: 2016-09-18 20:29:48.652957077 +0200 CEST +wrapped_accessor: 46b6aebb-187f-932a-26d7-4f3d86a68319 +``` + +Example 23. application.yml + +``` +spring.cloud.vault: + authentication: CUBBYHOLE + token: 397ccb93-ff6c-b17b-9389-380b01ca2645 +``` + +See also: + +* [Vault Documentation: Tokens](https://www.vaultproject.io/docs/concepts/tokens.html) + +* [Vault Documentation: Cubbyhole Secret Backend](https://www.vaultproject.io/docs/secrets/cubbyhole/index.html) + +* [Vault Documentation: Response Wrapping](https://www.vaultproject.io/docs/concepts/response-wrapping.html) + +### 5.10. GCP-GCE authentication + +The [gcp](https://www.vaultproject.io/docs/auth/gcp.html)auth backend allows Vault login by using existing GCP (Google Cloud Platform) IAM and GCE credentials. + +GCP GCE (Google Compute Engine) authentication creates a signature in the form of a JSON Web Token (JWT) for a service account. +A JWT for a Compute Engine instance is obtained from the GCE metadata service using [Instance identification](https://cloud.google.com/compute/docs/instances/verifying-instance-identity). +This API creates a JSON Web Token that can be used to confirm the instance identity. + +Unlike most Vault authentication backends, this backend does not require first-deploying, or provisioning security-sensitive credentials (tokens, username/password, client certificates, etc.). +Instead, it treats GCP as a Trusted Third Party and uses the cryptographically signed dynamic metadata information that uniquely represents each GCP service account. + +Example 24. application.yml with required GCP-GCE Authentication properties + +``` +spring.cloud.vault: + authentication: GCP_GCE + gcp-gce: + role: my-dev-role +``` + +Example 25. 
 application.yml with all GCP-GCE Authentication properties
+
+```
+spring.cloud.vault:
+    authentication: GCP_GCE
+    gcp-gce:
+        gcp-path: gcp
+        role: my-dev-role
+        service-account: [email protected]
+```
+
+* `role` sets the name of the role against which the login is being attempted.
+
+* `gcp-path` sets the path of the GCP mount to use
+
+* `service-account` allows overriding the service account Id to a specific value.
+  Defaults to the `default` service account.
+
+See also:
+
+* [Vault Documentation: Using the GCP auth backend](https://www.vaultproject.io/docs/auth/gcp.html)
+
+* [GCP Documentation: Verifying the Identity of Instances](https://cloud.google.com/compute/docs/instances/verifying-instance-identity)
+
+### 5.11. GCP-IAM authentication
+
+The [gcp](https://www.vaultproject.io/docs/auth/gcp.html) auth backend allows Vault login by using existing GCP (Google Cloud Platform) IAM and GCE credentials.
+
+GCP IAM authentication creates a signature in the form of a JSON Web Token (JWT) for a service account.
+A JWT for a service account is obtained by calling GCP IAM’s [`projects.serviceAccounts.signJwt`](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt) API. The caller authenticates against GCP IAM and thereby proves its identity.
+This Vault backend treats GCP as a Trusted Third Party.
+
+IAM credentials can be obtained from either the runtime environment, specifically the [`GOOGLE_APPLICATION_CREDENTIALS`](https://cloud.google.com/docs/authentication/production) environment variable, the Google Compute metadata service, or supplied externally as e.g. JSON or base64 encoded.
+JSON is the preferred form as it carries the project id and service account identifier required for calling `projects.serviceAccounts.signJwt`.
+
+Example 26. application.yml with required GCP-IAM Authentication properties
+
+```
+spring.cloud.vault:
+    authentication: GCP_IAM
+    gcp-iam:
+        role: my-dev-role
+```
+
+Example 27. 
application.yml with all GCP-IAM Authentication properties + +``` +spring.cloud.vault: + authentication: GCP_IAM + gcp-iam: + credentials: + location: classpath:credentials.json + encoded-key: e+KApn0= + gcp-path: gcp + jwt-validity: 15m + project-id: my-project-id + role: my-dev-role + service-account-id: [email protected] +``` + +* `role` sets the name of the role against which the login is being attempted. + +* `credentials.location` path to the credentials resource that contains Google credentials in JSON format. + +* `credentials.encoded-key` the base64 encoded contents of an OAuth2 account private key in the JSON format. + +* `gcp-path` sets the path of the GCP mount to use + +* `jwt-validity` configures the JWT token validity. + Defaults to 15 minutes. + +* `project-id` allows overriding the project Id to a specific value. + Defaults to the project Id from the obtained credential. + +* `service-account` allows overriding the service account Id to a specific value. + Defaults to the service account from the obtained credential. + +GCP IAM authentication requires the Google Cloud Java SDK dependency (`com.google.apis:google-api-services-iam` and `com.google.auth:google-auth-library-oauth2-http`) as the authentication implementation uses Google APIs for credentials and JWT signing. + +| |Google credentials require an OAuth 2 token maintaining the token lifecycle.
All API is synchronous therefore, `GcpIamAuthentication` does not support `AuthenticationSteps` which is required for reactive usage.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See also: + +* [Vault Documentation: Using the GCP auth backend](https://www.vaultproject.io/docs/auth/gcp.html) + +* [GCP Documentation: projects.serviceAccounts.signJwt](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt) + +### 5.12. Kubernetes authentication + +Kubernetes authentication mechanism (since Vault 0.8.3) allows to authenticate with Vault using a Kubernetes Service Account Token. +The authentication is role based and the role is bound to a service account name and a namespace. + +A file containing a JWT token for a pod’s service account is automatically mounted at `/var/run/secrets/kubernetes.io/serviceaccount/token`. + +Example 28. application.yml with all Kubernetes authentication properties + +``` +spring.cloud.vault: + authentication: KUBERNETES + kubernetes: + role: my-dev-role + kubernetes-path: kubernetes + service-account-token-file: /var/run/secrets/kubernetes.io/serviceaccount/token +``` + +* `role` sets the Role. + +* `kubernetes-path` sets the path of the Kubernetes mount to use. + +* `service-account-token-file` sets the location of the file containing the Kubernetes Service Account Token. + Defaults to `/var/run/secrets/kubernetes.io/serviceaccount/token`. + +See also: + +* [Vault Documentation: Kubernetes](https://www.vaultproject.io/docs/auth/kubernetes.html) + +* [Kubernetes Documentation: Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) + +### 5.13. 
Pivotal CloudFoundry authentication + +The [pcf](https://www.vaultproject.io/docs/auth/pcf.html)auth backend provides a secure introduction mechanism for applications running within Pivotal’s CloudFoundry instances allowing automated retrieval of a Vault token. +Unlike most Vault authentication backends, this backend does not require first-deploying, or provisioning security-sensitive credentials (tokens, username/password, client certificates, etc.) as identity provisioning is handled by PCF itself. +Instead, it treats PCF as a Trusted Third Party and uses the managed instance identity. + +Example 29. application.yml with required PCF Authentication properties + +``` +spring.cloud.vault: + authentication: PCF + pcf: + role: my-dev-role +``` + +Example 30. application.yml with all PCF Authentication properties + +``` +spring.cloud.vault: + authentication: PCF + pcf: + role: my-dev-role + pcf-path: path + instance-certificate: /etc/cf-instance-credentials/instance.crt + instance-key: /etc/cf-instance-credentials/instance.key +``` + +* `role` sets the name of the role against which the login is being attempted. + +* `pcf-path` sets the path of the PCF mount to use. + +* `instance-certificate` sets the path to the PCF instance identity certificate. + Defaults to `${CF_INSTANCE_CERT}` env variable. + +* `instance-key` sets the path to the PCF instance identity key. + Defaults to `${CF_INSTANCE_KEY}` env variable. + +| |PCF authentication requires BouncyCastle (bcpkix-jdk15on) to be on the classpath for RSA PSS signing.| +|---|-----------------------------------------------------------------------------------------------------| + +See also: [Vault Documentation: Using the pcf auth backend](https://www.vaultproject.io/docs/auth/pcf.html) + +## 6. ACL Requirements + +This section explains which paths are accessed by Spring Vault so you can derive your policy declarations from the required capabilities. 
+ +|Capability|Associated HTTP verbs| +|----------|---------------------| +| create | `POST`/`PUT` | +| read | `GET` | +| update | `POST`/`PUT` | +| delete | `DELETE` | +| list | `LIST` (`GET`) | + +See also [www.vaultproject.io/guides/identity/policies](https://www.vaultproject.io/guides/identity/policies). + +### 6.1. Authentication + +Login: `POST auth/$authMethod/login` + +### 6.2. KeyValue Mount Discovery + +`GET sys/internal/ui/mounts/$mountPath` + +### 6.3. SecretLeaseContainer + +`SecretLeaseContainer` uses different paths depending on the configured lease endpoint. + +`LeaseEndpoints.Legacy` + +* Revocation: `PUT sys/revoke` + +* Renewal: `PUT sys/renew` + +`LeaseEndpoints.Leases` (`SysLeases`) + +* Revocation: `PUT sys/leases/revoke` + +* Renewal: `PUT sys/leases/renew` + +### 6.4. Session Management + +* Token lookup: `GET auth/token/lookup-self` + +* Renewal: `POST auth/token/renew-self` + +* Revoke: `POST auth/token/revoke-self` + +## 7. Secret Backends + +### 7.1. Key-Value Backend + +Spring Cloud Vault supports both Key-Value secret backends, the versioned (v2) and unversioned (v1). +The key-value backend allows storage of arbitrary values as key-value store. +A single context can store one or many key-value tuples. +Contexts can be organized hierarchically. +Spring Cloud Vault determines itself whether a secret is using versioning and maps the path to its appropriate URL. +Spring Cloud Vault allows using the Application name, and a default context name (`application`) in combination with active profiles. 
+ +``` +/secret/{application}/{profile} +/secret/{application} +/secret/{default-context}/{profile} +/secret/{default-context} +``` + +The application name is determined by the properties: + +* `spring.cloud.vault.kv.application-name` + +* `spring.cloud.vault.application-name` + +* `spring.application.name` + +The profiles are determined by the properties: + +* `spring.cloud.vault.kv.profiles` + +* `spring.profiles.active` + +Secrets can be obtained from other contexts within the key-value backend by adding their paths to the application name, separated by commas. +For example, given the application name `usefulapp,mysql1,projectx/aws`, each of these folders will be used: + +* `/secret/usefulapp` + +* `/secret/mysql1` + +* `/secret/projectx/aws` + +Spring Cloud Vault adds all active profiles to the list of possible context paths. +No active profiles will skip accessing contexts with a profile name. + +Properties are exposed like they are stored (i.e. without additional prefixes). + +| |Spring Cloud Vault adds the `data/` context between the mount path and the actual context path depending on whether the mount uses the versioned key-value backend.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +spring.cloud.vault: + kv: + enabled: true + backend: secret + profile-separator: '/' + default-context: application + application-name: my-app + profiles: local, cloud +``` + +* `enabled` setting this value to `false` disables the secret backend config usage + +* `backend` sets the path of the secret mount to use + +* `default-context` sets the context name used by all applications + +* `application-name` overrides the application name for use in the key-value backend + +* `profiles` overrides the active profiles for use in the key-value backend + +* `profile-separator` separates the profile name from the context in property sources with profiles 
+ +| |The key-value secret backend can be operated in versioned (v2) and non-versioned (v1) modes.| +|---|--------------------------------------------------------------------------------------------| + +See also: + +* [Vault Documentation: Using the KV Secrets Engine - Version 1 (generic secret backend)](https://www.vaultproject.io/docs/secrets/kv/kv-v1.html) + +* [Vault Documentation: Using the KV Secrets Engine - Version 2 (versioned key-value backend)](https://www.vaultproject.io/docs/secrets/kv/kv-v2.html) + +### 7.2. Consul + +Spring Cloud Vault can obtain credentials for HashiCorp Consul. +The Consul integration requires the `spring-cloud-vault-config-consul`dependency. + +Example 31. pom.xml + +``` + + + org.springframework.cloud + spring-cloud-vault-config-consul + 3.1.0 + + +``` + +The integration can be enabled by setting`spring.cloud.vault.consul.enabled=true` (default `false`) and providing the role name with `spring.cloud.vault.consul.role=…`. + +The obtained token is stored in `spring.cloud.consul.token`so using Spring Cloud Consul can pick up the generated credentials without further configuration. +You can configure the property name by setting `spring.cloud.vault.consul.token-property`. + +``` +spring.cloud.vault: + consul: + enabled: true + role: readonly + backend: consul + token-property: spring.cloud.consul.token +``` + +* `enabled` setting this value to `true` enables the Consul backend config usage + +* `role` sets the role name of the Consul role definition + +* `backend` sets the path of the Consul mount to use + +* `token-property` sets the property name in which the Consul ACL token is stored + +See also: [Vault Documentation: Setting up Consul with Vault](https://www.vaultproject.io/docs/secrets/consul/index.html) + +### 7.3. RabbitMQ + +Spring Cloud Vault can obtain credentials for RabbitMQ. + +The RabbitMQ integration requires the `spring-cloud-vault-config-rabbitmq`dependency. + +Example 32. 
pom.xml + +``` + + + org.springframework.cloud + spring-cloud-vault-config-rabbitmq + 3.1.0 + + +``` + +The integration can be enabled by setting`spring.cloud.vault.rabbitmq.enabled=true` (default `false`) and providing the role name with `spring.cloud.vault.rabbitmq.role=…`. + +Username and password are stored in `spring.rabbitmq.username`and `spring.rabbitmq.password` so using Spring Boot will pick up the generated credentials without further configuration. +You can configure the property names by setting `spring.cloud.vault.rabbitmq.username-property` and`spring.cloud.vault.rabbitmq.password-property`. + +``` +spring.cloud.vault: + rabbitmq: + enabled: true + role: readonly + backend: rabbitmq + username-property: spring.rabbitmq.username + password-property: spring.rabbitmq.password +``` + +* `enabled` setting this value to `true` enables the RabbitMQ backend config usage + +* `role` sets the role name of the RabbitMQ role definition + +* `backend` sets the path of the RabbitMQ mount to use + +* `username-property` sets the property name in which the RabbitMQ username is stored + +* `password-property` sets the property name in which the RabbitMQ password is stored + +See also: [Vault Documentation: Setting up RabbitMQ with Vault](https://www.vaultproject.io/docs/secrets/rabbitmq/index.html) + +### 7.4. AWS + +Spring Cloud Vault can obtain credentials for AWS. + +The AWS integration requires the `spring-cloud-vault-config-aws`dependency. + +Example 33. pom.xml + +``` + + + org.springframework.cloud + spring-cloud-vault-config-aws + 3.1.0 + + +``` + +The integration can be enabled by setting`spring.cloud.vault.aws=true` (default `false`) and providing the role name with `spring.cloud.vault.aws.role=…`. + +Supported AWS credential Types: + +* iam\_user (Defaults) + +* assumed\_role (STS) + +* federation\_token (STS) + +The access key and secret key are stored in `cloud.aws.credentials.accessKey`and `cloud.aws.credentials.secretKey`. 
So using Spring Cloud AWS will pick up the generated credentials without further configuration. + +You can configure the property names by setting `spring.cloud.vault.aws.access-key-property` and`spring.cloud.vault.aws.secret-key-property`. + +For STS security token, you can configure the property name by setting `spring.cloud.vault.aws.session-token-key-property`. The security token is stored under `cloud.aws.credentials.sessionToken` (defaults). + +Example: iam\_user + +``` +spring.cloud.vault: + aws: + enabled: true + role: readonly + backend: aws + access-key-property: cloud.aws.credentials.accessKey + secret-key-property: cloud.aws.credentials.secretKey +``` + +Example: assumed\_role (STS) + +``` +spring.cloud.vault: + aws: + enabled: true + role: sts-vault-role + backend: aws + credential-type: assumed_role + access-key-property: cloud.aws.credentials.accessKey + secret-key-property: cloud.aws.credentials.secretKey + session-token-key-property: cloud.aws.credentials.sessionToken + ttl: 3600s + role-arn: arn:aws:iam::${AWS_ACCOUNT}:role/sts-app-role +``` + +* `enabled` setting this value to `true` enables the AWS backend config usage + +* `role` sets the role name of the AWS role definition + +* `backend` sets the path of the AWS mount to use + +* `access-key-property` sets the property name in which the AWS access key is stored + +* `secret-key-property` sets the property name in which the AWS secret key is stored + +* `session-token-key-property` sets the property name in which the AWS STS security token is stored. + +* `credential-type` sets the aws credential type to use for this backend. Defaults to `iam_user` + +* `ttl` sets the ttl for the STS token when using `assumed_role` or `federation_token`. Defaults to the ttl specified by the vault role. Min/Max values are also limited to what AWS would support for STS. + +* `role-arn` sets the IAM role to assume if more than one are configured for the vault role when using `assumed_role`. 
+ +See also: [Vault Documentation: Setting up AWS with Vault](https://www.vaultproject.io/docs/secrets/aws/index.html) + +## 8. Database backends + +Vault supports several database secret backends to generate database credentials dynamically based on configured roles. +This means services that need to access a database no longer need to configure credentials: they can request them from Vault, and use Vault’s leasing mechanism to more easily roll keys. + +Spring Cloud Vault integrates with these backends: + +* [Database](#vault.config.backends.database) + +* [Apache Cassandra](#vault.config.backends.cassandra) + +* [Couchbase Database](#vault.config.backends.couchbase) + +* [Elasticsearch](#vault.config.backends.elasticsearch) + +* [MongoDB](#vault.config.backends.mongodb) + +* [MySQL](#vault.config.backends.mysql) + +* [PostgreSQL](#vault.config.backends.postgresql) + +Using a database secret backend requires to enable the backend in the configuration and the `spring-cloud-vault-config-databases`dependency. + +Vault ships since 0.7.1 with a dedicated `database` secret backend that allows database integration via plugins. +You can use that specific backend by using the generic database backend. +Make sure to specify the appropriate backend path, e.g. `spring.cloud.vault.mysql.role.backend=database`. + +Example 34. pom.xml + +``` + + + org.springframework.cloud + spring-cloud-vault-config-databases + 3.1.0 + + +``` + +| |Enabling multiple JDBC-compliant databases will generate credentials and store them by default in the same property keys hence property names for JDBC secrets need to be configured separately.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.1. 
Database + +Spring Cloud Vault can obtain credentials for any database listed at[www.vaultproject.io/api/secret/databases/index.html](https://www.vaultproject.io/api/secret/databases/index.html). +The integration can be enabled by setting`spring.cloud.vault.database.enabled=true` (default `false`) and providing the role name with `spring.cloud.vault.database.role=…`. + +While the database backend is a generic one, `spring.cloud.vault.database`specifically targets JDBC databases. +Username and password are available from `spring.datasource.username` and `spring.datasource.password` properties +so using Spring Boot will pick up the generated credentials for your `DataSource` without further configuration. +You can configure the property names by setting`spring.cloud.vault.database.username-property` and`spring.cloud.vault.database.password-property`. + +``` +spring.cloud.vault: + database: + enabled: true + role: readonly + backend: database + username-property: spring.datasource.username + password-property: spring.datasource.password +``` + +### 8.2. Multiple Databases + +Sometimes, credentials for a single database isn’t sufficient because an application might connect to two or more databases of the same kind. +Beginning with version 3.0.5, Spring Vault supports the configuration of multiple database secret backends under the `spring.cloud.vault.databases.*` namespace. + +The configuration accepts multiple database backends to materialize credentials into the specified properties. Make sure to configure `username-property` and `password-property` appropriately. 
+ +``` +spring.cloud.vault: + databases: + primary: + enabled: true + role: readwrite + backend: database + username-property: spring.primary-datasource.username + password-property: spring.primary-datasource.password + other-database: + enabled: true + role: readonly + backend: database + username-property: spring.secondary-datasource.username + password-property: spring.secondary-datasource.password +``` + +* `` descriptive name of the database configuration. + +* `.enabled` setting this value to `true` enables the Database backend config usage + +* `.role` sets the role name of the Database role definition + +* `.backend` sets the path of the Database mount to use + +* `.username-property` sets the property name in which the Database username is stored. Make sure to use unique property names to avoid property shadowing. + +* `.password-property` sets the property name in which the Database password is stored Make sure to use unique property names to avoid property shadowing. + +See also: [Vault Documentation: Database Secrets backend](https://www.vaultproject.io/docs/secrets/databases/index.html) + +| |Spring Cloud Vault does not support getting new credentials and configuring your `DataSource` with them when the maximum lease time has been reached.
That is, if `max_ttl` of the Database role in Vault is set to `24h` that means that 24 hours after your application has started it can no longer authenticate with the database.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.3. Apache Cassandra + +| |The `cassandra` backend has been deprecated in Vault 0.7.1 and it is recommended to use the `database` backend and mount it as `cassandra`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Cloud Vault can obtain credentials for Apache Cassandra. +The integration can be enabled by setting`spring.cloud.vault.cassandra.enabled=true` (default `false`) and providing the role name with `spring.cloud.vault.cassandra.role=…`. + +Username and password are available from `spring.data.cassandra.username`and `spring.data.cassandra.password` properties so using Spring Boot will pick up the generated credentials without further configuration. +You can configure the property names by setting`spring.cloud.vault.cassandra.username-property` and`spring.cloud.vault.cassandra.password-property`. 
+ +``` +spring.cloud.vault: + cassandra: + enabled: true + role: readonly + backend: cassandra + username-property: spring.data.cassandra.username + password-property: spring.data.cassandra.password +``` + +* `enabled` setting this value to `true` enables the Cassandra backend config usage + +* `role` sets the role name of the Cassandra role definition + +* `backend` sets the path of the Cassandra mount to use + +* `username-property` sets the property name in which the Cassandra username is stored + +* `password-property` sets the property name in which the Cassandra password is stored + +See also: [Vault Documentation: Setting up Apache Cassandra with Vault](https://www.vaultproject.io/docs/secrets/cassandra/index.html) + +### 8.4. Couchbase Database + +Spring Cloud Vault can obtain credentials for Couchbase. +The integration can be enabled by setting`spring.cloud.vault.couchbase.enabled=true` (default `false`) and providing the role name with `spring.cloud.vault.couchbase.role=…`. + +Username and password are available from `spring.couchbase.username`and `spring.couchbase.password` properties so using Spring Boot will pick up the generated credentials without further configuration. +You can configure the property names by setting`spring.cloud.vault.couchbase.username-property` and`spring.cloud.vault.couchbase.password-property`. 
+ +``` +spring.cloud.vault: + couchbase: + enabled: true + role: readonly + backend: database + username-property: spring.couchbase.username + password-property: spring.couchbase.password +``` + +* `enabled` setting this value to `true` enables the Couchbase backend config usage + +* `role` sets the role name of the Couchbase role definition + +* `backend` sets the path of the Couchbase mount to use + +* `username-property` sets the property name in which the Couchbase username is stored + +* `password-property` sets the property name in which the Couchbase password is stored + +See also: [Couchbase Database Plugin Documentation](https://github.com/hashicorp/vault-plugin-database-couchbase) + +### 8.5. Elasticsearch + +Spring Cloud Vault can obtain since version 3.0 credentials for Elasticsearch. +The integration can be enabled by setting`spring.cloud.vault.elasticsearch.enabled=true` (default `false`) and providing the role name with `spring.cloud.vault.elasticsearch.role=…`. + +Username and password are available from `spring.elasticsearch.rest.username`and `spring.elasticsearch.rest.password` properties so using Spring Boot will pick up the generated credentials without further configuration. +You can configure the property names by setting`spring.cloud.vault.elasticsearch.username-property` and`spring.cloud.vault.elasticsearch.password-property`. 
+ +``` +spring.cloud.vault: + elasticsearch: + enabled: true + role: readonly + backend: mongodb + username-property: spring.elasticsearch.rest.username + password-property: spring.elasticsearch.rest.password +``` + +* `enabled` setting this value to `true` enables the Elasticsearch database backend config usage + +* `role` sets the role name of the Elasticsearch role definition + +* `backend` sets the path of the Elasticsearch mount to use + +* `username-property` sets the property name in which the Elasticsearch username is stored + +* `password-property` sets the property name in which the Elasticsearch password is stored + +See also: [Vault Documentation: Setting up Elasticsearch with Vault](https://www.vaultproject.io/docs/secrets/databases/elasticdb) + +### 8.6. MongoDB + +| |The `mongodb` backend has been deprecated in Vault 0.7.1 and it is recommended to use the `database` backend and mount it as `mongodb`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------| + +Spring Cloud Vault can obtain credentials for MongoDB. +The integration can be enabled by setting`spring.cloud.vault.mongodb.enabled=true` (default `false`) and providing the role name with `spring.cloud.vault.mongodb.role=…`. + +Username and password are stored in `spring.data.mongodb.username`and `spring.data.mongodb.password` so using Spring Boot will pick up the generated credentials without further configuration. +You can configure the property names by setting`spring.cloud.vault.mongodb.username-property` and`spring.cloud.vault.mongodb.password-property`. 
+ +``` +spring.cloud.vault: + mongodb: + enabled: true + role: readonly + backend: mongodb + username-property: spring.data.mongodb.username + password-property: spring.data.mongodb.password +``` + +* `enabled` setting this value to `true` enables the MongodB backend config usage + +* `role` sets the role name of the MongoDB role definition + +* `backend` sets the path of the MongoDB mount to use + +* `username-property` sets the property name in which the MongoDB username is stored + +* `password-property` sets the property name in which the MongoDB password is stored + +See also: [Vault Documentation: Setting up MongoDB with Vault](https://www.vaultproject.io/docs/secrets/mongodb/index.html) + +### 8.7. MySQL + +| |The `mysql` backend has been deprecated in Vault 0.7.1 and it is recommended to use the `database` backend and mount it as `mysql`.
Configuration for `spring.cloud.vault.mysql` will be removed in a future version.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Cloud Vault can obtain credentials for MySQL. +The integration can be enabled by setting`spring.cloud.vault.mysql.enabled=true` (default `false`) and providing the role name with `spring.cloud.vault.mysql.role=…`. + +Username and password are available from `spring.datasource.username`and `spring.datasource.password` properties so using Spring Boot will pick up the generated credentials without further configuration. +You can configure the property names by setting`spring.cloud.vault.mysql.username-property` and`spring.cloud.vault.mysql.password-property`. + +``` +spring.cloud.vault: + mysql: + enabled: true + role: readonly + backend: mysql + username-property: spring.datasource.username + password-property: spring.datasource.password +``` + +* `enabled` setting this value to `true` enables the MySQL backend config usage + +* `role` sets the role name of the MySQL role definition + +* `backend` sets the path of the MySQL mount to use + +* `username-property` sets the property name in which the MySQL username is stored + +* `password-property` sets the property name in which the MySQL password is stored + +See also: [Vault Documentation: Setting up MySQL with Vault](https://www.vaultproject.io/docs/secrets/mysql/index.html) + +### 8.8. PostgreSQL + +| |The `postgresql` backend has been deprecated in Vault 0.7.1 and it is recommended to use the `database` backend and mount it as `postgresql`.
Configuration for `spring.cloud.vault.postgresql` will be removed in a future version.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Cloud Vault can obtain credentials for PostgreSQL. +The integration can be enabled by setting`spring.cloud.vault.postgresql.enabled=true` (default `false`) and providing the role name with `spring.cloud.vault.postgresql.role=…`. + +Username and password are available from `spring.datasource.username`and `spring.datasource.password` properties so using Spring Boot will pick up the generated credentials without further configuration. +You can configure the property names by setting`spring.cloud.vault.postgresql.username-property` and`spring.cloud.vault.postgresql.password-property`. + +``` +spring.cloud.vault: + postgresql: + enabled: true + role: readonly + backend: postgresql + username-property: spring.datasource.username + password-property: spring.datasource.password +``` + +* `enabled` setting this value to `true` enables the PostgreSQL backend config usage + +* `role` sets the role name of the PostgreSQL role definition + +* `backend` sets the path of the PostgreSQL mount to use + +* `username-property` sets the property name in which the PostgreSQL username is stored + +* `password-property` sets the property name in which the PostgreSQL password is stored + +See also: [Vault Documentation: Setting up PostgreSQL with Vault](https://www.vaultproject.io/docs/secrets/postgresql/index.html) + +## 9. Customize which secret backends to expose as PropertySource + +Spring Cloud Vault uses property-based configuration to create `PropertySource`s for key-value and discovered secret backends. + +Discovered backends provide `VaultSecretBackendDescriptor` beans to describe the configuration state to use secret backend as `PropertySource`. 
+A `SecretBackendMetadataFactory` is required to create a `SecretBackendMetadata` object which contains path, name and property transformation configuration. + +`SecretBackendMetadata` is used to back a particular `PropertySource`. + +You can register a `VaultConfigurer` for customization. +Default key-value and discovered backend registration is disabled if you provide a `VaultConfigurer`. +You can however enable default registration with`SecretBackendConfigurer.registerDefaultKeyValueSecretBackends()` and `SecretBackendConfigurer.registerDefaultDiscoveredSecretBackends()`. + +``` +public class CustomizationBean implements VaultConfigurer { + + @Override + public void addSecretBackends(SecretBackendConfigurer configurer) { + + configurer.add("secret/my-application"); + + configurer.registerDefaultKeyValueSecretBackends(false); + configurer.registerDefaultDiscoveredSecretBackends(true); + } +} +``` + +``` +SpringApplication application = new SpringApplication(MyApplication.class); +application.addBootstrapper(VaultBootstrapper.fromConfigurer(new CustomizationBean())); +``` + +## 10. Custom Secret Backend Implementations + +Spring Cloud Vault ships with secret backend support for the most common backend integrations. +You can integrate with any kind of backend by providing an implementation that describes how to obtain data from the backend you want to use and how to surface data provided by that backend by providing a `PropertyTransformer`. + +Adding a custom implementation for a backend requires implementation of two interfaces: + +* `org.springframework.cloud.vault.config.VaultSecretBackendDescriptor` + +* `org.springframework.cloud.vault.config.SecretBackendMetadataFactory` + +`VaultSecretBackendDescriptor` is typically an object that holds configuration data, such as `VaultDatabaseProperties`. Spring Cloud Vault requires that your type is annotated with `@ConfigurationProperties` to materialize the class from the configuration. 
+ +`SecretBackendMetadataFactory` accepts `VaultSecretBackendDescriptor` to create the actual `SecretBackendMetadata` object which holds the context path within your Vault server, any path variables required to resolve parametrized context paths and `PropertyTransformer`. + +Both, `VaultSecretBackendDescriptor` and `SecretBackendMetadataFactory` types must be registered in `spring.factories` which is an extension mechanism provided by Spring, similar to Java’s ServiceLoader. + +## 11. Service Registry Configuration + +You can use a `DiscoveryClient` (such as from Spring Cloud Consul) to locate a Vault server by setting spring.cloud.vault.discovery.enabled=true (default `false`). +The net result of that is that your apps need a application.yml (or an environment variable) with the appropriate discovery configuration. +The benefit is that the Vault can change its co-ordinates, as long as the discovery service is a fixed point. +The default service id is `vault` but you can change that on the client with`spring.cloud.vault.discovery.serviceId`. + +The discovery client implementations all support some kind of metadata map (e.g. for Eureka we have eureka.instance.metadataMap). +Some additional properties of the service may need to be configured in its service registration metadata so that clients can connect correctly. +Service registries that do not provide details about transport layer security need to provide a `scheme` metadata entry to be set either to `https` or `http`. +If no scheme is configured and the service is not exposed as secure service, then configuration defaults to `spring.cloud.vault.scheme` which is `https` when it’s not set. + +``` +spring.cloud.vault.discovery: + enabled: true + service-id: my-vault-service +``` + +## 12. Vault Client Fail Fast + +In some cases, it may be desirable to fail startup of a service if it cannot connect to the Vault Server. 
+If this is the desired behavior, set the bootstrap configuration property`spring.cloud.vault.fail-fast=true` and the client will halt with an Exception. + +``` +spring.cloud.vault: + fail-fast: true +``` + +## 13. Vault Enterprise Namespace Support + +Vault Enterprise allows using namespaces to isolate multiple Vaults on a single Vault server. +Configuring a namespace by setting`spring.cloud.vault.namespace=…` enables the namespace header`X-Vault-Namespace` on every outgoing HTTP request when using the Vault`RestTemplate` or `WebClient`. + +Please note that this feature is not supported by Vault Community edition and has no effect on Vault operations. + +``` +spring.cloud.vault: + namespace: my-namespace +``` + +See also: [Vault Enterprise: Namespaces](https://www.vaultproject.io/docs/enterprise/namespaces/index.html) + +## 14. Vault Client SSL configuration + +SSL can be configured declaratively by setting various properties. +You can set either `javax.net.ssl.trustStore` to configure JVM-wide SSL settings or `spring.cloud.vault.ssl.trust-store`to set SSL settings only for Spring Cloud Vault Config. + +``` +spring.cloud.vault: + ssl: + trust-store: classpath:keystore.jks + trust-store-password: changeit + trust-store-type: JKS + enabled-protocols: TLSv1.2,TLSv1.3 + enabled-cipher-suites: TLS_AES_128_GCM_SHA256 +``` + +* `trust-store` sets the resource for the trust-store. + SSL-secured Vault communication will validate the Vault SSL certificate with the specified trust-store. + +* `trust-store-password` sets the trust-store password + +* `trust-store-type` sets the trust-store type. Supported values are all supported `KeyStore` types including `PEM`. + +* `enabled-protocols` sets the list of enabled SSL/TLS protocols (since 3.0.2). + +* `enabled-cipher-suites` sets the list of enabled SSL/TLS cipher suites (since 3.0.2). 
+ +Please note that configuring `spring.cloud.vault.ssl.*` can be only applied when either Apache Http Components or the OkHttp client is on your class-path. + +## 15. Lease lifecycle management (renewal and revocation) + +With every secret, Vault creates a lease: +metadata containing information such as a time duration, renewability, and more. + +Vault promises that the data will be valid for the given duration, or Time To Live (TTL). +Once the lease is expired, Vault can revoke the data, and the consumer of the secret can no longer be certain that it is valid. + +Spring Cloud Vault maintains a lease lifecycle beyond the creation of login tokens and secrets. +That said, login tokens and secrets associated with a lease are scheduled for renewal just before the lease expires until terminal expiry. +Application shutdown revokes obtained login tokens and renewable leases. + +Secret service and database backends (such as MongoDB or MySQL) usually generate a renewable lease so generated credentials will be disabled on application shutdown. + +| |Static tokens are not renewed or revoked.| +|---|-----------------------------------------| + +Lease renewal and revocation is enabled by default and can be disabled by setting `spring.cloud.vault.config.lifecycle.enabled`to `false`. +This is not recommended as leases can expire and Spring Cloud Vault cannot longer access Vault or services using generated credentials and valid credentials remain active after application shutdown. + +``` +spring.cloud.vault: + config.lifecycle: + enabled: true + min-renewal: 10s + expiry-threshold: 1m + lease-endpoints: Legacy +``` + +* `enabled` controls whether leases associated with secrets are considered to be renewed and expired secrets are rotated. + Enabled by default. + +* `min-renewal` sets the duration that is at least required before renewing a lease. + This setting prevents renewals from happening too often. + +* `expiry-threshold` sets the expiry threshold. 
+ A lease is renewed the configured period of time before it expires. + +* `lease-endpoints` sets the endpoints for renew and revoke. + Legacy for vault versions before 0.8 and SysLeases for later. + +See also: [Vault Documentation: Lease, Renew, and Revoke](https://www.vaultproject.io/docs/concepts/lease.html) + +## 16. Session token lifecycle management (renewal, re-login and revocation) + +A Vault session token (also referred to as `LoginToken`) is quite similar to a lease as it has a TTL, max TTL, and may expire. +Once a login token expires, it cannot be used anymore to interact with Vault. +Therefore, Spring Vault ships with a `SessionManager` API for imperative and reactive use. + +Spring Cloud Vault maintains the session token lifecycle by default. +Session tokens are obtained lazily so the actual login is deferred until the first session-bound use of Vault. +Once Spring Cloud Vault obtains a session token, it retains it until expiry. +The next time a session-bound activity is used, Spring Cloud Vault re-logins into Vault and obtains a new session token. +On application shut down, Spring Cloud Vault revokes the token if it was still active to terminate the session. + +Session lifecycle is enabled by default and can be disabled by setting `spring.cloud.vault.session.lifecycle.enabled`to `false`. +Disabling is not recommended as session tokens can expire and Spring Cloud Vault cannot longer access Vault. + +``` +spring.cloud.vault: + session.lifecycle: + enabled: true + refresh-before-expiry: 10s + expiry-threshold: 20s +``` + +* `enabled` controls whether session lifecycle management is enabled to renew session tokens. + Enabled by default. + +* `refresh-before-expiry` controls the point in time when the session token gets renewed. + The refresh time is calculated by subtracting `refresh-before-expiry` from the token expiry time. + Defaults to `5 seconds`. + +* `expiry-threshold` sets the expiry threshold. 
+ The threshold represents a minimum TTL duration to consider a session token as valid. + Tokens with a shorter TTL are considered expired and are not used anymore. + Should be greater than `refresh-before-expiry` to prevent token expiry. + Defaults to `7 seconds`. + +See also: [Vault Documentation: Token Renewal](https://www.vaultproject.io/api-docs/auth/token#renew-a-token-self) + +## Appendix A: Common application properties + +Various properties can be specified inside your `application.properties` file, inside your `application.yml` file, or as command line switches. +This appendix provides a list of common Spring Cloud Vault properties and references to the underlying classes that consume them. + +| |Property contributions can come from additional jar files on your classpath, so you should not consider this an exhaustive list.
Also, you can define your own properties.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| Name | Default | Description | +|----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| spring.cloud.vault.app-id.app-id-path | `app-id` | Mount path of the AppId authentication backend. | +| spring.cloud.vault.app-id.network-interface | | Network interface hint for the "MAC\_ADDRESS" UserId mechanism. | +| spring.cloud.vault.app-id.user-id | `MAC_ADDRESS` | UserId mechanism. Can be either "MAC\_ADDRESS", "IP\_ADDRESS", a string or a class name. | +| spring.cloud.vault.app-role.app-role-path | `approle` | Mount path of the AppRole authentication backend. | +| spring.cloud.vault.app-role.role | | Name of the role, optional, used for pull-mode. | +| spring.cloud.vault.app-role.role-id | | The RoleId. | +| spring.cloud.vault.app-role.secret-id | | The SecretId. | +| spring.cloud.vault.application-name | `application` | Application name for AppId authentication. | +| spring.cloud.vault.authentication | | | +| spring.cloud.vault.aws-ec2.aws-ec2-path | `aws-ec2` | Mount path of the AWS-EC2 authentication backend. | +| spring.cloud.vault.aws-ec2.identity-document |`[169.254.169.254/latest/dynamic/instance-identity/pkcs7](http://169.254.169.254/latest/dynamic/instance-identity/pkcs7)`| URL of the AWS-EC2 PKCS7 identity document. 
| +| spring.cloud.vault.aws-ec2.nonce | | Nonce used for AWS-EC2 authentication. An empty nonce defaults to nonce generation. | +| spring.cloud.vault.aws-ec2.role | | Name of the role, optional. | +| spring.cloud.vault.aws-iam.aws-path | `aws` | Mount path of the AWS authentication backend. | +| spring.cloud.vault.aws-iam.endpoint-uri | | STS server URI. @since 2.2 | +| spring.cloud.vault.aws-iam.role | | Name of the role, optional. Defaults to the friendly IAM name if not set. | +| spring.cloud.vault.aws-iam.server-name | | Name of the server used to set {@code X-Vault-AWS-IAM-Server-ID} header in the headers of login requests. | +| spring.cloud.vault.aws.access-key-property | `cloud.aws.credentials.accessKey` | Target property for the obtained access key. | +| spring.cloud.vault.aws.backend | `aws` | aws backend path. | +| spring.cloud.vault.aws.credential-type | | aws credential type | +| spring.cloud.vault.aws.enabled | `false` | Enable aws backend usage. | +| spring.cloud.vault.aws.role | | Role name for credentials. | +| spring.cloud.vault.aws.role-arn | | Role arn for assumed\_role in case we have multiple roles associated with the vault role. @since 3.0.2 | +| spring.cloud.vault.aws.secret-key-property | `cloud.aws.credentials.secretKey` | Target property for the obtained secret key. | +| spring.cloud.vault.aws.session-token-key-property | `cloud.aws.credentials.sessionToken` | Target property for the obtained secret key. | +| spring.cloud.vault.aws.ttl | `0` | TTL for sts tokens. Defaults to whatever the vault Role may have for Max. Also limited to what AWS supports to be the max for STS. @since 3.0.2 | +| spring.cloud.vault.azure-msi.azure-path | `azure` | Mount path of the Azure MSI authentication backend. | +| spring.cloud.vault.azure-msi.identity-token-service | | Identity token service URI. @since 3.0 | +| spring.cloud.vault.azure-msi.metadata-service | | Instance metadata service URI. 
@since 3.0 | +| spring.cloud.vault.azure-msi.role | | Name of the role. | +| spring.cloud.vault.cassandra.backend | `cassandra` | Cassandra backend path. | +| spring.cloud.vault.cassandra.enabled | `false` | Enable cassandra backend usage. | +| spring.cloud.vault.cassandra.password-property | `spring.data.cassandra.password` | Target property for the obtained password. | +| spring.cloud.vault.cassandra.role | | Role name for credentials. | +| spring.cloud.vault.cassandra.static-role | `false` | Enable static role usage. @since 2.2 | +| spring.cloud.vault.cassandra.username-property | `spring.data.cassandra.username` | Target property for the obtained username. | +| spring.cloud.vault.config.lifecycle.enabled | `true` | Enable lifecycle management. | +| spring.cloud.vault.config.lifecycle.expiry-threshold | | The expiry threshold. {@link Lease} is renewed the given {@link Duration} before it expires. @since 2.2 | +| spring.cloud.vault.config.lifecycle.lease-endpoints | |Set the {@link LeaseEndpoints} to delegate renewal/revocation calls to. {@link LeaseEndpoints} encapsulates differences between Vault versions that affect the location of renewal/revocation endpoints. Can be {@link LeaseEndpoints#SysLeases} for version 0.8 or above of Vault or {@link LeaseEndpoints#Legacy} for older versions (the default). @since 2.2| +| spring.cloud.vault.config.lifecycle.min-renewal | | The time period that is at least required before renewing a lease. @since 2.2 | +| spring.cloud.vault.config.order | `0` | Used to set a {@link org.springframework.core.env.PropertySource} priority. This is useful to use Vault as an override on other property sources. @see org.springframework.core.PriorityOrdered | +| spring.cloud.vault.connection-timeout | `5000` | Connection timeout. | +| spring.cloud.vault.consul.backend | `consul` | Consul backend path. | +| spring.cloud.vault.consul.enabled | `false` | Enable consul backend usage. 
| +| spring.cloud.vault.consul.role | | Role name for credentials. | +| spring.cloud.vault.consul.token-property | `spring.cloud.consul.token` | Target property for the obtained token. | +| spring.cloud.vault.couchbase.backend | `database` | Couchbase backend path. | +| spring.cloud.vault.couchbase.enabled | `false` | Enable couchbase backend usage. | +| spring.cloud.vault.couchbase.password-property | `spring.couchbase.password` | Target property for the obtained password. | +| spring.cloud.vault.couchbase.role | | Role name for credentials. | +| spring.cloud.vault.couchbase.static-role | `false` | Enable static role usage. | +| spring.cloud.vault.couchbase.username-property | `spring.couchbase.username` | Target property for the obtained username. | +| spring.cloud.vault.database.backend | `database` | Database backend path. | +| spring.cloud.vault.database.enabled | `false` | Enable database backend usage. | +| spring.cloud.vault.database.password-property | `spring.datasource.password` | Target property for the obtained password. | +| spring.cloud.vault.database.role | | Role name for credentials. | +| spring.cloud.vault.database.static-role | `false` | Enable static role usage. | +| spring.cloud.vault.database.username-property | `spring.datasource.username` | Target property for the obtained username. | +| spring.cloud.vault.databases | | | +| spring.cloud.vault.discovery.enabled | `false` | Flag to indicate that Vault server discovery is enabled (vault server URL will be looked up via discovery). | +| spring.cloud.vault.discovery.service-id | `vault` | Service id to locate Vault. | +| spring.cloud.vault.elasticsearch.backend | `database` | Database backend path. | +| spring.cloud.vault.elasticsearch.enabled | `false` | Enable elasticsearch backend usage. | +| spring.cloud.vault.elasticsearch.password-property | `spring.elasticsearch.rest.password` | Target property for the obtained password. 
| +| spring.cloud.vault.elasticsearch.role | | Role name for credentials. | +| spring.cloud.vault.elasticsearch.static-role | `false` | Enable static role usage. | +| spring.cloud.vault.elasticsearch.username-property | `spring.elasticsearch.rest.username` | Target property for the obtained username. | +| spring.cloud.vault.enabled | `true` | Enable Vault config server. | +| spring.cloud.vault.fail-fast | `false` | Fail fast if data cannot be obtained from Vault. | +| spring.cloud.vault.gcp-gce.gcp-path | `gcp` | Mount path of the Kubernetes authentication backend. | +| spring.cloud.vault.gcp-gce.role | | Name of the role against which the login is being attempted. | +| spring.cloud.vault.gcp-gce.service-account | | Optional service account id. Using the default id if left unconfigured. | +| spring.cloud.vault.gcp-iam.credentials.encoded-key | | The base64 encoded contents of an OAuth2 account private key in JSON format. | +| spring.cloud.vault.gcp-iam.credentials.location | | Location of the OAuth2 credentials private key. \ Since this is a Resource, the private key can be in a multitude of locations, such as a local file system, classpath, URL, etc. | +| spring.cloud.vault.gcp-iam.gcp-path | `gcp` | Mount path of the Kubernetes authentication backend. | +| spring.cloud.vault.gcp-iam.jwt-validity | `15m` | Validity of the JWT token. | +| spring.cloud.vault.gcp-iam.project-id | | Overrides the GCP project Id. | +| spring.cloud.vault.gcp-iam.role | | Name of the role against which the login is being attempted. | +| spring.cloud.vault.gcp-iam.service-account-id | | Overrides the GCP service account Id. | +| spring.cloud.vault.host | `localhost` | Vault server host. | +| spring.cloud.vault.kubernetes.kubernetes-path | `kubernetes` | Mount path of the Kubernetes authentication backend. | +| spring.cloud.vault.kubernetes.role | | Name of the role against which the login is being attempted. 
| +| spring.cloud.vault.kubernetes.service-account-token-file | `/var/run/secrets/kubernetes.io/serviceaccount/token` | Path to the service account token file. | +| spring.cloud.vault.kv.application-name | `application` | Application name to be used for the context. | +| spring.cloud.vault.kv.backend | `secret` | Name of the default backend. | +| spring.cloud.vault.kv.backend-version | `2` | Key-Value backend version. Currently supported versions are: \ \Version 1 (unversioned key-value backend).\ \Version 2 (versioned key-value backend).\ \ | +| spring.cloud.vault.kv.default-context | `application` | Name of the default context. | +| spring.cloud.vault.kv.enabled | `true` | Enable the kev-value backend. | +| spring.cloud.vault.kv.profile-separator | `/` | Profile-separator to combine application name and profile. | +| spring.cloud.vault.kv.profiles | | List of active profiles. @since 3.0 | +| spring.cloud.vault.mongodb.backend | `mongodb` | MongoDB backend path. | +| spring.cloud.vault.mongodb.enabled | `false` | Enable mongodb backend usage. | +| spring.cloud.vault.mongodb.password-property | `spring.data.mongodb.password` | Target property for the obtained password. | +| spring.cloud.vault.mongodb.role | | Role name for credentials. | +| spring.cloud.vault.mongodb.static-role | `false` | Enable static role usage. @since 2.2 | +| spring.cloud.vault.mongodb.username-property | `spring.data.mongodb.username` | Target property for the obtained username. | +| spring.cloud.vault.mysql.backend | `mysql` | mysql backend path. | +| spring.cloud.vault.mysql.enabled | `false` | Enable mysql backend usage. | +| spring.cloud.vault.mysql.password-property | `spring.datasource.password` | Target property for the obtained username. | +| spring.cloud.vault.mysql.role | | Role name for credentials. | +| spring.cloud.vault.mysql.username-property | `spring.datasource.username` | Target property for the obtained username. 
| +| spring.cloud.vault.namespace | | Vault namespace (requires Vault Enterprise). | +| spring.cloud.vault.pcf.instance-certificate | | Path to the instance certificate (PEM). Defaults to {@code CF\_INSTANCE\_CERT} env variable. | +| spring.cloud.vault.pcf.instance-key | | Path to the instance key (PEM). Defaults to {@code CF\_INSTANCE\_KEY} env variable. | +| spring.cloud.vault.pcf.pcf-path | `pcf` | Mount path of the Kubernetes authentication backend. | +| spring.cloud.vault.pcf.role | | Name of the role against which the login is being attempted. | +| spring.cloud.vault.port | `8200` | Vault server port. | +| spring.cloud.vault.postgresql.backend | `postgresql` | postgresql backend path. | +| spring.cloud.vault.postgresql.enabled | `false` | Enable postgresql backend usage. | +| spring.cloud.vault.postgresql.password-property | `spring.datasource.password` | Target property for the obtained username. | +| spring.cloud.vault.postgresql.role | | Role name for credentials. | +| spring.cloud.vault.postgresql.username-property | `spring.datasource.username` | Target property for the obtained username. | +| spring.cloud.vault.rabbitmq.backend | `rabbitmq` | rabbitmq backend path. | +| spring.cloud.vault.rabbitmq.enabled | `false` | Enable rabbitmq backend usage. | +| spring.cloud.vault.rabbitmq.password-property | `spring.rabbitmq.password` | Target property for the obtained password. | +| spring.cloud.vault.rabbitmq.role | | Role name for credentials. | +| spring.cloud.vault.rabbitmq.username-property | `spring.rabbitmq.username` | Target property for the obtained username. | +| spring.cloud.vault.reactive.enabled | `true` | Flag to indicate that reactive discovery is enabled | +| spring.cloud.vault.read-timeout | `15000` | Read timeout. | +| spring.cloud.vault.scheme | `https` | Protocol scheme. Can be either "http" or "https". | +| spring.cloud.vault.session.lifecycle.enabled | `true` | Enable session lifecycle management. 
| +| spring.cloud.vault.session.lifecycle.expiry-threshold | `7s` | The expiry threshold for a {@link LoginToken}. The threshold represents a minimum TTL duration to consider a login token as valid. Tokens with a shorter TTL are considered expired and are not used anymore. Should be greater than {@code refreshBeforeExpiry} to prevent token expiry. | +|spring.cloud.vault.session.lifecycle.refresh-before-expiry| `5s` | The time period that is at least required before renewing the {@link LoginToken}. | +| spring.cloud.vault.ssl.cert-auth-path | `cert` | Mount path of the TLS cert authentication backend. | +| spring.cloud.vault.ssl.enabled-cipher-suites | | List of enabled SSL/TLS cipher suites. @since 3.0.2 | +| spring.cloud.vault.ssl.enabled-protocols | | List of enabled SSL/TLS protocol. @since 3.0.2 | +| spring.cloud.vault.ssl.key-store | | Trust store that holds certificates and private keys. | +| spring.cloud.vault.ssl.key-store-password | | Password used to access the key store. | +| spring.cloud.vault.ssl.key-store-type | | Type of the key store. @since 3.0 | +| spring.cloud.vault.ssl.trust-store | | Trust store that holds SSL certificates. | +| spring.cloud.vault.ssl.trust-store-password | | Password used to access the trust store. | +| spring.cloud.vault.ssl.trust-store-type | | Type of the trust store. @since 3.0 | +| spring.cloud.vault.token | | Static vault token. Required if {@link #authentication} is {@code TOKEN}. | +| spring.cloud.vault.uri | | Vault URI. Can be set with scheme, host and port. 
| + diff --git a/docs/en/spring-cloud/spring-cloud-zookeeper.md b/docs/en/spring-cloud/spring-cloud-zookeeper.md new file mode 100644 index 0000000000000000000000000000000000000000..b4cce17f32b17ebc53ad006f523351cb68c53fd7 --- /dev/null +++ b/docs/en/spring-cloud/spring-cloud-zookeeper.md @@ -0,0 +1,802 @@ +# [Spring Cloud Zookeeper](#_spring_cloud_zookeeper) + +This project provides Zookeeper integrations for Spring Boot applications through +autoconfiguration and binding to the Spring Environment and other Spring programming model +idioms. With a few annotations, you can quickly enable and configure the common patterns +inside your application and build large distributed systems with Zookeeper based +components. The provided patterns include Service Discovery and Configuration. The project +also provides client-side load-balancing via integration with Spring Cloud LoadBalancer. + +## 1. Quick Start + +This quick start walks through using Spring Cloud Zookeeper for Service Discovery and Distributed Configuration. + +First, run Zookeeper on your machine. Then you can access it and use it as a Service Registry and Configuration source with Spring Cloud Zookeeper. + +### 1.1. Discovery Client Usage + +To use these features in an application, you can build it as a Spring Boot application that depends on `spring-cloud-zookeeper-core` and `spring-cloud-zookeeper-discovery`. +The most convenient way to add the dependency is with a Spring Boot starter: `org.springframework.cloud:spring-cloud-starter-zookeeper-discovery`. +We recommend using dependency management and `spring-boot-starter-parent`. 
+The following example shows a typical Maven configuration: + +pom.xml + +``` + + + org.springframework.boot + spring-boot-starter-parent + {spring-boot-version} + + + + + + org.springframework.cloud + spring-cloud-starter-zookeeper-discovery + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + org.springframework.cloud + spring-cloud-dependencies + ${spring-cloud.version} + pom + import + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + +``` + +The following example shows a typical Gradle setup: + +build.gradle + +``` +plugins { + id 'org.springframework.boot' version ${spring-boot-version} + id 'io.spring.dependency-management' version ${spring-dependency-management-version} + id 'java' +} + +repositories { + mavenCentral() +} + +dependencies { + implementation 'org.springframework.cloud:spring-cloud-starter-zookeeper-discovery' + testImplementation 'org.springframework.boot:spring-boot-starter-test' +} +dependencyManagement { + imports { + mavenBom "org.springframework.cloud:spring-cloud-dependencies:${springCloudVersion}" + } +} +``` + +| |Depending on the version you are using, you might need to adjust Apache Zookeeper version used in your project.
You can read more about it in the [Install Zookeeper section](#spring-cloud-zookeeper-install).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Now you can create a standard Spring Boot application, such as the following HTTP server: + +``` +@SpringBootApplication +@RestController +public class Application { + + @GetMapping("/") + public String home() { + return "Hello World!"; + } + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + +} +``` + +When this HTTP server runs, it connects to Zookeeper, which runs on the default local port (2181). +To modify the startup behavior, you can change the location of Zookeeper by using `application.properties`, as shown in the following example: + +``` +spring: + cloud: + zookeeper: + connect-string: localhost:2181 +``` + +You can now use `DiscoveryClient`, `@LoadBalanced RestTemplate`, or `@LoadBalanced WebClient.Builder` to retrieve services and instances data from Zookeeper, as shown in the following example: + +``` +@Autowired +private DiscoveryClient discoveryClient; + +public String serviceUrl() { + List list = discoveryClient.getInstances("STORES"); + if (list != null && list.size() > 0 ) { + return list.get(0).getUri().toString(); + } + return null; +} +``` + +### 1.2. Distributed Configuration Usage + +To use these features in an application, you can build it as a Spring Boot application that depends on `spring-cloud-zookeeper-core` and `spring-cloud-zookeeper-config`. +The most convenient way to add the dependency is with a Spring Boot starter: `org.springframework.cloud:spring-cloud-starter-zookeeper-config`. +We recommend using dependency management and `spring-boot-starter-parent`. 
+The following example shows a typical Maven configuration: + +pom.xml + +``` + + + org.springframework.boot + spring-boot-starter-parent + {spring-boot-version} + + + + + + org.springframework.cloud + spring-cloud-starter-zookeeper-config + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + org.springframework.cloud + spring-cloud-dependencies + ${spring-cloud.version} + pom + import + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + +``` + +The following example shows a typical Gradle setup: + +build.gradle + +``` +plugins { + id 'org.springframework.boot' version ${spring-boot-version} + id 'io.spring.dependency-management' version ${spring-dependency-management-version} + id 'java' +} + +repositories { + mavenCentral() +} + +dependencies { + implementation 'org.springframework.cloud:spring-cloud-starter-zookeeper-config' + testImplementation 'org.springframework.boot:spring-boot-starter-test' +} +dependencyManagement { + imports { + mavenBom "org.springframework.cloud:spring-cloud-dependencies:${springCloudVersion}" + } +} +``` + +| |Depending on the version you are using, you might need to adjust Apache Zookeeper version used in your project.
You can read more about it in the [Install Zookeeper section](#spring-cloud-zookeeper-install).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Now you can create a standard Spring Boot application, such as the following HTTP server: + +``` +@SpringBootApplication +@RestController +public class Application { + + @GetMapping("/") + public String home() { + return "Hello World!"; + } + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + +} +``` + +The application retrieves configuration data from Zookeeper. + +| |If you use Spring Cloud Zookeeper Config, you need to set the `spring.config.import` property in order to bind to Zookeeper.
You can read more about it in the [Spring Boot Config Data Import section](#config-data-import).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 2. Install Zookeeper + +See the [installation +documentation](https://zookeeper.apache.org/doc/current/zookeeperStarted.html) for instructions on how to install Zookeeper. + +Spring Cloud Zookeeper uses Apache Curator behind the scenes. +While Zookeeper 3.5.x is still considered "beta" by the Zookeeper development team, +the reality is that it is used in production by many users. +However, Zookeeper 3.4.x is also used in production. +Prior to Apache Curator 4.0, both versions of Zookeeper were supported via two versions of Apache Curator. +Starting with Curator 4.0 both versions of Zookeeper are supported via the same Curator libraries. + +In case you are integrating with version 3.4 you need to change the Zookeeper dependency +that comes shipped with `curator`, and thus `spring-cloud-zookeeper`. +To do so simply exclude that dependency and add the 3.4.x version like shown below. + +maven + +``` + + org.springframework.cloud + spring-cloud-starter-zookeeper-all + + + org.apache.zookeeper + zookeeper + + + + + org.apache.zookeeper + zookeeper + 3.4.12 + + + org.slf4j + slf4j-log4j12 + + + +``` + +gradle + +``` +compile('org.springframework.cloud:spring-cloud-starter-zookeeper-all') { + exclude group: 'org.apache.zookeeper', module: 'zookeeper' +} +compile('org.apache.zookeeper:zookeeper:3.4.12') { + exclude group: 'org.slf4j', module: 'slf4j-log4j12' +} +``` + +## 3. Service Discovery with Zookeeper + +Service Discovery is one of the key tenets of a microservice based architecture. Trying to +hand-configure each client or some form of convention can be difficult to do and can be +brittle. 
[Curator](https://curator.apache.org)(A Java library for Zookeeper) provides Service +Discovery through a [Service Discovery +Extension](https://curator.apache.org/curator-x-discovery/). Spring Cloud Zookeeper uses this extension for service registration and +discovery. + +### 3.1. Activating + +Including a dependency on`org.springframework.cloud:spring-cloud-starter-zookeeper-discovery` enables +autoconfiguration that sets up Spring Cloud Zookeeper Discovery. + +| |For web functionality, you still need to include`org.springframework.boot:spring-boot-starter-web`.| +|---|---------------------------------------------------------------------------------------------------| + +| |When working with version 3.4 of Zookeeper you need to change
the way you include the dependency as described [here](#spring-cloud-zookeeper-install).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.2. Registering with Zookeeper + +When a client registers with Zookeeper, it provides metadata (such as host and port, ID, +and name) about itself. + +The following example shows a Zookeeper client: + +``` +@SpringBootApplication +@RestController +public class Application { + + @RequestMapping("/") + public String home() { + return "Hello world"; + } + + public static void main(String[] args) { + new SpringApplicationBuilder(Application.class).web(true).run(args); + } + +} +``` + +| |The preceding example is a normal Spring Boot application.| +|---|----------------------------------------------------------| + +If Zookeeper is located somewhere other than `localhost:2181`, the configuration must +provide the location of the server, as shown in the following example: + +application.yml + +``` +spring: + cloud: + zookeeper: + connect-string: localhost:2181 +``` + +| |If you use [Spring Cloud Zookeeper Config](#spring-cloud-zookeeper-config), the
values shown in the preceding example need to be in `bootstrap.yml` instead of`application.yml`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The default service name, instance ID, and port (taken from the `Environment`) are`${spring.application.name}`, the Spring Context ID, and `${server.port}`, respectively. + +Having `spring-cloud-starter-zookeeper-discovery` on the classpath makes the app into both +a Zookeeper “service” (that is, it registers itself) and a “client” (that is, it can +query Zookeeper to locate other services). + +If you would like to disable the Zookeeper Discovery Client, you can set`spring.cloud.zookeeper.discovery.enabled` to `false`. + +### 3.3. Using the DiscoveryClient + +Spring Cloud has support for[Feign](https://github.com/spring-cloud/spring-cloud-netflix/blob/master/docs/src/main/asciidoc/spring-cloud-netflix.adoc#spring-cloud-feign)(a REST client builder),[Spring`RestTemplate`](https://github.com/spring-cloud/spring-cloud-netflix/blob/master/docs/src/main/ascii) and[Spring WebFlux](https://cloud.spring.io/spring-cloud-commons/reference/html/#loadbalanced-webclient), using logical service names instead of physical URLs. + +You can also use the `org.springframework.cloud.client.discovery.DiscoveryClient`, which +provides a simple API for discovery clients that is not specific to Netflix, as shown in +the following example: + +``` +@Autowired +private DiscoveryClient discoveryClient; + +public String serviceUrl() { + List list = discoveryClient.getInstances("STORES"); + if (list != null && list.size() > 0 ) { + return list.get(0).getUri().toString(); + } + return null; +} +``` + +## 4. Using Spring Cloud Zookeeper with Spring Cloud Components + +Feign, Spring Cloud Gateway and Spring Cloud LoadBalancer all work with Spring Cloud Zookeeper. + +### 4.1. 
Spring Cloud LoadBalancer with Zookeeper + +Spring Cloud Zookeeper provides an implementation of Spring Cloud LoadBalancer `ServiceInstanceListSupplier`. +When you use the `spring-cloud-starter-zookeeper-discovery`, Spring Cloud LoadBalancer is autoconfigured to use the`ZookeeperServiceInstanceListSupplier` by default. + +| |If you were previously using the StickyRule in Zookeeper, its replacement in the current stack
is the `SameInstancePreferenceServiceInstanceListSupplier` in SC LoadBalancer. You can read on how to set it up in the [Spring Cloud Commons documentation](https://docs.spring.io/spring-cloud-commons/docs/current/reference/html/#spring-cloud-loadbalancer).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 5. Spring Cloud Zookeeper and Service Registry + +Spring Cloud Zookeeper implements the `ServiceRegistry` interface, letting developers +register arbitrary services in a programmatic way. + +The `ServiceInstanceRegistration` class offers a `builder()` method to create a`Registration` object that can be used by the `ServiceRegistry`, as shown in the following +example: + +``` +@Autowired +private ZookeeperServiceRegistry serviceRegistry; + +public void registerThings() { + ZookeeperRegistration registration = ServiceInstanceRegistration.builder() + .defaultUriSpec() + .address("anyUrl") + .port(10) + .name("/a/b/c/d/anotherservice") + .build(); + this.serviceRegistry.register(registration); +} +``` + +### 5.1. Instance Status + +Netflix Eureka supports having instances that are `OUT_OF_SERVICE` registered with the server. +These instances are not returned as active service instances. +This is useful for behaviors such as blue/green deployments. +(Note that the Curator Service Discovery recipe does not support this behavior.) Taking advantage of the flexible payload has let Spring Cloud Zookeeper implement `OUT_OF_SERVICE` by updating some specific metadata and then filtering on that metadata in the Spring Cloud LoadBalancer `ZookeeperServiceInstanceListSupplier`. 
+The `ZookeeperServiceInstanceListSupplier` filters out all non-null instance statuses that do not equal `UP`. +If the instance status field is empty, it is considered to be `UP` for backwards compatibility. +To change the status of an instance, make a `POST` with `OUT_OF_SERVICE` to the `ServiceRegistry`instance status actuator endpoint, as shown in the following example: + +``` +$ http POST http://localhost:8081/service-registry status=OUT_OF_SERVICE +``` + +| |The preceding example uses the `http` command from [httpie.org](https://httpie.org).| +|---|------------------------------------------------------------------------------------| + +## 6. Zookeeper Dependencies + +The following topics cover how to work with Spring Cloud Zookeeper dependencies: + +* [Using the Zookeeper Dependencies](#spring-cloud-zookeeper-dependencies-using) + +* [Activating Zookeeper Dependencies](#spring-cloud-zookeeper-dependencies-activating) + +* [Setting up Zookeeper Dependencies](#spring-cloud-zookeeper-dependencies-setting-up) + +* [Configuring Spring Cloud Zookeeper Dependencies](#spring-cloud-zookeeper-dependencies-configuring) + +### 6.1. Using the Zookeeper Dependencies + +Spring Cloud Zookeeper gives you a possibility to provide dependencies of your application +as properties. As dependencies, you can understand other applications that are registered +in Zookeeper and which you would like to call through[Feign](https://github.com/spring-cloud/spring-cloud-netflix/blob/master/docs/src/main/asciidoc/spring-cloud-netflix.adoc#spring-cloud-feign)(a REST client builder),[Spring`RestTemplate`](https://github.com/spring-cloud/spring-cloud-netflix/blob/master/docs/src/main/ascii) and[Spring WebFlux](https://cloud.spring.io/spring-cloud-commons/reference/html/#loadbalanced-webclient). + +You can also use the Zookeeper Dependency Watchers functionality to control and monitor +the state of your dependencies. + +### 6.2. 
Activating Zookeeper Dependencies + +Including a dependency on `org.springframework.cloud:spring-cloud-starter-zookeeper-discovery` enables +autoconfiguration that sets up Spring Cloud Zookeeper Dependencies. Even if you provide +the dependencies in your properties, you can turn off the dependencies. To do so, set the `spring.cloud.zookeeper.dependency.enabled` property to false (it defaults to `true`). + +### 6.3. Setting up Zookeeper Dependencies + +Consider the following example of dependency representation: + +application.yml + +``` +spring.application.name: yourServiceName +spring.cloud.zookeeper: + dependencies: + newsletter: + path: /path/where/newsletter/has/registered/in/zookeeper + loadBalancerType: ROUND_ROBIN + contentTypeTemplate: application/vnd.newsletter.$version+json + version: v1 + headers: + header1: + - value1 + header2: + - value2 + required: false + stubs: org.springframework:foo:stubs + mailing: + path: /path/where/mailing/has/registered/in/zookeeper + loadBalancerType: ROUND_ROBIN + contentTypeTemplate: application/vnd.mailing.$version+json + version: v1 + required: true +``` + +The next few sections go through each part of the dependency one by one. The root property +name is `spring.cloud.zookeeper.dependencies`. + +#### 6.3.1. Aliases + +Below the root property you have to represent each dependency as an alias. +This is due to the constraints of Spring Cloud LoadBalancer, which requires that the application ID be placed in the URL. +Consequently, you cannot pass any complex path, such as `/myApp/myRoute/name`. +The alias is the name you use instead of the `serviceId` for `DiscoveryClient`, `Feign`, or `RestTemplate`. + +In the previous examples, the aliases are `newsletter` and `mailing`. 
+The following example shows Feign usage with a `newsletter` alias: + +``` +@FeignClient("newsletter") +public interface NewsletterService { + @RequestMapping(method = RequestMethod.GET, value = "/newsletter") + String getNewsletters(); +} +``` + +#### 6.3.2. Path + +The path is represented by the `path` YAML property and is the path under which the dependency is registered under Zookeeper. +As described in the[previous section](#spring-cloud-zookeeper-dependencies-setting-up-aliases), Spring Cloud LoadBalancer operates on URLs. +As a result, this path is not compliant with its requirement. +That is why Spring Cloud Zookeeper maps the alias to the proper path. + +#### 6.3.3. Load Balancer Type + +The load balancer type is represented by `loadBalancerType` YAML property. + +If you know what kind of load-balancing strategy has to be applied when calling this particular dependency, you can provide it in the YAML file, and it is automatically applied. +You can choose one of the following load balancing strategies: + +* STICKY: Once chosen, the instance is always called. + +* RANDOM: Picks an instance randomly. + +* ROUND\_ROBIN: Iterates over instances over and over again. + +#### 6.3.4. `Content-Type` Template and Version + +The `Content-Type` template and version are represented by the `contentTypeTemplate` and`version` YAML properties. + +If you version your API in the `Content-Type` header, you do not want to add this header +to each of your requests. Also, if you want to call a new version of the API, you do not +want to roam around your code to bump up the API version. That is why you can provide a`contentTypeTemplate` with a special `$version` placeholder. That placeholder will be filled by the value of the`version` YAML property. 
Consider the following example of a `contentTypeTemplate`: + +``` +application/vnd.newsletter.$version+json +``` + +Further consider the following `version`: + +``` +v1 +``` + +The combination of `contentTypeTemplate` and version results in the creation of a`Content-Type` header for each request, as follows: + +``` +application/vnd.newsletter.v1+json +``` + +#### 6.3.5. Default Headers + +Default headers are represented by the `headers` map in YAML. + +Sometimes, each call to a dependency requires setting up of some default headers. To not +do that in code, you can set them up in the YAML file, as shown in the following example`headers` section: + +``` +headers: + Accept: + - text/html + - application/xhtml+xml + Cache-Control: + - no-cache +``` + +That `headers` section results in adding the `Accept` and `Cache-Control` headers with +appropriate list of values in your HTTP request. + +#### 6.3.6. Required Dependencies + +Required dependencies are represented by `required` property in YAML. + +If one of your dependencies is required to be up when your application boots, you can set +the `required: true` property in the YAML file. + +If your application cannot localize the required dependency during boot time, it throws an +exception, and the Spring Context fails to set up. In other words, your application cannot +start if the required dependency is not registered in Zookeeper. + +You can read more about Spring Cloud Zookeeper Presence Checker[later in this document](#spring-cloud-zookeeper-dependency-watcher-presence-checker). + +#### 6.3.7. Stubs + +You can provide a colon-separated path to the JAR containing stubs of the dependency, as +shown in the following example: + +`stubs: org.springframework:myApp:stubs` + +where: + +* `org.springframework` is the `groupId`. + +* `myApp` is the `artifactId`. + +* `stubs` is the classifier. (Note that `stubs` is the default value.) 
+ +Because `stubs` is the default classifier, the preceding example is equal to the following +example: + +`stubs: org.springframework:myApp` + +### 6.4. Configuring Spring Cloud Zookeeper Dependencies + +You can set the following properties to enable or disable parts of Zookeeper Dependencies functionalities: + +* `spring.cloud.zookeeper.dependencies`: If you do not set this property, you cannot use Zookeeper Dependencies. + +* `spring.cloud.zookeeper.dependency.loadbalancer.enabled` (enabled by default): Turns on Zookeeper-specific custom load-balancing strategies, including `ZookeeperServiceInstanceListSupplier` and dependency-based load-balanced `RestTemplate` setup. + +* `spring.cloud.zookeeper.dependency.headers.enabled` (enabled by default): This property registers a `FeignBlockingLoadBalancerClient` that automatically appends appropriate headers and content types with their versions, as presented in the Dependency configuration. + Without this setting, those two parameters do not work. + +* `spring.cloud.zookeeper.dependency.resttemplate.enabled` (enabled by default): When enabled, this property modifies the request headers of a `@LoadBalanced`-annotated `RestTemplate` such that it passes headers and content type with the version set in dependency configuration. + Without this setting, those two parameters do not work. + +## 7. Spring Cloud Zookeeper Dependency Watcher + +The Dependency Watcher mechanism lets you register listeners to your dependencies. The +functionality is, in fact, an implementation of the `Observer` pattern. When a +dependency changes its state (to either UP or DOWN), some custom logic can be applied. + +### 7.1. Activating + +Spring Cloud Zookeeper Dependencies functionality needs to be enabled for you to use the +Dependency Watcher mechanism. + +### 7.2. 
Registering a Listener + +To register a listener, you must implement an interface called `org.springframework.cloud.zookeeper.discovery.watcher.DependencyWatcherListener` and +register it as a bean. The interface gives you one method: + +``` +void stateChanged(String dependencyName, DependencyState newState); +``` + +If you want to register a listener for a particular dependency, the `dependencyName` would +be the discriminator for your concrete implementation. `newState` provides you with +information about whether your dependency has changed to `CONNECTED` or `DISCONNECTED`. + +### 7.3. Using the Presence Checker + +Bound with the Dependency Watcher is the functionality called Presence Checker. It lets +you provide custom behavior when your application boots, to react according to the state +of your dependencies. + +The default implementation of the abstract `org.springframework.cloud.zookeeper.discovery.watcher.presence.DependencyPresenceOnStartupVerifier` class is the `org.springframework.cloud.zookeeper.discovery.watcher.presence.DefaultDependencyPresenceOnStartupVerifier`, +which works in the following way. + +1. If the dependency is marked as `required` and is not in Zookeeper, when your application + boots, it throws an exception and shuts down. + +2. If the dependency is not `required`, the `org.springframework.cloud.zookeeper.discovery.watcher.presence.LogMissingDependencyChecker` logs that the dependency is missing at the `WARN` level. + +Because the `DefaultDependencyPresenceOnStartupVerifier` is registered only when there is +no bean of type `DependencyPresenceOnStartupVerifier`, this functionality can be +overridden. + +## 8. Distributed Configuration with Zookeeper + +Zookeeper provides a [hierarchical namespace](https://zookeeper.apache.org/doc/current/zookeeperOver.html#sc_dataModelNameSpace) that lets clients store arbitrary data, such as configuration data. 
Spring Cloud Zookeeper +Config is an alternative to the[Config Server and Client](https://github.com/spring-cloud/spring-cloud-config). +Configuration is loaded into the Spring Environment during the special “bootstrap” +phase. Configuration is stored in the `/config` namespace by default. Multiple`PropertySource` instances are created, based on the application’s name and the active +profiles, to mimic the Spring Cloud Config order of resolving properties. For example, an +application with a name of `testApp` and with the `dev` profile has the following property +sources created for it: + +* `config/testApp,dev` + +* `config/testApp` + +* `config/application,dev` + +* `config/application` + +The most specific property source is at the top, with the least specific at the bottom. +Properties in the `config/application` namespace apply to all applications that use +zookeeper for configuration. Properties in the `config/testApp` namespace are available +only to the instances of the service named `testApp`. + +Configuration is currently read on startup of the application. Sending a HTTP `POST`request to `/refresh` causes the configuration to be reloaded. Watching the configuration +namespace (which Zookeeper supports) is not currently implemented. + +### 8.1. Activating + +Including a dependency on`org.springframework.cloud:spring-cloud-starter-zookeeper-config` enables +autoconfiguration that sets up Spring Cloud Zookeeper Config. + +| |When working with version 3.4 of Zookeeper you need to change
the way you include the dependency as described [here](#spring-cloud-zookeeper-install).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.2. Spring Boot Config Data Import + +Spring Boot 2.4 introduced a new way to import configuration data via the `spring.config.import` property. This is now the default way to get configuration from Zookeeper. + +To optionally connect to Zookeeper for configuration, set the following in application.properties: + +application.properties + +``` +spring.config.import=optional:zookeeper: +``` + +This will connect to Zookeeper at the default location of "localhost:2181". Removing the `optional:` prefix will cause Zookeeper Config to fail if it is unable to connect to Zookeeper. To change the connection properties of Zookeeper Config, either set `spring.cloud.zookeeper.connect-string` or add the connect string to the `spring.config.import` statement, such as `spring.config.import=optional:zookeeper:myhost:2181`. The location in the import property has precedence over the `connect-string` property. + +Zookeeper Config will try to load values from four automatic contexts based on `spring.cloud.zookeeper.config.name` (which defaults to the value of the `spring.application.name` property) and `spring.cloud.zookeeper.config.default-context` (which defaults to `application`). If you want to specify the contexts rather than using the computed ones, you can add that information to the `spring.config.import` statement. + +application.properties + +``` +spring.config.import=optional:zookeeper:myhost:2181/contextone;/context/two +``` + +This will optionally load configuration only from `/contextone` and `/context/two`. 
+ +| |A `bootstrap` file (properties or yaml) is **not** needed for the Spring Boot Config Data method of import via `spring.config.import`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------| + +### 8.3. Customizing + +Zookeeper Config may be customized by setting the following properties: + +``` +spring: + cloud: + zookeeper: + config: + enabled: true + root: configuration + defaultContext: apps + profileSeparator: '::' +``` + +* `enabled`: Setting this value to `false` disables Zookeeper Config. + +* `root`: Sets the base namespace for configuration values. + +* `defaultContext`: Sets the name used by all applications. + +* `profileSeparator`: Sets the value of the separator used to separate the profile name in + property sources with profiles. + +| |If you have set `spring.cloud.bootstrap.enabled=true` or `spring.config.use-legacy-processing=true`, or included `spring-cloud-starter-bootstrap`, then the above values will need to be placed in `bootstrap.yml` instead of `application.yml`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.4. Access Control Lists (ACLs) + +You can add authentication information for Zookeeper ACLs by calling the `addAuthInfo`method of a `CuratorFramework` bean. 
One way to accomplish this is to provide your own `CuratorFramework` bean, as shown in the following example: + +``` +@BootstrapConfiguration +public class CustomCuratorFrameworkConfig { + + @Bean + public CuratorFramework curatorFramework() { + CuratorFramework curator = new CuratorFramework(); + curator.addAuthInfo("digest", "user:password".getBytes()); + return curator; + } + +} +``` + +Consult [the ZookeeperAutoConfiguration class](https://github.com/spring-cloud/spring-cloud-zookeeper/blob/master/spring-cloud-zookeeper-core/src/main/java/org/springframework/cloud/zookeeper/ZookeeperAutoConfiguration.java) to see how the `CuratorFramework` bean is configured by default. + +Alternatively, you can add your credentials from a class that depends on the existing`CuratorFramework` bean, as shown in the following example: + +``` +@BootstrapConfiguration +public class DefaultCuratorFrameworkConfig { + + public DefaultCuratorFrameworkConfig(CuratorFramework curator) { + curator.addAuthInfo("digest", "user:password".getBytes()); + } + +} +``` + +The creation of this bean must occur during the bootstrapping phase. 
You can register +configuration classes to run during this phase by annotating them with`@BootstrapConfiguration` and including them in a comma-separated list that you set as the +value of the `org.springframework.cloud.bootstrap.BootstrapConfiguration` property in the`resources/META-INF/spring.factories` file, as shown in the following example: + +resources/META-INF/spring.factories + +``` +org.springframework.cloud.bootstrap.BootstrapConfiguration=\ +my.project.CustomCuratorFrameworkConfig,\ +my.project.DefaultCuratorFrameworkConfig +``` + diff --git a/docs/en/spring-credhub/README.md b/docs/en/spring-credhub/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-credhub/spring-credhub.md b/docs/en/spring-credhub/spring-credhub.md new file mode 100644 index 0000000000000000000000000000000000000000..709810672a44cd39c36f68a5d3ddd58c706f7cd7 --- /dev/null +++ b/docs/en/spring-credhub/spring-credhub.md @@ -0,0 +1,534 @@ +# Spring CredHub + +Spring CredHub provides client-side support for storing, retrieving, and deleting credentials from a [CredHub](https://docs.cloudfoundry.org/credhub/) server running in a [Cloud Foundry](https://www.cloudfoundry.org/) platform. + +CredHub provides an [HTTP API](https://docs.cloudfoundry.org/api/credhub/) to securely store, generate, retrieve, and delete credentials of various types. Spring CredHub provides a Java binding for the CredHub API, making it easy to integrate Spring applications with CredHub. + +## 1. Getting started + +Spring CredHub supports CredHub server version 1.x and 2.x. +This library is intended to provide full coverage of the CredHub API - all operations on all credential types. + +Spring CredHub has been optimized to work with Spring Boot applications. +To include Spring CredHub in a Spring Boot application, add some dependencies to the project build file. + +### 1.1. 
Maven Dependencies + +Add the Spring CredHub starter to the `dependencies` section of the build file: + +``` + + + org.springframework.credhub + spring-credhub-starter + 2.2.0 + + +``` + +To enable reactive support in Spring CredHub, add the following [Spring WebFlux](https://docs.spring.io/spring-framework/docs/5.3.13/reference/html/web-reactive.html#spring-webflux) dependency to the build file: + +``` + + + org.springframework.boot + spring-boot-starter-webflux + 5.3.13 + + +``` + +To use OAuth2 authentication to CredHub, add the following [Spring Security](https://spring.io/projects/spring-security) dependencies to the build file: + +``` + + + org.springframework.security + spring-security-config + 5.5.3 + + + org.springframework.security + spring-security-oauth2-client + 5.5.3 + + +``` + +### 1.2. Gradle Dependencies + +Add the Spring CredHub starter to the `dependencies` section of the build file: + +``` + dependencies { + compile('org.springframework.credhub:spring-credhub-starter:2.2.0') + } +``` + +To enable reactive support in Spring CredHub, add the following [Spring WebFlux](https://docs.spring.io/spring-framework/docs/5.3.13/reference/html/web-reactive.html#spring-webflux) dependency to the build file: + +``` + dependencies { + compile("org.springframework.boot:spring-boot-starter-webflux:5.3.13") + } +``` + +To use OAuth2 authentication to CredHub, add the following [Spring Security](https://spring.io/projects/spring-security) dependencies to the build file: + +``` + dependencies { + compile("org.springframework.security:spring-security-config:5.5.3") + compile("org.springframework.security:spring-security-oauth2-client:5.5.3") + } +``` + +## 2. Spring Boot Configuration + +When using the Spring CredHub starter dependency, Spring CredHub can be configured with [Spring Boot application properties](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-external-config.html#boot-features-external-config-application-property-files). 
+With the proper configuration properties, Spring CredHub will auto-configure a connection to a CredHub server. + +### 2.1. Mutual TLS Authentication + +An application running on Cloud Foundry can authenticate to a CredHub server deployed to the same platform using mutual TLS. +Mutual TLS is the default authentication scheme when no other authentication credentials are provided. +To use mutual TLS authentication to a CredHub server, simply provide the URL of the CredHub server as an application property: + +``` +spring: + credhub: + url: [CredHub server URL] +``` + +See the [CredHub documentation](https://docs.cloudfoundry.org/api/credhub/version/main/#_mutual_tls) for more information on mutual TLS authentication. + +An application running on Cloud Foundry can use the internal address `[https://credhub.service.cf.internal:8844](https://credhub.service.cf.internal:8844)` to communicate with a CredHub server deployed to the same platform. + +### 2.2. OAuth2 Authentication + +OAuth2 can be used to authenticate via UAA to any CredHub server. +Spring CredHub supports client credentials grant tokens for authentication using the following Spring CredHub and Spring Security configuration: + +``` +spring: + credhub: + url: [CredHub server URL] + oauth2: + registration-id: credhub-client + security: + oauth2: + client: + registration: + credhub-client: + provider: uaa + client-id: [OAuth2 client ID] + client-secret: [OAuth2 client secret] + authorization-grant-type: client_credentials + provider: + uaa: + token-uri: [UAA token server endpoint] +``` + +The ID provided in `spring.credhub.oauth2.registration-id` must refer to a client configured under `spring.security.oauth2.client.registration`. +See the [Spring Boot documentation](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-security-oauth2) for more information on Spring Boot OAuth2 client configuration. 
+ +The OAuth2 client specified in the Spring Security client registration must have CredHub scopes such as `credhub.read` or `credhub.write` to perform most operations. +See the [CredHub documentation](https://docs.cloudfoundry.org/api/credhub/version/main/#_uaa_oauth2) for more information on OAuth2 authentication with UAA. + +#### 2.2.1. Auto-configuration of Spring Security OAuth2 + +When `spring.credhub.oauth2` properties are set and Spring Security is on the application classpath, Spring CredHub will auto-configure the Spring Security beans required for OAuth2 authentication. +An application can provide the required Spring Security OAuth2 beans to override the auto-configuration if necessary. + +##### [](#servlet-and-non-reactive-applications)[Servlet and Non-reactive Applications](#servlet-and-non-reactive-applications) + +Spring CredHub requires beans of the following types, provided by Spring Security, in order to authenticate using OAuth2. + +| Required Bean Type | Auto-configured Type | +|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|[`ClientRegistrationRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/registration/ClientRegistrationRepository.html)| [`InMemoryClientRegistrationRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/registration/InMemoryClientRegistrationRepository.html) | +|[`OAuth2AuthorizedClientRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/OAuth2AuthorizedClientRepository.html) 
|[`AuthenticatedPrincipalOAuth2AuthorizedClientRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/AuthenticatedPrincipalOAuth2AuthorizedClientRepository.html)| +| [`OAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/OAuth2AuthorizedClientManager.html) | [`DefaultOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/DefaultOAuth2AuthorizedClientManager.html) | + +The auto-configured `DefaultOAuth2AuthorizedClientManager` assumes the application is running in a servlet container and has an active `HttpServletRequest`. +An application might need to provide an alternate implementation of the `OAuth2AuthorizedClientManager` bean such as [`AuthorizedClientServiceOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/AuthorizedClientServiceOAuth2AuthorizedClientManager.html) to process requests outside of an `HttpServletRequest`, as shown in the following example: + +``` +/* + * Copyright 2016-2020 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.credhub; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.security.oauth2.client.AuthorizedClientServiceOAuth2AuthorizedClientManager; +import org.springframework.security.oauth2.client.ClientCredentialsOAuth2AuthorizedClientProvider; +import org.springframework.security.oauth2.client.OAuth2AuthorizedClientService; +import org.springframework.security.oauth2.client.registration.ClientRegistrationRepository; + +@Configuration +public class CredHubSecurityConfiguration { + + @Bean + public AuthorizedClientServiceOAuth2AuthorizedClientManager reactiveClientManager( + ClientRegistrationRepository clientRegistrationRepository, + OAuth2AuthorizedClientService authorizedClientService) { + AuthorizedClientServiceOAuth2AuthorizedClientManager clientManager = new AuthorizedClientServiceOAuth2AuthorizedClientManager( + clientRegistrationRepository, authorizedClientService); + clientManager.setAuthorizedClientProvider(new ClientCredentialsOAuth2AuthorizedClientProvider()); + return clientManager; + } + +} +``` + +Refer to the [Spring Security documentation](https://docs.spring.io/spring-security/site/docs/5.5.3/reference/html5/#oauth2login-override-boot-autoconfig) for more information and examples of configuring other beans. + +##### [](#reactive-applications)[Reactive Applications](#reactive-applications) + +Spring CredHub requires beans of the following types, provided by Spring Security, in order to authenticate using OAuth2. 
+ +| Required Bean Type | Auto-configured Type | +|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [`ReactiveClientRegistrationRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/registration/ReactiveClientRegistrationRepository.html) | [`InMemoryReactiveClientRegistrationRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/registration/InMemoryReactiveClientRegistrationRepository.html) | +|[`ServerOAuth2AuthorizedClientRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/server/ServerOAuth2AuthorizedClientRepository.html)|[`UnAuthenticatedServerOAuth2AuthorizedClientRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/server/UnAuthenticatedServerOAuth2AuthorizedClientRepository.html)| +| [`ReactiveOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/ReactiveOAuth2AuthorizedClientManager.html) | [`DefaultReactiveOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/DefaultReactiveOAuth2AuthorizedClientManager.html) | + +The auto-configured `DefaultReactiveOAuth2AuthorizedClientManager` requires an active `ServerHttpRequest` context. 
+An application might need to provide an alternate implementation of the `ReactiveOAuth2AuthorizedClientManager` bean such as [`AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager.html) to process requests outside of an `ServerHttpRequest`, as shown in the following example: + +``` +/* + * Copyright 2016-2020 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.credhub; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.security.oauth2.client.AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager; +import org.springframework.security.oauth2.client.ClientCredentialsReactiveOAuth2AuthorizedClientProvider; +import org.springframework.security.oauth2.client.ReactiveOAuth2AuthorizedClientService; +import org.springframework.security.oauth2.client.registration.ReactiveClientRegistrationRepository; + +@Configuration +public class CredHubReactiveSecurityConfiguration { + + @Bean + public AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager reactiveClientManager( + ReactiveClientRegistrationRepository clientRegistrationRepository, + ReactiveOAuth2AuthorizedClientService authorizedClientService) { + AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager clientManager = new AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager( + clientRegistrationRepository, authorizedClientService); + clientManager.setAuthorizedClientProvider(new ClientCredentialsReactiveOAuth2AuthorizedClientProvider()); + return clientManager; + } + +} +``` + +Refer to the [Spring Security documentation](https://docs.spring.io/spring-security/site/docs/5.5.3/reference/html5/#oauth2login-override-boot-autoconfig) for more information and examples of configuring other beans. + +## 3. Introduction to CredHubOperations + +The interface `org.springframework.credhub.core.CredHubOperations` and the implementation `org.springframework.credhub.core.CredHubTemplate` are the central classes in Spring CredHub.`CredHubOperations` provides access to additional operations interfaces that model the full CredHub API: + +``` +/** + * Get the operations for saving, retrieving, and deleting credentials. 
+ */ +CredHubCredentialOperations credentials(); + +/** + * Get the operations for adding, retrieving, and deleting credential permissions. + */ +CredHubPermissionOperations permissions(); + +/** + * Get the operations for adding, retrieving, and deleting credential permissions. + */ +CredHubPermissionV2Operations permissionsV2(); + +/** + * Get the operations for retrieving, regenerating, and updating certificates. + */ +CredHubCertificateOperations certificates(); + +/** + * Get the operations for interpolating service binding credentials. + */ +CredHubInterpolationOperations interpolation(); + +/** + * Get the operations for retrieving CredHub server information. + */ +CredHubInfoOperations info(); +``` + +### 3.1. Mapping to CredHub API + +Each method of the `Operations` interfaces maps directly to one endpoint of the CredHub HTTP API. +The following table shows the mapping between the CredHub API and the appropriate Spring CredHub `Operations` interface. + +| [CredHub Credentials API](https://docs.cloudfoundry.org/api/credhub/version/main/#_credentials_endpoint) | [CredHubCredentialOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/credential/CredHubCredentialOperations.html) | +|------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|[CredHub Permissions API](https://docs.cloudfoundry.org/api/credhub/version/main/#_permissions_v1_deprecated) (v1)| [CredHubPermissionOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/permission/CredHubPermissionOperations.html) | +| [CredHub Permissions API](https://docs.cloudfoundry.org/api/credhub/version/main/#_permissions_v2_endpoint) (v2) | 
[CredHubPermissionV2Operations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/permissionV2/CredHubPermissionV2Operations.html) | +| [CredHub Certificates API](https://docs.cloudfoundry.org/api/credhub/version/main/#_certificates_endpoint) | [CredHubCertificateOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/certificate/CredHubCertificateOperations.html) | +| [CredHub Interpolation API](https://docs.cloudfoundry.org/api/credhub/version/main/#_interpolation_endpoint) |[CredHubInterpolationOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/interpolation/CredHubInterpolationOperations.html)| +| [CredHub Information API](https://docs.cloudfoundry.org/api/credhub/version/main/#_info_endpoint) | [CredHubInfoOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/info/CredHubInfoOperations.html) | + +### 3.2. CredHubOperations Auto-configuration + +A `CredHubOperations` Spring bean is created using Spring Boot auto-configuration when application properties are properly configured. +Application classes can autowire an instance of this bean to interact with a CredHub server. + +``` +/* + * Copyright 2016-2020 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.credhub; + +import org.springframework.credhub.core.CredHubOperations; +import org.springframework.credhub.support.CredentialDetails; +import org.springframework.credhub.support.SimpleCredentialName; +import org.springframework.credhub.support.password.PasswordCredential; +import org.springframework.credhub.support.password.PasswordParameters; +import org.springframework.credhub.support.password.PasswordParametersRequest; +import org.springframework.stereotype.Component; + +@Component +public class CredHubService { + + private final CredHubOperations credHubOperations; + + private final SimpleCredentialName credentialName; + + public CredHubService(CredHubOperations credHubOperations) { + this.credHubOperations = credHubOperations; + + this.credentialName = new SimpleCredentialName("example", "password"); + } + + public String generatePassword() { + PasswordParameters parameters = PasswordParameters.builder().length(12).excludeLower(false).excludeUpper(false) + .excludeNumber(false).includeSpecial(true).build(); + + CredentialDetails password = this.credHubOperations.credentials() + .generate(PasswordParametersRequest.builder().name(this.credentialName).parameters(parameters).build()); + + return password.getValue().getPassword(); + } + + public String getPassword() { + CredentialDetails password = this.credHubOperations.credentials() + .getByName(this.credentialName, PasswordCredential.class); + + return password.getValue().getPassword(); + } + +} +``` + +## 4. Introduction to ReactiveCredHubOperations + +The interface `org.springframework.credhub.core.ReactiveCredHubOperations` and the implementation `org.springframework.credhub.core.ReactiveCredHubTemplate` are the central classes in Spring CredHub reactive support.`ReactiveCredHubOperations` provides access to additional operations interfaces that model the full CredHub API: + +``` +/** + * Get the operations for saving, retrieving, and deleting credentials. 
+ */ +ReactiveCredHubCredentialOperations credentials(); + +/** + * Get the operations for adding, retrieving, and deleting credential permissions. + */ +ReactiveCredHubPermissionOperations permissions(); + +/** + * Get the operations for adding, retrieving, and deleting credential permissions. + */ +ReactiveCredHubPermissionV2Operations permissionsV2(); + +/** + * Get the operations for retrieving, regenerating, and updating certificates. + */ +ReactiveCredHubCertificateOperations certificates(); + +/** + * Get the operations for interpolating service binding credentials. + */ +ReactiveCredHubInterpolationOperations interpolation(); + +/** + * Get the operations for retrieving CredHub server information. + */ +ReactiveCredHubInfoOperations info(); +``` + +### 4.1. Mapping to CredHub API + +Each method of the `Reactive…​Operations` interfaces maps directly to one endpoint of the CredHub HTTP API. +The following table shows the mapping between the CredHub API and the appropriate Spring CredHub `Reactive…​Operations` interface. 
+ +| [CredHub Credentials API](https://docs.cloudfoundry.org/api/credhub/version/main/#_credentials_endpoint) | [ReactiveCredHubCredentialOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/credential/ReactiveCredHubCredentialOperations.html) | +|------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|[CredHub Permissions API](https://docs.cloudfoundry.org/api/credhub/version/main/#_permissions_v1_deprecated) (v1)| [ReactiveCredHubPermissionOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/permission/ReactiveCredHubPermissionOperations.html) | +| [CredHub Permissions API](https://docs.cloudfoundry.org/api/credhub/version/main/#_permissions_v2_endpoint) (v2) | [ReactiveCredHubPermissionV2Operations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/permissionV2/ReactiveCredHubPermissionV2Operations.html) | +| [CredHub Certificates API](https://docs.cloudfoundry.org/api/credhub/version/main/#_certificates_endpoint) | [ReactiveCredHubCertificateOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/certificate/ReactiveCredHubCertificateOperations.html) | +| [CredHub Interpolation API](https://docs.cloudfoundry.org/api/credhub/version/main/#_interpolation_endpoint) |[ReactiveCredHubInterpolationOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/interpolation/ReactiveCredHubInterpolationOperations.html)| +| [CredHub Information API](https://docs.cloudfoundry.org/api/credhub/version/main/#_info_endpoint) | 
[ReactiveCredHubInfoOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/info/ReactiveCredHubInfoOperations.html) | + +### 4.2. ReactiveCredHubOperations Auto-configuration + +A `ReactiveCredHubOperations` Spring bean is created using Spring Boot auto-configuration when application properties are properly configured and the Spring WebFlux library is on the classpath. +Application classes can autowire an instance of this bean to interact with a CredHub server. + +``` +/* + * Copyright 2016-2020 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.credhub; + +import reactor.core.publisher.Mono; + +import org.springframework.credhub.core.ReactiveCredHubOperations; +import org.springframework.credhub.support.SimpleCredentialName; +import org.springframework.credhub.support.password.PasswordCredential; +import org.springframework.credhub.support.password.PasswordParameters; +import org.springframework.credhub.support.password.PasswordParametersRequest; +import org.springframework.stereotype.Component; + +@Component +public class ReactiveCredHubService { + + private final ReactiveCredHubOperations credHubOperations; + + private final SimpleCredentialName credentialName; + + public ReactiveCredHubService(ReactiveCredHubOperations credHubOperations) { + this.credHubOperations = credHubOperations; + + this.credentialName = new SimpleCredentialName("example", "password"); + } + + public Mono generatePassword() { + PasswordParameters parameters = PasswordParameters.builder().length(12).excludeLower(false).excludeUpper(false) + .excludeNumber(false).includeSpecial(true).build(); + + return this.credHubOperations.credentials() + .generate(PasswordParametersRequest.builder().name(this.credentialName).parameters(parameters).build(), + PasswordCredential.class) + .map((password) -> password.getValue().getPassword()); + } + + public Mono getPassword() { + return this.credHubOperations.credentials().getByName(this.credentialName, PasswordCredential.class) + .map((password) -> password.getValue().getPassword()); + } + +} +``` + +## 5. HTTP Client Support + +Spring CredHub `CredHubOperations` supports multiple HTTP client libraries to communicate with the CredHub API. 
The following libraries are supported: + +* Java’s builtin `HttpURLConnection` (default) + +* [Apache HttpComponents](https://hc.apache.org/) + +* [OkHttp 3](https://square.github.io/okhttp/) + +* [Netty](https://netty.io/) + +Choosing a specific client library requires the appropriate dependency to be available on the application classpath. +The application classpath will be inspected for each client library in the order listed above. + +Spring CredHub `ReactiveCredHubOperations` only supports the Netty HTTP client library. + +### 5.1. Apache HttpComponents + +To use Apache HttpComponents to communicate with CredHub, add the following dependency to the application: + +``` + + org.apache.httpcomponents + httpclient + +``` + +| |Apache HttpClient’s [wire logging](https://hc.apache.org/httpcomponents-client-4.5.x/logging.html) can be enabled through logging configuration. Make sure to not accidentally enable wire logging as logs may expose traffic (including tokens and secrets) between your application and CredHub in plain text.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 5.2. OkHttp 3 + +To use OkHttp 3 to communicate with CredHub, add the following dependency to the application: + +``` + + com.squareup.okhttp3 + okhttp + +``` + +### 5.3. 
Netty + +To use Netty to communicate with CredHub, add the following dependency to the application: + +``` + + io.netty + netty-all + +``` \ No newline at end of file diff --git a/docs/en/spring-data/README.md b/docs/en/spring-data/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8ce5c920d6b129c5d2693495f20a9d201153e6a9 --- /dev/null +++ b/docs/en/spring-data/README.md @@ -0,0 +1 @@ +# Spring Data \ No newline at end of file diff --git a/docs/en/spring-data/spring-data.md b/docs/en/spring-data/spring-data.md new file mode 100644 index 0000000000000000000000000000000000000000..8d83b6954809df85f6b53a01ec593eb4afe9514b --- /dev/null +++ b/docs/en/spring-data/spring-data.md @@ -0,0 +1,2702 @@ +# Preface + +The Spring Data Commons project applies core Spring concepts to the development of solutions using many relational and non-relational data stores. + +## 1. Project Metadata + +* Version control: [https://github.com/spring-projects/spring-data-commons](https://github.com/spring-projects/spring-data-commons) + +* Bugtracker: [https://github.com/spring-projects/spring-data-commons/issues](https://github.com/spring-projects/spring-data-commons/issues) + +* Release repository: [https://repo.spring.io/libs-release](https://repo.spring.io/libs-release) + +* Milestone repository: [https://repo.spring.io/libs-milestone](https://repo.spring.io/libs-milestone) + +* Snapshot repository: [https://repo.spring.io/libs-snapshot](https://repo.spring.io/libs-snapshot) + +## Reference Documentation + +## 2. Dependencies + +Due to the different inception dates of individual Spring Data modules, most of them carry different major and minor version numbers. The easiest way to find compatible ones is to rely on the Spring Data Release Train BOM that we ship with the compatible versions defined. In a Maven project, you would declare this dependency in the `` section of your POM as follows: + +Example 1. 
Using the Spring Data release train BOM + +``` + + + + org.springframework.data + spring-data-bom + 2021.1.2 + import + pom + + + +``` + +The current release train version is `2021.1.2`. The train version uses [calver](https://calver.org/) with the pattern `YYYY.MINOR.MICRO`. +The version name follows `${calver}` for GA releases and service releases and the following pattern for all other versions: `${calver}-${modifier}`, where `modifier` can be one of the following: + +* `SNAPSHOT`: Current snapshots + +* `M1`, `M2`, and so on: Milestones + +* `RC1`, `RC2`, and so on: Release candidates + +You can find a working example of using the BOMs in our [Spring Data examples repository](https://github.com/spring-projects/spring-data-examples/tree/master/bom). With that in place, you can declare the Spring Data modules you would like to use without a version in the `` block, as follows: + +Example 2. Declaring a dependency to a Spring Data module + +``` + + + org.springframework.data + spring-data-jpa + + +``` + +### 2.1. Dependency Management with Spring Boot + +Spring Boot selects a recent version of Spring Data modules for you. If you still want to upgrade to a newer version, set +the `spring-data-releasetrain.version` property to the [train version and iteration](#dependencies.train-version) you would like to use. + +### 2.2. Spring Framework + +The current version of Spring Data modules require Spring Framework 5.3.16 or better. The modules might also work with an older bugfix version of that minor version. However, using the most recent version within that generation is highly recommended. + +## 3. Object Mapping Fundamentals + +This section covers the fundamentals of Spring Data object mapping, object creation, field and property access, mutability and immutability. +Note, that this section only applies to Spring Data modules that do not use the object mapping of the underlying data store (like JPA). 
+Also be sure to consult the store-specific sections for store-specific object mapping, like indexes, customizing column or field names or the like. + +Core responsibility of the Spring Data object mapping is to create instances of domain objects and map the store-native data structures onto those. +This means we need two fundamental steps: + +1. Instance creation by using one of the constructors exposed. + +2. Instance population to materialize all exposed properties. + +### 3.1. Object creation + +Spring Data automatically tries to detect a persistent entity’s constructor to be used to materialize objects of that type. +The resolution algorithm works as follows: + +1. If there is a single constructor, it is used. + +2. If there are multiple constructors and exactly one is annotated with `@PersistenceConstructor`, it is used. + +3. If there’s a no-argument constructor, it is used. + Other constructors will be ignored. + +The value resolution assumes constructor argument names to match the property names of the entity, i.e. the resolution will be performed as if the property was to be populated, including all customizations in mapping (different datastore column or field name etc.). +This also requires either parameter names information available in the class file or an `@ConstructorProperties` annotation being present on the constructor. + +The value resolution can be customized by using Spring Framework’s `@Value` value annotation using a store-specific SpEL expression. +Please consult the section on store specific mappings for further details. + +Object creation internals + +To avoid the overhead of reflection, Spring Data object creation uses a factory class generated at runtime by default, which will call the domain classes constructor directly. +I.e. 
for this example type: + +``` +class Person { + Person(String firstname, String lastname) { … } +} +``` + +we will create a factory class semantically equivalent to this one at runtime: + +``` +class PersonObjectInstantiator implements ObjectInstantiator { + + Object newInstance(Object... args) { + return new Person((String) args[0], (String) args[1]); + } +} +``` + +This gives us a roundabout 10% performance boost over reflection. +For the domain class to be eligible for such optimization, it needs to adhere to a set of constraints: + +* it must not be a private class + +* it must not be a non-static inner class + +* it must not be a CGLib proxy class + +* the constructor to be used by Spring Data must not be private + +If any of these criteria match, Spring Data will fall back to entity instantiation via reflection. + +### 3.2. Property population + +Once an instance of the entity has been created, Spring Data populates all remaining persistent properties of that class. +Unless already populated by the entity’s constructor (i.e. consumed through its constructor argument list), the identifier property will be populated first to allow the resolution of cyclic object references. +After that, all non-transient properties that have not already been populated by the constructor are set on the entity instance. +For that we use the following algorithm: + +1. If the property is immutable but exposes a `with…` method (see below), we use the `with…` method to create a new entity instance with the new property value. + +2. If property access (i.e. access through getters and setters) is defined, we’re invoking the setter method. + +3. If the property is mutable we set the field directly. + +4. If the property is immutable we’re using the constructor to be used by persistence operations (see [Object creation](#mapping.object-creation)) to create a copy of the instance. + +5. By default, we set the field value directly. 
+
+Property population internals
+
+Similarly to our [optimizations in object construction](#mapping.object-creation.details) we also use Spring Data runtime generated accessor classes to interact with the entity instance.
+
+```
+class Person {
+
+  private final Long id;
+  private String firstname;
+  private @AccessType(Type.PROPERTY) String lastname;
+
+  Person() {
+    this.id = null;
+  }
+
+  Person(Long id, String firstname, String lastname) {
+    // Field assignments
+  }
+
+  Person withId(Long id) {
+    return new Person(id, this.firstname, this.lastname);
+  }
+
+  void setLastname(String lastname) {
+    this.lastname = lastname;
+  }
+}
+```
+
+Example 3. A generated Property Accessor
+
+```
+class PersonPropertyAccessor implements PersistentPropertyAccessor {
+
+  private static final MethodHandle firstname; (2)
+
+  private Person person; (1)
+
+  public void setProperty(PersistentProperty property, Object value) {
+
+    String name = property.getName();
+
+    if ("firstname".equals(name)) {
+      firstname.invoke(person, (String) value); (2)
+    } else if ("id".equals(name)) {
+      this.person = person.withId((Long) value); (3)
+    } else if ("lastname".equals(name)) {
+      this.person.setLastname((String) value); (4)
+    }
+  }
+}
+```
+
+|**1**| `PropertyAccessor`s hold a mutable instance of the underlying object. This is to enable mutations of otherwise immutable properties. |
+|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|**2**| By default, Spring Data uses field-access to read and write property values. As per visibility rules of `private` fields, `MethodHandles` are used to interact with fields. |
+|**3**|The class exposes a `withId(…)` method that’s used to set the identifier, e.g. 
when an instance is inserted into the datastore and an identifier has been generated. Calling `withId(…)` creates a new `Person` object. All subsequent mutations will take place in the new instance leaving the previous untouched.| +|**4**| Using property-access allows direct method invocations without using `MethodHandles`. | + +This gives us a roundabout 25% performance boost over reflection. +For the domain class to be eligible for such optimization, it needs to adhere to a set of constraints: + +* Types must not reside in the default or under the `java` package. + +* Types and their constructors must be `public` + +* Types that are inner classes must be `static`. + +* The used Java Runtime must allow for declaring classes in the originating `ClassLoader`. Java 9 and newer impose certain limitations. + +By default, Spring Data attempts to use generated property accessors and falls back to reflection-based ones if a limitation is detected. + +Let’s have a look at the following entity: + +Example 4. A sample entity + +``` +class Person { + + private final @Id Long id; (1) + private final String firstname, lastname; (2) + private final LocalDate birthday; + private final int age; (3) + + private String comment; (4) + private @AccessType(Type.PROPERTY) String remarks; (5) + + static Person of(String firstname, String lastname, LocalDate birthday) { (6) + + return new Person(null, firstname, lastname, birthday, + Period.between(birthday, LocalDate.now()).getYears()); + } + + Person(Long id, String firstname, String lastname, LocalDate birthday, int age) { (6) + + this.id = id; + this.firstname = firstname; + this.lastname = lastname; + this.birthday = birthday; + this.age = age; + } + + Person withId(Long id) { (1) + return new Person(id, this.firstname, this.lastname, this.birthday, this.age); + } + + void setRemarks(String remarks) { (5) + this.remarks = remarks; + } +} +``` + +|**1**|The identifier property is final but set to `null` in the constructor.
The class exposes a `withId(…)` method that’s used to set the identifier, e.g. when an instance is inserted into the datastore and an identifier has been generated.
The original `Person` instance stays unchanged as a new one is created.
The same pattern is usually applied for other properties that are store managed but might have to be changed for persistence operations.
The wither method is optional as the persistence constructor (see 6) is effectively a copy constructor and setting the property will be translated into creating a fresh instance with the new identifier value applied.| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| The `firstname` and `lastname` properties are ordinary immutable properties potentially exposed through getters. | +|**3**| The `age` property is an immutable but derived one from the `birthday` property.
With the design shown, the database value will trump the defaulting as Spring Data uses the only declared constructor.
Even if the intent is that the calculation should be preferred, it’s important that this constructor also takes `age` as parameter (to potentially ignore it) as otherwise the property population step will attempt to set the age field and fail due to it being immutable and no `with…` method being present. |
+|**4**| The `comment` property is mutable and is populated by setting its field directly. |
+|**5**| The `remarks` property is mutable and is populated by invoking its setter method. |
+|**6**| The class exposes a factory method and a constructor for object creation.
The core idea here is to use factory methods instead of additional constructors to avoid the need for constructor disambiguation through `@PersistenceConstructor`.
Instead, defaulting of properties is handled within the factory method. | + +### 3.3. General recommendations + +* *Try to stick to immutable objects* — Immutable objects are straightforward to create as materializing an object is then a matter of calling its constructor only. + Also, this avoids your domain objects to be littered with setter methods that allow client code to manipulate the objects state. + If you need those, prefer to make them package protected so that they can only be invoked by a limited amount of co-located types. + Constructor-only materialization is up to 30% faster than properties population. + +* *Provide an all-args constructor* — Even if you cannot or don’t want to model your entities as immutable values, there’s still value in providing a constructor that takes all properties of the entity as arguments, including the mutable ones, as this allows the object mapping to skip the property population for optimal performance. + +* *Use factory methods instead of overloaded constructors to avoid `@PersistenceConstructor`* — With an all-argument constructor needed for optimal performance, we usually want to expose more application use case specific constructors that omit things like auto-generated identifiers etc. + It’s an established pattern to rather use static factory methods to expose these variants of the all-args constructor. + +* *Make sure you adhere to the constraints that allow the generated instantiator and property accessor classes to be used* —  + +* *For identifiers to be generated, still use a final field in combination with an all-arguments persistence constructor (preferred) or a `with…` method* —  + +* *Use Lombok to avoid boilerplate code* — As persistence operations usually require a constructor taking all arguments, their declaration becomes a tedious repetition of boilerplate parameter to field assignments that can best be avoided by using Lombok’s `@AllArgsConstructor`. + +#### 3.3.1. 
Overriding Properties + +Java’s allows a flexible design of domain classes where a subclass could define a property that is already declared with the same name in its superclass. +Consider the following example: + +``` +public class SuperType { + + private CharSequence field; + + public SuperType(CharSequence field) { + this.field = field; + } + + public CharSequence getField() { + return this.field; + } + + public void setField(CharSequence field) { + this.field = field; + } +} + +public class SubType extends SuperType { + + private String field; + + public SubType(String field) { + super(field); + this.field = field; + } + + @Override + public String getField() { + return this.field; + } + + public void setField(String field) { + this.field = field; + + // optional + super.setField(field); + } +} +``` + +Both classes define a `field` using assignable types. `SubType` however shadows `SuperType.field`. +Depending on the class design, using the constructor could be the only default approach to set `SuperType.field`. +Alternatively, calling `super.setField(…)` in the setter could set the `field` in `SuperType`. +All these mechanisms create conflicts to some degree because the properties share the same name yet might represent two distinct values. +Spring Data skips super-type properties if types are not assignable. +That is, the type of the overridden property must be assignable to its super-type property type to be registered as override, otherwise the super-type property is considered transient. +We generally recommend using distinct property names. + +Spring Data modules generally support overridden properties holding different values. +From a programming model perspective there are a few things to consider: + +1. Which property should be persisted (default to all declared properties)? + You can exclude properties by annotating these with `@Transient`. + +2. How to represent properties in your data store? 
+
+   Using the same field/column name for different values typically leads to corrupt data so you should annotate at least one of the properties using an explicit field/column name.
+
+3. `@AccessType(PROPERTY)` cannot be used as the super-property cannot be generally set without making any further assumptions of the setter implementation.
+
+### 3.4. Kotlin support
+
+Spring Data adapts specifics of Kotlin to allow object creation and mutation.
+
+#### 3.4.1. Kotlin object creation ####
+
+Kotlin classes are supported to be instantiated; all classes are immutable by default and require explicit property declarations to define mutable properties.
+Consider the following `data` class `Person`:
+
+```
+data class Person(val id: String, val name: String)
+```
+
+The class above compiles to a typical class with an explicit constructor. We can customize this class by adding another constructor and annotating it with `@PersistenceConstructor` to indicate a constructor preference:
+
+```
+data class Person(var id: String, val name: String) {
+
+  @PersistenceConstructor
+  constructor(id: String) : this(id, "unknown")
+}
+```
+
+Kotlin supports parameter optionality by allowing default values to be used if a parameter is not provided.
+When Spring Data detects a constructor with parameter defaulting, then it leaves these parameters absent if the data store does not provide a value (or simply returns `null`) so Kotlin can apply parameter defaulting. Consider the following class that applies parameter defaulting for `name`:
+
+```
+data class Person(var id: String, val name: String = "unknown")
+```
+
+Every time the `name` parameter is either not part of the result or its value is `null`, then the `name` defaults to `unknown`.
+
+#### 3.4.2. Property population of Kotlin data classes ####
+
+In Kotlin, all classes are immutable by default and require explicit property declarations to define mutable properties.
+Consider the following `data` class `Person`: + +``` +data class Person(val id: String, val name: String) +``` + +This class is effectively immutable. +It allows creating new instances as Kotlin generates a `copy(…)` method that creates new object instances copying all property values from the existing object and applying property values provided as arguments to the method. + +#### 3.4.3. Kotlin Overriding Properties + +Kotlin allows declaring [property overrides](https://kotlinlang.org/docs/inheritance.html#overriding-properties) to alter properties in subclasses. + +``` +open class SuperType(open var field: Int) + +class SubType(override var field: Int = 1) : + SuperType(field) { +} +``` + +Such an arrangement renders two properties with the name `field`. +Kotlin generates property accessors (getters and setters) for each property in each class. +Effectively, the code looks like as follows: + +``` +public class SuperType { + + private int field; + + public SuperType(int field) { + this.field = field; + } + + public int getField() { + return this.field; + } + + public void setField(int field) { + this.field = field; + } +} + +public final class SubType extends SuperType { + + private int field; + + public SubType(int field) { + super(field); + this.field = field; + } + + public int getField() { + return this.field; + } + + public void setField(int field) { + this.field = field; + } +} +``` + +Getters and setters on `SubType` set only `SubType.field` and not `SuperType.field`. +In such an arrangement, using the constructor is the only default approach to set `SuperType.field`. +Adding a method to `SubType` to set `SuperType.field` via `this.SuperType.field = …` is possible but falls outside of supported conventions. +Property overrides create conflicts to some degree because the properties share the same name yet might represent two distinct values. +We generally recommend using distinct property names. 
+
+Spring Data modules generally support overridden properties holding different values.
+From a programming model perspective there are a few things to consider:
+
+1. Which property should be persisted (default to all declared properties)?
+   You can exclude properties by annotating these with `@Transient`.
+
+2. How to represent properties in your data store?
+   Using the same field/column name for different values typically leads to corrupt data so you should annotate at least one of the properties using an explicit field/column name.
+
+3. `@AccessType(PROPERTY)` cannot be used as the super-property cannot be set.
+
+## 4. Working with Spring Data Repositories
+
+The goal of the Spring Data repository abstraction is to significantly reduce the amount of boilerplate code required to implement data access layers for various persistence stores.
+
+| |*Spring Data repository documentation and your module*

This chapter explains the core concepts and interfaces of Spring Data repositories.
The information in this chapter is pulled from the Spring Data Commons module.
It uses the configuration and code samples for the Java Persistence API (JPA) module.
You should adapt the XML namespace declaration and the types to be extended to the equivalents of the particular module that you use. “[Namespace reference](#repositories.namespace-reference)” covers XML configuration, which is supported across all Spring Data modules that support the repository API. “[Repository query keywords](#repository-query-keywords)” covers the query method keywords supported by the repository abstraction in general.
For detailed information on the specific features of your module, see the chapter on that module of this document.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.1. Core concepts + +The central interface in the Spring Data repository abstraction is `Repository`. +It takes the domain class to manage as well as the ID type of the domain class as type arguments. +This interface acts primarily as a marker interface to capture the types to work with and to help you to discover interfaces that extend this one. +The [`CrudRepository`](https://docs.spring.io/spring-data/commons/docs/current/api/org/springframework/data/repository/CrudRepository.html) interface provides sophisticated CRUD functionality for the entity class that is being managed. + +Example 5. `CrudRepository` Interface + +``` +public interface CrudRepository extends Repository { + + S save(S entity); (1) + + Optional findById(ID primaryKey); (2) + + Iterable findAll(); (3) + + long count(); (4) + + void delete(T entity); (5) + + boolean existsById(ID primaryKey); (6) + + // … more functionality omitted. +} +``` + +|**1**| Saves the given entity. 
| +|-----|-----------------------------------------------------| +|**2**| Returns the entity identified by the given ID. | +|**3**| Returns all entities. | +|**4**| Returns the number of entities. | +|**5**| Deletes the given entity. | +|**6**|Indicates whether an entity with the given ID exists.| + +| |We also provide persistence technology-specific abstractions, such as `JpaRepository` or `MongoRepository`.
Those interfaces extend `CrudRepository` and expose the capabilities of the underlying persistence technology in addition to the rather generic persistence technology-agnostic interfaces such as `CrudRepository`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +On top of the `CrudRepository`, there is a [`PagingAndSortingRepository`](https://docs.spring.io/spring-data/commons/docs/current/api/org/springframework/data/repository/PagingAndSortingRepository.html) abstraction that adds additional methods to ease paginated access to entities: + +Example 6. `PagingAndSortingRepository` interface + +``` +public interface PagingAndSortingRepository extends CrudRepository { + + Iterable findAll(Sort sort); + + Page findAll(Pageable pageable); +} +``` + +To access the second page of `User` by a page size of 20, you could do something like the following: + +``` +PagingAndSortingRepository repository = // … get access to a bean +Page users = repository.findAll(PageRequest.of(1, 20)); +``` + +In addition to query methods, query derivation for both count and delete queries is available. +The following list shows the interface definition for a derived count query: + +Example 7. Derived Count Query + +``` +interface UserRepository extends CrudRepository { + + long countByLastname(String lastname); +} +``` + +The following listing shows the interface definition for a derived delete query: + +Example 8. Derived Delete Query + +``` +interface UserRepository extends CrudRepository { + + long deleteByLastname(String lastname); + + List removeByLastname(String lastname); +} +``` + +### 4.2. Query Methods + +Standard CRUD functionality repositories usually have queries on the underlying datastore. 
+With Spring Data, declaring those queries becomes a four-step process: + +1. Declare an interface extending Repository or one of its subinterfaces and type it to the domain class and ID type that it should handle, as shown in the following example: + + ``` + interface PersonRepository extends Repository { … } + ``` + +2. Declare query methods on the interface. + + ``` + interface PersonRepository extends Repository { + List findByLastname(String lastname); + } + ``` + +3. Set up Spring to create proxy instances for those interfaces, either with [JavaConfig](#repositories.create-instances.java-config) or with [XML configuration](#repositories.create-instances). + + 1. To use Java configuration, create a class similar to the following: + + ``` + import org.springframework.data.jpa.repository.config.EnableJpaRepositories; + + @EnableJpaRepositories + class Config { … } + ``` + + 2. To use XML configuration, define a bean similar to the following: + + ``` + + + + + + + ``` + + The JPA namespace is used in this example. + If you use the repository abstraction for any other store, you need to change this to the appropriate namespace declaration of your store module. + In other words, you should exchange `jpa` in favor of, for example, `mongodb`. + + Also, note that the JavaConfig variant does not configure a package explicitly, because the package of the annotated class is used by default. + To customize the package to scan, use one of the `basePackage…` attributes of the data-store-specific repository’s `@Enable${store}Repositories`-annotation. + +4. 
Inject the repository instance and use it, as shown in the following example: + + ``` + class SomeClient { + + private final PersonRepository repository; + + SomeClient(PersonRepository repository) { + this.repository = repository; + } + + void doSomething() { + List persons = repository.findByLastname("Matthews"); + } + } + ``` + +The sections that follow explain each step in detail: + +* [Defining Repository Interfaces](#repositories.definition) + +* [Defining Query Methods](#repositories.query-methods.details) + +* [Creating Repository Instances](#repositories.create-instances) + +* [Custom Implementations for Spring Data Repositories](#repositories.custom-implementations) + +### 4.3. Defining Repository Interfaces + +To define a repository interface, you first need to define a domain class-specific repository interface. +The interface must extend `Repository` and be typed to the domain class and an ID type. +If you want to expose CRUD methods for that domain type, extend `CrudRepository` instead of `Repository`. + +#### 4.3.1. Fine-tuning Repository Definition + +Typically, your repository interface extends `Repository`, `CrudRepository`, or `PagingAndSortingRepository`. +Alternatively, if you do not want to extend Spring Data interfaces, you can also annotate your repository interface with `@RepositoryDefinition`. +Extending `CrudRepository` exposes a complete set of methods to manipulate your entities. +If you prefer to be selective about the methods being exposed, copy the methods you want to expose from `CrudRepository` into your domain repository. + +| |Doing so lets you define your own abstractions on top of the provided Spring Data Repositories functionality.| +|---|-------------------------------------------------------------------------------------------------------------| + +The following example shows how to selectively expose CRUD methods (`findById` and `save`, in this case): + +Example 9. 
Selectively exposing CRUD methods
+
+```
+@NoRepositoryBean
+interface MyBaseRepository extends Repository {
+
+  Optional findById(ID id);
+
+  S save(S entity);
+}
+
+interface UserRepository extends MyBaseRepository {
+  User findByEmailAddress(EmailAddress emailAddress);
+}
+```
+
+In the prior example, you defined a common base interface for all your domain repositories and exposed `findById(…)` as well as `save(…)`. These methods are routed into the base repository implementation of the store of your choice provided by Spring Data (for example, if you use JPA, the implementation is `SimpleJpaRepository`), because they match the method signatures in `CrudRepository`.
+So the `UserRepository` can now save users, find individual users by ID, and trigger a query to find `Users` by email address.
+
+| |The intermediate repository interface is annotated with `@NoRepositoryBean`.
Make sure you add that annotation to all repository interfaces for which Spring Data should not create instances at runtime.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.3.2. Using Repositories with Multiple Spring Data Modules + +Using a unique Spring Data module in your application makes things simple, because all repository interfaces in the defined scope are bound to the Spring Data module. +Sometimes, applications require using more than one Spring Data module. +In such cases, a repository definition must distinguish between persistence technologies. +When it detects multiple repository factories on the class path, Spring Data enters strict repository configuration mode. +Strict configuration uses details on the repository or the domain class to decide about Spring Data module binding for a repository definition: + +1. If the repository definition [extends the module-specific repository](#repositories.multiple-modules.types), it is a valid candidate for the particular Spring Data module. + +2. If the domain class is [annotated with the module-specific type annotation](#repositories.multiple-modules.annotations), it is a valid candidate for the particular Spring Data module. + Spring Data modules accept either third-party annotations (such as JPA’s `@Entity`) or provide their own annotations (such as `@Document` for Spring Data MongoDB and Spring Data Elasticsearch). + +The following example shows a repository that uses module-specific interfaces (JPA in this case): + +Example 10. 
Repository definitions using module-specific interfaces + +``` +interface MyRepository extends JpaRepository { } + +@NoRepositoryBean +interface MyBaseRepository extends JpaRepository { … } + +interface UserRepository extends MyBaseRepository { … } +``` + +`MyRepository` and `UserRepository` extend `JpaRepository` in their type hierarchy. +They are valid candidates for the Spring Data JPA module. + +The following example shows a repository that uses generic interfaces: + +Example 11. Repository definitions using generic interfaces + +``` +interface AmbiguousRepository extends Repository { … } + +@NoRepositoryBean +interface MyBaseRepository extends CrudRepository { … } + +interface AmbiguousUserRepository extends MyBaseRepository { … } +``` + +`AmbiguousRepository` and `AmbiguousUserRepository` extend only `Repository` and `CrudRepository` in their type hierarchy. +While this is fine when using a unique Spring Data module, multiple modules cannot distinguish to which particular Spring Data these repositories should be bound. + +The following example shows a repository that uses domain classes with annotations: + +Example 12. Repository definitions using domain classes with annotations + +``` +interface PersonRepository extends Repository { … } + +@Entity +class Person { … } + +interface UserRepository extends Repository { … } + +@Document +class User { … } +``` + +`PersonRepository` references `Person`, which is annotated with the JPA `@Entity` annotation, so this repository clearly belongs to Spring Data JPA. `UserRepository` references `User`, which is annotated with Spring Data MongoDB’s `@Document` annotation. + +The following bad example shows a repository that uses domain classes with mixed annotations: + +Example 13. 
Repository definitions using domain classes with mixed annotations + +``` +interface JpaPersonRepository extends Repository { … } + +interface MongoDBPersonRepository extends Repository { … } + +@Entity +@Document +class Person { … } +``` + +This example shows a domain class using both JPA and Spring Data MongoDB annotations. +It defines two repositories, `JpaPersonRepository` and `MongoDBPersonRepository`. +One is intended for JPA and the other for MongoDB usage. +Spring Data is no longer able to tell the repositories apart, which leads to undefined behavior. + +[Repository type details](#repositories.multiple-modules.types) and [distinguishing domain class annotations](#repositories.multiple-modules.annotations) are used for strict repository configuration to identify repository candidates for a particular Spring Data module. +Using multiple persistence technology-specific annotations on the same domain type is possible and enables reuse of domain types across multiple persistence technologies. +However, Spring Data can then no longer determine a unique module with which to bind the repository. + +The last way to distinguish repositories is by scoping repository base packages. +Base packages define the starting points for scanning for repository interface definitions, which implies having repository definitions located in the appropriate packages. +By default, annotation-driven configuration uses the package of the configuration class. +The [base package in XML-based configuration](#repositories.create-instances.spring) is mandatory. + +The following example shows annotation-driven configuration of base packages: + +Example 14. Annotation-driven configuration of base packages + +``` +@EnableJpaRepositories(basePackages = "com.acme.repositories.jpa") +@EnableMongoRepositories(basePackages = "com.acme.repositories.mongo") +class Configuration { … } +``` + +### 4.4. 
Defining Query Methods + +The repository proxy has two ways to derive a store-specific query from the method name: + +* By deriving the query from the method name directly. + +* By using a manually defined query. + +Available options depend on the actual store. +However, there must be a strategy that decides what actual query is created. +The next section describes the available options. + +#### 4.4.1. Query Lookup Strategies + +The following strategies are available for the repository infrastructure to resolve the query. +With XML configuration, you can configure the strategy at the namespace through the `query-lookup-strategy` attribute. +For Java configuration, you can use the `queryLookupStrategy` attribute of the `Enable${store}Repositories` annotation. +Some strategies may not be supported for particular datastores. + +* `CREATE` attempts to construct a store-specific query from the query method name. + The general approach is to remove a given set of well known prefixes from the method name and parse the rest of the method. + You can read more about query construction in “[Query Creation](#repositories.query-methods.query-creation)”. + +* `USE_DECLARED_QUERY` tries to find a declared query and throws an exception if it cannot find one. + The query can be defined by an annotation somewhere or declared by other means. + See the documentation of the specific store to find available options for that store. + If the repository infrastructure does not find a declared query for the method at bootstrap time, it fails. + +* `CREATE_IF_NOT_FOUND` (the default) combines `CREATE` and `USE_DECLARED_QUERY`. + It looks up a declared query first, and, if no declared query is found, it creates a custom method name-based query. + This is the default lookup strategy and, thus, is used if you do not configure anything explicitly. + It allows quick query definition by method names but also custom-tuning of these queries by introducing declared queries as needed. + +#### 4.4.2. 
Query Creation + +The query builder mechanism built into the Spring Data repository infrastructure is useful for building constraining queries over entities of the repository. + +The following example shows how to create a number of queries: + +Example 15. Query creation from method names + +``` +interface PersonRepository extends Repository { + + List findByEmailAddressAndLastname(EmailAddress emailAddress, String lastname); + + // Enables the distinct flag for the query + List findDistinctPeopleByLastnameOrFirstname(String lastname, String firstname); + List findPeopleDistinctByLastnameOrFirstname(String lastname, String firstname); + + // Enabling ignoring case for an individual property + List findByLastnameIgnoreCase(String lastname); + // Enabling ignoring case for all suitable properties + List findByLastnameAndFirstnameAllIgnoreCase(String lastname, String firstname); + + // Enabling static ORDER BY for a query + List findByLastnameOrderByFirstnameAsc(String lastname); + List findByLastnameOrderByFirstnameDesc(String lastname); +} +``` + +Parsing query method names is divided into subject and predicate. +The first part (`find…By`, `exists…By`) defines the subject of the query, the second part forms the predicate. +The introducing clause (subject) can contain further expressions. +Any text between `find` (or other introducing keywords) and `By` is considered to be descriptive unless using one of the result-limiting keywords such as a `Distinct` to set a distinct flag on the query to be created or [`Top`/`First` to limit query results](#repositories.limit-query-result). + +The appendix contains the [full list of query method subject keywords](#appendix.query.method.subject) and [query method predicate keywords including sorting and letter-casing modifiers](#appendix.query.method.predicate). +However, the first `By` acts as a delimiter to indicate the start of the actual criteria predicate. 
+At a very basic level, you can define conditions on entity properties and concatenate them with `And` and `Or`. + +The actual result of parsing the method depends on the persistence store for which you create the query. +However, there are some general things to notice: + +* The expressions are usually property traversals combined with operators that can be concatenated. + You can combine property expressions with `AND` and `OR`. + You also get support for operators such as `Between`, `LessThan`, `GreaterThan`, and `Like` for the property expressions. + The supported operators can vary by datastore, so consult the appropriate part of your reference documentation. + +* The method parser supports setting an `IgnoreCase` flag for individual properties (for example, `findByLastnameIgnoreCase(…)`) or for all properties of a type that supports ignoring case (usually `String` instances — for example, `findByLastnameAndFirstnameAllIgnoreCase(…)`). + Whether ignoring cases is supported may vary by store, so consult the relevant sections in the reference documentation for the store-specific query method. + +* You can apply static ordering by appending an `OrderBy` clause to the query method that references a property and by providing a sorting direction (`Asc` or `Desc`). + To create a query method that supports dynamic sorting, see “[Special parameter handling](#repositories.special-parameters)”. + +#### 4.4.3. Property Expressions + +Property expressions can refer only to a direct property of the managed entity, as shown in the preceding example. +At query creation time, you already make sure that the parsed property is a property of the managed domain class. +However, you can also define constraints by traversing nested properties. +Consider the following method signature: + +``` +List findByAddressZipCode(ZipCode zipCode); +``` + +Assume a `Person` has an `Address` with a `ZipCode`. +In that case, the method creates the `x.address.zipCode` property traversal. 
+The resolution algorithm starts by interpreting the entire part (`AddressZipCode`) as the property and checks the domain class for a property with that name (uncapitalized). +If the algorithm succeeds, it uses that property. +If not, the algorithm splits up the source at the camel-case parts from the right side into a head and a tail and tries to find the corresponding property — in our example, `AddressZip` and `Code`. +If the algorithm finds a property with that head, it takes the tail and continues building the tree down from there, splitting the tail up in the way just described. +If the first split does not match, the algorithm moves the split point to the left (`Address`, `ZipCode`) and continues. + +Although this should work for most cases, it is possible for the algorithm to select the wrong property. +Suppose the `Person` class has an `addressZip` property as well. +The algorithm would match in the first split round already, choose the wrong property, and fail (as the type of `addressZip` probably has no `code` property). + +To resolve this ambiguity you can use `_` inside your method name to manually define traversal points. +So our method name would be as follows: + +``` +List findByAddress_ZipCode(ZipCode zipCode); +``` + +Because we treat the underscore character as a reserved character, we strongly advise following standard Java naming conventions (that is, not using underscores in property names but using camel case instead). + +#### 4.4.4. Special parameter handling + +To handle parameters in your query, define method parameters as already seen in the preceding examples. +Besides that, the infrastructure recognizes certain specific types like `Pageable` and `Sort`, to apply pagination and sorting to your queries dynamically. +The following example demonstrates these features: + +Example 16. 
Using `Pageable`, `Slice`, and `Sort` in query methods + +``` +Page findByLastname(String lastname, Pageable pageable); + +Slice findByLastname(String lastname, Pageable pageable); + +List findByLastname(String lastname, Sort sort); + +List findByLastname(String lastname, Pageable pageable); +``` + +| |APIs taking `Sort` and `Pageable` expect non-`null` values to be handed into methods.
If you do not want to apply any sorting or pagination, use `Sort.unsorted()` and `Pageable.unpaged()`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The first method lets you pass an `org.springframework.data.domain.Pageable` instance to the query method to dynamically add paging to your statically defined query. +A `Page` knows about the total number of elements and pages available. +It does so by the infrastructure triggering a count query to calculate the overall number. +As this might be expensive (depending on the store used), you can instead return a `Slice`. +A `Slice` knows only about whether a next `Slice` is available, which might be sufficient when walking through a larger result set. + +Sorting options are handled through the `Pageable` instance, too. +If you need only sorting, add an `org.springframework.data.domain.Sort` parameter to your method. +As you can see, returning a `List` is also possible. +In this case, the additional metadata required to build the actual `Page` instance is not created (which, in turn, means that the additional count query that would have been necessary is not issued). +Rather, it restricts the query to look up only the given range of entities. + +| |To find out how many pages you get for an entire query, you have to trigger an additional count query.
By default, this query is derived from the query you actually trigger.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Paging and Sorting + +You can define simple sorting expressions by using property names. +You can concatenate expressions to collect multiple criteria into one expression. + +Example 17. Defining sort expressions + +``` +Sort sort = Sort.by("firstname").ascending() + .and(Sort.by("lastname").descending()); +``` + +For a more type-safe way to define sort expressions, start with the type for which to define the sort expression and use method references to define the properties on which to sort. + +Example 18. Defining sort expressions by using the type-safe API + +``` +TypedSort person = Sort.sort(Person.class); + +Sort sort = person.by(Person::getFirstname).ascending() + .and(person.by(Person::getLastname).descending()); +``` + +| |`TypedSort.by(…)` makes use of runtime proxies by (typically) using CGlib, which may interfere with native image compilation when using tools such as Graal VM Native.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If your store implementation supports Querydsl, you can also use the generated metamodel types to define sort expressions: + +Example 19. Defining sort expressions by using the Querydsl API + +``` +QSort sort = QSort.by(QPerson.firstname.asc()) + .and(QSort.by(QPerson.lastname.desc())); +``` + +#### 4.4.5. Limiting Query Results + +You can limit the results of query methods by using the `first` or `top` keywords, which you can use interchangeably. +You can append an optional numeric value to `top` or `first` to specify the maximum result size to be returned. +If the number is left out, a result size of 1 is assumed. 
+The following example shows how to limit the query size:
+
+Example 20. Limiting the result size of a query with `Top` and `First`
+
+```
+User findFirstByOrderByLastnameAsc();
+
+User findTopByOrderByAgeDesc();
+
+Page queryFirst10ByLastname(String lastname, Pageable pageable);
+
+Slice findTop3ByLastname(String lastname, Pageable pageable);
+
+List findFirst10ByLastname(String lastname, Sort sort);
+
+List findTop10ByLastname(String lastname, Pageable pageable);
+```
+
+The limiting expressions also support the `Distinct` keyword for datastores that support distinct queries.
+Also, for the queries that limit the result set to one instance, wrapping the result with the `Optional` keyword is supported.
+
+If pagination or slicing is applied to a limiting query, the pagination (and the calculation of the number of available pages) is applied within the limited result.
+
+| |Limiting the results in combination with dynamic sorting by using a `Sort` parameter lets you express query methods for the 'K' smallest as well as for the 'K' biggest elements.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 4.4.6. Repository Methods Returning Collections or Iterables
+
+Query methods that return multiple results can use standard Java `Iterable`, `List`, and `Set`.
+Beyond that, we support returning Spring Data’s `Streamable`, a custom extension of `Iterable`, as well as collection types provided by [Vavr](https://www.vavr.io/).
+Refer to the appendix explaining all possible [query method return types](#appendix.query.return.types).
+
+##### Using Streamable as Query Method Return Type
+
+You can use `Streamable` as an alternative to `Iterable` or any collection type. 
+It provides convenience methods to access a non-parallel `Stream` (missing from `Iterable`) and the ability to directly `….filter(…)` and `….map(…)` over the elements and concatenate the `Streamable` to others:
+
+Example 21. Using Streamable to combine query method results
+
+```
+interface PersonRepository extends Repository {
+  Streamable findByFirstnameContaining(String firstname);
+  Streamable findByLastnameContaining(String lastname);
+}
+
+Streamable result = repository.findByFirstnameContaining("av")
+  .and(repository.findByLastnameContaining("ea"));
+```
+
+##### Returning Custom Streamable Wrapper Types
+
+Providing dedicated wrapper types for collections is a commonly used pattern to provide an API for a query result that returns multiple elements.
+Usually, these types are used by invoking a repository method returning a collection-like type and creating an instance of the wrapper type manually.
+You can avoid that additional step as Spring Data lets you use these wrapper types as query method return types if they meet the following criteria:
+
+1. The type implements `Streamable`.
+
+2. The type exposes either a constructor or a static factory method named `of(…)` or `valueOf(…)` that takes `Streamable` as an argument.
+
+The following listing shows an example:
+
+```
+class Product { (1)
+  MonetaryAmount getPrice() { … }
+}
+
+@RequiredArgsConstructor(staticName = "of")
+class Products implements Streamable { (2)
+
+  private final Streamable streamable;
+
+  public MonetaryAmount getTotal() { (3)
+    return streamable.stream()
+      .map(Priced::getPrice)
+      .reduce(Money.of(0), MonetaryAmount::add);
+  }
+
+  @Override
+  public Iterator iterator() { (4)
+    return streamable.iterator();
+  }
+}
+
+interface ProductRepository extends Repository {
+  Products findAllByDescriptionContaining(String text); (5)
+}
+```
+
+|**1**| A `Product` entity that exposes API to access the product’s price. 
| +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|A wrapper type for a `Streamable` that can be constructed by using `Products.of(…)` (factory method created with the Lombok annotation).
A standard constructor taking the `Streamable` will do as well.| +|**3**| The wrapper type exposes an additional API, calculating new values on the `Streamable`. | +|**4**| Implement the `Streamable` interface and delegate to the actual result. | +|**5**| That wrapper type `Products` can be used directly as a query method return type.
You do not need to return `Streamable` and manually wrap it after the query in the repository client. | + +##### Support for Vavr Collections + +[Vavr](https://www.vavr.io/) is a library that embraces functional programming concepts in Java. +It ships with a custom set of collection types that you can use as query method return types, as the following table shows: + +| Vavr collection type | Used Vavr implementation type |Valid Java source types| +|------------------------|----------------------------------|-----------------------| +|`io.vavr.collection.Seq`| `io.vavr.collection.List` | `java.util.Iterable` | +|`io.vavr.collection.Set`|`io.vavr.collection.LinkedHashSet`| `java.util.Iterable` | +|`io.vavr.collection.Map`|`io.vavr.collection.LinkedHashMap`| `java.util.Map` | + +You can use the types in the first column (or subtypes thereof) as query method return types and get the types in the second column used as implementation type, depending on the Java type of the actual query result (third column). +Alternatively, you can declare `Traversable` (the Vavr `Iterable` equivalent), and we then derive the implementation class from the actual return value. +That is, a `java.util.List` is turned into a Vavr `List` or `Seq`, a `java.util.Set` becomes a Vavr `LinkedHashSet` `Set`, and so on. + +#### 4.4.7. Null Handling of Repository Methods + +As of Spring Data 2.0, repository CRUD methods that return an individual aggregate instance use Java 8’s `Optional` to indicate the potential absence of a value. +Besides that, Spring Data supports returning the following wrapper types on query methods: + +* `com.google.common.base.Optional` + +* `scala.Option` + +* `io.vavr.control.Option` + +Alternatively, query methods can choose not to use a wrapper type at all. +The absence of a query result is then indicated by returning `null`. 
+Repository methods returning collections, collection alternatives, wrappers, and streams are guaranteed never to return `null` but rather the corresponding empty representation. +See “[Repository query return types](#repository-query-return-types)” for details. + +##### Nullability Annotations + +You can express nullability constraints for repository methods by using [Spring Framework’s nullability annotations](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/core.html#null-safety). +They provide a tooling-friendly approach and opt-in `null` checks during runtime, as follows: + +* [`@NonNullApi`](https://docs.spring.io/spring/docs/5.3.16/javadoc-api/org/springframework/lang/NonNullApi.html): Used on the package level to declare that the default behavior for parameters and return values is, respectively, neither to accept nor to produce `null` values. + +* [`@NonNull`](https://docs.spring.io/spring/docs/5.3.16/javadoc-api/org/springframework/lang/NonNull.html): Used on a parameter or return value that must not be `null` (not needed on a parameter and return value where `@NonNullApi` applies). + +* [`@Nullable`](https://docs.spring.io/spring/docs/5.3.16/javadoc-api/org/springframework/lang/Nullable.html): Used on a parameter or return value that can be `null`. + +Spring annotations are meta-annotated with [JSR 305](https://jcp.org/en/jsr/detail?id=305) annotations (a dormant but widely used JSR). +JSR 305 meta-annotations let tooling vendors (such as [IDEA](https://www.jetbrains.com/help/idea/nullable-and-notnull-annotations.html), [Eclipse](https://help.eclipse.org/oxygen/index.jsp?topic=/org.eclipse.jdt.doc.user/tasks/task-using_external_null_annotations.htm), and [Kotlin](https://kotlinlang.org/docs/reference/java-interop.html#null-safety-and-platform-types)) provide null-safety support in a generic way, without having to hard-code support for Spring annotations. 
+To enable runtime checking of nullability constraints for query methods, you need to activate non-nullability on the package level by using Spring’s `@NonNullApi` in `package-info.java`, as shown in the following example: + +Example 22. Declaring Non-nullability in `package-info.java` + +``` +@org.springframework.lang.NonNullApi +package com.acme; +``` + +Once non-null defaulting is in place, repository query method invocations get validated at runtime for nullability constraints. +If a query result violates the defined constraint, an exception is thrown. +This happens when the method would return `null` but is declared as non-nullable (the default with the annotation defined on the package in which the repository resides). +If you want to opt-in to nullable results again, selectively use `@Nullable` on individual methods. +Using the result wrapper types mentioned at the start of this section continues to work as expected: an empty result is translated into the value that represents absence. + +The following example shows a number of the techniques just described: + +Example 23. Using different nullability constraints + +``` +package com.acme; (1) + +import org.springframework.lang.Nullable; + +interface UserRepository extends Repository { + + User getByEmailAddress(EmailAddress emailAddress); (2) + + @Nullable + User findByEmailAddress(@Nullable EmailAddress emailAdress); (3) + + Optional findOptionalByEmailAddress(EmailAddress emailAddress); (4) +} +``` + +|**1**| The repository resides in a package (or sub-package) for which we have defined non-null behavior. | +|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Throws an `EmptyResultDataAccessException` when the query does not produce a result.
Throws an `IllegalArgumentException` when the `emailAddress` handed to the method is `null`.| +|**3**| Returns `null` when the query does not produce a result.
Also accepts `null` as the value for `emailAddress`. | +|**4**| Returns `Optional.empty()` when the query does not produce a result.
Throws an `IllegalArgumentException` when the `emailAddress` handed to the method is `null`. | + +##### Nullability in Kotlin-based Repositories + +Kotlin has the definition of [nullability constraints](https://kotlinlang.org/docs/reference/null-safety.html) baked into the language. +Kotlin code compiles to bytecode, which does not express nullability constraints through method signatures but rather through compiled-in metadata. +Make sure to include the `kotlin-reflect` JAR in your project to enable introspection of Kotlin’s nullability constraints. +Spring Data repositories use the language mechanism to define those constraints to apply the same runtime checks, as follows: + +Example 24. Using nullability constraints on Kotlin repositories + +``` +interface UserRepository : Repository { + + fun findByUsername(username: String): User (1) + + fun findByFirstname(firstname: String?): User? (2) +} +``` + +|**1**|The method defines both the parameter and the result as non-nullable (the Kotlin default).
The Kotlin compiler rejects method invocations that pass `null` to the method.
If the query yields an empty result, an `EmptyResultDataAccessException` is thrown.| +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| This method accepts `null` for the `firstname` parameter and returns `null` if the query does not produce a result. | + +#### 4.4.8. Streaming Query Results + +You can process the results of query methods incrementally by using a Java 8 `Stream` as the return type. +Instead of wrapping the query results in a `Stream`, data store-specific methods are used to perform the streaming, as shown in the following example: + +Example 25. Stream the result of a query with Java 8 `Stream` + +``` +@Query("select u from User u") +Stream findAllByCustomQueryAndStream(); + +Stream readAllByFirstnameNotNull(); + +@Query("select u from User u") +Stream streamAllPaged(Pageable pageable); +``` + +| |A `Stream` potentially wraps underlying data store-specific resources and must, therefore, be closed after usage.
You can either manually close the `Stream` by using the `close()` method or by using a Java 7 `try-with-resources` block, as shown in the following example:| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Example 26. Working with a `Stream` result in a `try-with-resources` block + +``` +try (Stream stream = repository.findAllByCustomQueryAndStream()) { + stream.forEach(…); +} +``` + +| |Not all Spring Data modules currently support `Stream` as a return type.| +|---|---------------------------------------------------------------------------| + +#### 4.4.9. Asynchronous Query Results + +You can run repository queries asynchronously by using [Spring’s asynchronous method running capability](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/integration.html#scheduling). +This means the method returns immediately upon invocation while the actual query occurs in a task that has been submitted to a Spring `TaskExecutor`. +Asynchronous queries differ from reactive queries and should not be mixed. +See the store-specific documentation for more details on reactive support. +The following example shows a number of asynchronous queries: + +``` +@Async +Future findByFirstname(String firstname); (1) + +@Async +CompletableFuture findOneByFirstname(String firstname); (2) + +@Async +ListenableFuture findOneByLastname(String lastname); (3) +``` + +|**1**| Use `java.util.concurrent.Future` as the return type. | +|-----|--------------------------------------------------------------------------------| +|**2**| Use a Java 8 `java.util.concurrent.CompletableFuture` as the return type. | +|**3**|Use a `org.springframework.util.concurrent.ListenableFuture` as the return type.| + +### 4.5. 
Creating Repository Instances + +This section covers how to create instances and bean definitions for the defined repository interfaces. One way to do so is by using the Spring namespace that is shipped with each Spring Data module that supports the repository mechanism, although we generally recommend using Java configuration. + +#### 4.5.1. XML Configuration + +Each Spring Data module includes a `repositories` element that lets you define a base package that Spring scans for you, as shown in the following example: + +Example 27. Enabling Spring Data repositories via XML + +``` + + + + + + +``` + +In the preceding example, Spring is instructed to scan `com.acme.repositories` and all its sub-packages for interfaces extending `Repository` or one of its sub-interfaces. +For each interface found, the infrastructure registers the persistence technology-specific `FactoryBean` to create the appropriate proxies that handle invocations of the query methods. +Each bean is registered under a bean name that is derived from the interface name, so an interface of `UserRepository` would be registered under `userRepository`. +Bean names for nested repository interfaces are prefixed with their enclosing type name. +The `base-package` attribute allows wildcards so that you can define a pattern of scanned packages. + +##### Using Filters + +By default, the infrastructure picks up every interface that extends the persistence technology-specific `Repository` sub-interface located under the configured base package and creates a bean instance for it. +However, you might want more fine-grained control over which interfaces have bean instances created for them. +To do so, use `` and `` elements inside the `` element. +The semantics are exactly equivalent to the elements in Spring’s context namespace. +For details, see the [Spring reference documentation](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/core.html#beans-scanning-filters) for these elements. 
+ +For example, to exclude certain interfaces from instantiation as repository beans, you could use the following configuration: + +Example 28. Using exclude-filter element + +``` + + + +``` + +The preceding example excludes all interfaces ending in `SomeRepository` from being instantiated. + +#### 4.5.2. Java Configuration + +You can also trigger the repository infrastructure by using a store-specific `@Enable${store}Repositories` annotation on a Java configuration class. For an introduction to Java-based configuration of the Spring container, see [JavaConfig in the Spring reference documentation](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/core.html#beans-java). + +A sample configuration to enable Spring Data repositories resembles the following: + +Example 29. Sample annotation-based repository configuration + +``` +@Configuration +@EnableJpaRepositories("com.acme.repositories") +class ApplicationConfiguration { + + @Bean + EntityManagerFactory entityManagerFactory() { + // … + } +} +``` + +| |The preceding example uses the JPA-specific annotation, which you would change according to the store module you actually use. The same applies to the definition of the `EntityManagerFactory` bean. See the sections covering the store-specific configuration.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.5.3. Standalone Usage + +You can also use the repository infrastructure outside of a Spring container — for example, in CDI environments. You still need some Spring libraries in your classpath, but, generally, you can set up repositories programmatically as well. The Spring Data modules that provide repository support ship with a persistence technology-specific `RepositoryFactory` that you can use, as follows: + +Example 30. 
Standalone usage of the repository factory
+
+```
+RepositoryFactorySupport factory = … // Instantiate factory here
+UserRepository repository = factory.getRepository(UserRepository.class);
+```
+
+### 4.6. Custom Implementations for Spring Data Repositories
+
+Spring Data provides various options to create query methods with little coding.
+But when those options don’t fit your needs, you can also provide your own custom implementation for repository methods.
+This section describes how to do that.
+
+#### 4.6.1. Customizing Individual Repositories
+
+To enrich a repository with custom functionality, you must first define a fragment interface and an implementation for the custom functionality, as follows:
+
+Example 31. Interface for custom repository functionality
+
+```
+interface CustomizedUserRepository {
+  void someCustomMethod(User user);
+}
+```
+
+Example 32. Implementation of custom repository functionality
+
+```
+class CustomizedUserRepositoryImpl implements CustomizedUserRepository {
+
+  public void someCustomMethod(User user) {
+    // Your custom implementation
+  }
+}
+```
+
+| |The most important part of the class name that corresponds to the fragment interface is the `Impl` postfix.|
+|---|-----------------------------------------------------------------------------------------------------------|
+
+The implementation itself does not depend on Spring Data and can be a regular Spring bean. Consequently, you can use standard dependency injection behavior to inject references to other beans (such as a `JdbcTemplate`), take part in aspects, and so on.
+
+Then you can let your repository interface extend the fragment interface, as follows:
+
+Example 33. Changes to your repository interface
+
+```
+interface UserRepository extends CrudRepository, CustomizedUserRepository {
+
+  // Declare query methods here
+}
+```
+
+Extending the fragment interface with your repository interface combines the CRUD and custom functionality and makes it available to clients. 
+ +Spring Data repositories are implemented by using fragments that form a repository composition. Fragments are the base repository, functional aspects (such as [QueryDsl](#core.extensions.querydsl)), and custom interfaces along with their implementations. Each time you add an interface to your repository interface, you enhance the composition by adding a fragment. The base repository and repository aspect implementations are provided by each Spring Data module. + +The following example shows custom interfaces and their implementations: + +Example 34. Fragments with their implementations + +``` +interface HumanRepository { + void someHumanMethod(User user); +} + +class HumanRepositoryImpl implements HumanRepository { + + public void someHumanMethod(User user) { + // Your custom implementation + } +} + +interface ContactRepository { + + void someContactMethod(User user); + + User anotherContactMethod(User user); +} + +class ContactRepositoryImpl implements ContactRepository { + + public void someContactMethod(User user) { + // Your custom implementation + } + + public User anotherContactMethod(User user) { + // Your custom implementation + } +} +``` + +The following example shows the interface for a custom repository that extends `CrudRepository`: + +Example 35. Changes to your repository interface + +``` +interface UserRepository extends CrudRepository, HumanRepository, ContactRepository { + + // Declare query methods here +} +``` + +Repositories may be composed of multiple custom implementations that are imported in the order of their declaration. Custom implementations have a higher priority than the base implementation and repository aspects. This ordering lets you override base repository and aspect methods and resolves ambiguity if two fragments contribute the same method signature. Repository fragments are not limited to use in a single repository interface. 
Multiple repositories may use a fragment interface, letting you reuse customizations across different repositories. + +The following example shows a repository fragment and its implementation: + +Example 36. Fragments overriding `save(…)` + +``` +interface CustomizedSave { + S save(S entity); +} + +class CustomizedSaveImpl implements CustomizedSave { + + public S save(S entity) { + // Your custom implementation + } +} +``` + +The following example shows a repository that uses the preceding repository fragment: + +Example 37. Customized repository interfaces + +``` +interface UserRepository extends CrudRepository, CustomizedSave { +} + +interface PersonRepository extends CrudRepository, CustomizedSave { +} +``` + +##### Configuration + +If you use namespace configuration, the repository infrastructure tries to autodetect custom implementation fragments by scanning for classes below the package in which it found a repository. +These classes need to follow the naming convention of appending the namespace element’s `repository-impl-postfix` attribute to the fragment interface name. +This postfix defaults to `Impl`. +The following example shows a repository that uses the default postfix and a repository that sets a custom value for the postfix: + +Example 38. Configuration example + +``` + + + +``` + +The first configuration in the preceding example tries to look up a class called `com.acme.repository.CustomizedUserRepositoryImpl` to act as a custom repository implementation. +The second example tries to look up `com.acme.repository.CustomizedUserRepositoryMyPostfix`. + +###### Resolution of Ambiguity + +If multiple implementations with matching class names are found in different packages, Spring Data uses the bean names to identify which one to use. + +Given the following two custom implementations for the `CustomizedUserRepository` shown earlier, the first implementation is used. 
+Its bean name is `customizedUserRepositoryImpl`, which matches that of the fragment interface (`CustomizedUserRepository`) plus the postfix `Impl`.
+
+Example 39. Resolution of ambiguous implementations
+
+```
+package com.acme.impl.one;
+
+class CustomizedUserRepositoryImpl implements CustomizedUserRepository {
+
+  // Your custom implementation
+}
+```
+
+```
+package com.acme.impl.two;
+
+@Component("specialCustomImpl")
+class CustomizedUserRepositoryImpl implements CustomizedUserRepository {
+
+  // Your custom implementation
+}
+```
+
+If you annotate the `UserRepository` interface with `@Component("specialCustom")`, the bean name plus `Impl` then matches the one defined for the repository implementation in `com.acme.impl.two`, and it is used instead of the first one.
+
+###### Manual Wiring
+
+If your custom implementation uses annotation-based configuration and autowiring only, the approach shown in the preceding section works well, because it is treated as any other Spring bean.
+If your implementation fragment bean needs special wiring, you can declare the bean and name it according to the conventions described in the [preceding section](#repositories.single-repository-behaviour.ambiguity).
+The infrastructure then refers to the manually defined bean definition by name instead of creating one itself.
+The following example shows how to manually wire a custom implementation:
+
+Example 40. Manual wiring of custom implementations
+
+```
+
+
+
+
+
+```
+
+#### 4.6.2. Customize the Base Repository
+
+The approach described in the [preceding section](#repositories.manual-wiring) requires customization of each repository interface when you want to customize the base repository behavior so that all repositories are affected.
+To instead change behavior for all repositories, you can create an implementation that extends the persistence technology-specific repository base class.
+This class then acts as a custom base class for the repository proxies, as shown in the following example:
+
+Example 41. Custom repository base class
+
+```
+class MyRepositoryImpl
+  extends SimpleJpaRepository {
+
+  private final EntityManager entityManager;
+
+  MyRepositoryImpl(JpaEntityInformation entityInformation,
+                        EntityManager entityManager) {
+    super(entityInformation, entityManager);
+
+    // Keep the EntityManager around to be used from the newly introduced methods.
+    this.entityManager = entityManager;
+  }
+
+  @Transactional
+  public S save(S entity) {
+    // implementation goes here
+  }
+}
+```
+
+| |The class needs to have a constructor of the super class which the store-specific repository factory implementation uses.
If the repository base class has multiple constructors, override the one taking an `EntityInformation` plus a store specific infrastructure object (such as an `EntityManager` or a template class).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The final step is to make the Spring Data infrastructure aware of the customized repository base class. +In Java configuration, you can do so by using the `repositoryBaseClass` attribute of the `@Enable${store}Repositories` annotation, as shown in the following example: + +Example 42. Configuring a custom repository base class using JavaConfig + +``` +@Configuration +@EnableJpaRepositories(repositoryBaseClass = MyRepositoryImpl.class) +class ApplicationConfiguration { … } +``` + +A corresponding attribute is available in the XML namespace, as shown in the following example: + +Example 43. Configuring a custom repository base class using XML + +``` + +``` + +### 4.7. Publishing Events from Aggregate Roots + +Entities managed by repositories are aggregate roots. +In a Domain-Driven Design application, these aggregate roots usually publish domain events. +Spring Data provides an annotation called `@DomainEvents` that you can use on a method of your aggregate root to make that publication as easy as possible, as shown in the following example: + +Example 44. 
Exposing domain events from an aggregate root + +``` +class AnAggregateRoot { + + @DomainEvents (1) + Collection domainEvents() { + // … return events you want to get published here + } + + @AfterDomainEventPublication (2) + void callbackMethod() { + // … potentially clean up domain events list + } +} +``` + +|**1**| The method that uses `@DomainEvents` can return either a single event instance or a collection of events.
It must not take any arguments. | +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|After all events have been published, we have a method annotated with `@AfterDomainEventPublication`.
You can use it to potentially clean the list of events to be published (among other uses).| + +The methods are called every time one of a Spring Data repository’s `save(…)`, `saveAll(…)`, `delete(…)` or `deleteAll(…)` methods are called. + +### 4.8. Spring Data Extensions + +This section documents a set of Spring Data extensions that enable Spring Data usage in a variety of contexts. +Currently, most of the integration is targeted towards Spring MVC. + +#### 4.8.1. Querydsl Extension + +[Querydsl](http://www.querydsl.com/) is a framework that enables the construction of statically typed SQL-like queries through its fluent API. + +Several Spring Data modules offer integration with Querydsl through `QuerydslPredicateExecutor`, as the following example shows: + +Example 45. QuerydslPredicateExecutor interface + +``` +public interface QuerydslPredicateExecutor { + + Optional findById(Predicate predicate); (1) + + Iterable findAll(Predicate predicate); (2) + + long count(Predicate predicate); (3) + + boolean exists(Predicate predicate); (4) + + // … more functionality omitted. +} +``` + +|**1**| Finds and returns a single entity matching the `Predicate`. | +|-----|--------------------------------------------------------------| +|**2**| Finds and returns all entities matching the `Predicate`. | +|**3**| Returns the number of entities matching the `Predicate`. | +|**4**|Returns whether an entity that matches the `Predicate` exists.| + +To use the Querydsl support, extend `QuerydslPredicateExecutor` on your repository interface, as the following example shows: + +Example 46. 
Querydsl integration on repositories + +``` +interface UserRepository extends CrudRepository, QuerydslPredicateExecutor { +} +``` + +The preceding example lets you write type-safe queries by using Querydsl `Predicate` instances, as the following example shows: + +``` +Predicate predicate = user.firstname.equalsIgnoreCase("dave") + .and(user.lastname.startsWithIgnoreCase("mathews")); + +userRepository.findAll(predicate); +``` + +#### 4.8.2. Web support + +Spring Data modules that support the repository programming model ship with a variety of web support. +The web related components require Spring MVC JARs to be on the classpath. +Some of them even provide integration with [Spring HATEOAS](https://github.com/spring-projects/spring-hateoas). +In general, the integration support is enabled by using the `@EnableSpringDataWebSupport` annotation in your JavaConfig configuration class, as the following example shows: + +Example 47. Enabling Spring Data web support + +``` +@Configuration +@EnableWebMvc +@EnableSpringDataWebSupport +class WebConfiguration {} +``` + +The `@EnableSpringDataWebSupport` annotation registers a few components. +We discuss those later in this section. +It also detects Spring HATEOAS on the classpath and registers integration components (if present) for it as well. + +Alternatively, if you use XML configuration, register either `SpringDataWebConfiguration` or `HateoasAwareSpringDataWebConfiguration` as Spring beans, as the following example shows (for `SpringDataWebConfiguration`): + +Example 48. Enabling Spring Data web support in XML + +``` + + + + +``` + +##### Basic Web Support + +The configuration shown in the [previous section](#core.web) registers a few basic components: + +* A [Using the `DomainClassConverter` Class](#core.web.basic.domain-class-converter) to let Spring MVC resolve instances of repository-managed domain classes from request parameters or path variables. 
+ +* [`HandlerMethodArgumentResolver`](#core.web.basic.paging-and-sorting) implementations to let Spring MVC resolve `Pageable` and `Sort` instances from request parameters. + +* [Jackson Modules](#core.web.basic.jackson-mappers) to de-/serialize types like `Point` and `Distance`, or store specific ones, depending on the Spring Data Module used. + +###### Using the `DomainClassConverter` Class + +The `DomainClassConverter` class lets you use domain types in your Spring MVC controller method signatures directly so that you need not manually lookup the instances through the repository, as the following example shows: + +Example 49. A Spring MVC controller using domain types in method signatures + +``` +@Controller +@RequestMapping("/users") +class UserController { + + @RequestMapping("/{id}") + String showUserForm(@PathVariable("id") User user, Model model) { + + model.addAttribute("user", user); + return "userForm"; + } +} +``` + +The method receives a `User` instance directly, and no further lookup is necessary. +The instance can be resolved by letting Spring MVC convert the path variable into the `id` type of the domain class first and eventually access the instance through calling `findById(…)` on the repository instance registered for the domain type. + +| |Currently, the repository has to implement `CrudRepository` to be eligible to be discovered for conversion.| +|---|-----------------------------------------------------------------------------------------------------------| + +###### HandlerMethodArgumentResolvers for Pageable and Sort + +The configuration snippet shown in the [previous section](#core.web.basic.domain-class-converter) also registers a `PageableHandlerMethodArgumentResolver` as well as an instance of `SortHandlerMethodArgumentResolver`. +The registration enables `Pageable` and `Sort` as valid controller method arguments, as the following example shows: + +Example 50. 
Using Pageable as a controller method argument
+
+```
+@Controller
+@RequestMapping("/users")
+class UserController {
+
+  private final UserRepository repository;
+
+  UserController(UserRepository repository) {
+    this.repository = repository;
+  }
+
+  @RequestMapping
+  String showUsers(Model model, Pageable pageable) {
+
+    model.addAttribute("users", repository.findAll(pageable));
+    return "users";
+  }
+}
+```
+
+The preceding method signature causes Spring MVC to try to derive a `Pageable` instance from the request parameters by using the following default configuration:
+
+|`page`| Page you want to retrieve. 0-indexed and defaults to 0. |
+|------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|`size`| Size of the page you want to retrieve. Defaults to 20. |
+|`sort`|Properties that should be sorted by in the format `property,property(,ASC|DESC)(,IgnoreCase)`. The default sort direction is case-sensitive ascending. Use multiple `sort` parameters if you want to switch direction or case sensitivity — for example, `?sort=firstname&sort=lastname,asc&sort=city,ignorecase`.|
+
+To customize this behavior, register a bean that implements the `PageableHandlerMethodArgumentResolverCustomizer` interface or the `SortHandlerMethodArgumentResolverCustomizer` interface, respectively.
+Its `customize()` method gets called, letting you change settings, as the following example shows: + +``` +@Bean SortHandlerMethodArgumentResolverCustomizer sortCustomizer() { + return s -> s.setPropertyDelimiter("<-->"); +} +``` + +If setting the properties of an existing `MethodArgumentResolver` is not sufficient for your purpose, extend either `SpringDataWebConfiguration` or the HATEOAS-enabled equivalent, override the `pageableResolver()` or `sortResolver()` methods, and import your customized configuration file instead of using the `@Enable` annotation. + +If you need multiple `Pageable` or `Sort` instances to be resolved from the request (for multiple tables, for example), you can use Spring’s `@Qualifier` annotation to distinguish one from another. +The request parameters then have to be prefixed with `${qualifier}_`. +The following example shows the resulting method signature: + +``` +String showUsers(Model model, + @Qualifier("thing1") Pageable first, + @Qualifier("thing2") Pageable second) { … } +``` + +You have to populate `thing1_page`, `thing2_page`, and so on. + +The default `Pageable` passed into the method is equivalent to a `PageRequest.of(0, 20)`, but you can customize it by using the `@PageableDefault` annotation on the `Pageable` parameter. + +##### Hypermedia Support for Pageables + +Spring HATEOAS ships with a representation model class (`PagedResources`) that allows enriching the content of a `Page` instance with the necessary `Page` metadata as well as links to let the clients easily navigate the pages. +The conversion of a `Page` to a `PagedResources` is done by an implementation of the Spring HATEOAS `ResourceAssembler` interface, called the `PagedResourcesAssembler`. +The following example shows how to use a `PagedResourcesAssembler` as a controller method argument: + +Example 51. 
Using a PagedResourcesAssembler as controller method argument + +``` +@Controller +class PersonController { + + @Autowired PersonRepository repository; + + @RequestMapping(value = "/persons", method = RequestMethod.GET) + HttpEntity> persons(Pageable pageable, + PagedResourcesAssembler assembler) { + + Page persons = repository.findAll(pageable); + return new ResponseEntity<>(assembler.toResources(persons), HttpStatus.OK); + } +} +``` + +Enabling the configuration, as shown in the preceding example, lets the `PagedResourcesAssembler` be used as a controller method argument. +Calling `toResources(…)` on it has the following effects: + +* The content of the `Page` becomes the content of the `PagedResources` instance. + +* The `PagedResources` object gets a `PageMetadata` instance attached, and it is populated with information from the `Page` and the underlying `PageRequest`. + +* The `PagedResources` may get `prev` and `next` links attached, depending on the page’s state. + The links point to the URI to which the method maps. + The pagination parameters added to the method match the setup of the `PageableHandlerMethodArgumentResolver` to make sure the links can be resolved later. + +Assume we have 30 `Person` instances in the database. +You can now trigger a request (`GET [http://localhost:8080/persons](http://localhost:8080/persons)`) and see output similar to the following: + +``` +{ "links" : [ { "rel" : "next", + "href" : "http://localhost:8080/persons?page=1&size=20" } + ], + "content" : [ + … // 20 Person instances rendered here + ], + "pageMetadata" : { + "size" : 20, + "totalElements" : 30, + "totalPages" : 2, + "number" : 0 + } +} +``` + +The assembler produced the correct URI and also picked up the default configuration to resolve the parameters into a `Pageable` for an upcoming request. +This means that, if you change that configuration, the links automatically adhere to the change. 
+By default, the assembler points to the controller method it was invoked in, but you can customize that by passing a custom `Link` to be used as base to build the pagination links, which overloads the `PagedResourcesAssembler.toResource(…)` method. + +##### Spring Data Jackson Modules + +The core module, and some of the store specific ones, ship with a set of Jackson Modules for types, like `org.springframework.data.geo.Distance` and `org.springframework.data.geo.Point`, used by the Spring Data domain. +Those Modules are imported once [web support](#core.web) is enabled and `com.fasterxml.jackson.databind.ObjectMapper` is available. + +During initialization `SpringDataJacksonModules`, like the `SpringDataJacksonConfiguration`, get picked up by the infrastructure, so that the declared `com.fasterxml.jackson.databind.Module`s are made available to the Jackson `ObjectMapper`. + +Data binding mixins for the following domain types are registered by the common infrastructure. + +``` +org.springframework.data.geo.Distance +org.springframework.data.geo.Point +org.springframework.data.geo.Box +org.springframework.data.geo.Circle +org.springframework.data.geo.Polygon +``` + +| |The individual module may provide additional `SpringDataJacksonModules`.
Please refer to the store specific section for more details.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------|
+
+##### Web Databinding Support
+
+You can use Spring Data projections (described in [Projections](#projections)) to bind incoming request payloads by using either [JSONPath](https://goessner.net/articles/JsonPath/) expressions (requires [Jayway JsonPath](https://github.com/json-path/JsonPath)) or [XPath](https://www.w3.org/TR/xpath-31/) expressions (requires [XmlBeam](https://xmlbeam.org/)), as the following example shows:
+
+Example 52. HTTP payload binding using JSONPath or XPath expressions
+
+```
+@ProjectedPayload
+public interface UserPayload {
+
+  @XBRead("//firstname")
+  @JsonPath("$..firstname")
+  String getFirstname();
+
+  @XBRead("/lastname")
+  @JsonPath({ "$.lastname", "$.user.lastname" })
+  String getLastname();
+}
+```
+
+You can use the type shown in the preceding example as a Spring MVC handler method argument or by using `ParameterizedTypeReference` on one of the methods of the `RestTemplate`.
+The preceding method declarations would try to find `firstname` anywhere in the given document.
+The `lastname` XML lookup is performed on the top-level of the incoming document.
+The JSON variant of that tries a top-level `lastname` first but also tries `lastname` nested in a `user` sub-document if the former does not return a value.
+That way, changes in the structure of the source document can be mitigated easily without having clients calling the exposed methods (usually a drawback of class-based payload binding).
+
+Nested projections are supported as described in [Projections](#projections).
+If the method returns a complex, non-interface type, a Jackson `ObjectMapper` is used to map the final value.
+ +For Spring MVC, the necessary converters are registered automatically as soon as `@EnableSpringDataWebSupport` is active and the required dependencies are available on the classpath. +For usage with `RestTemplate`, register a `ProjectingJackson2HttpMessageConverter` (JSON) or `XmlBeamHttpMessageConverter` manually. + +For more information, see the [web projection example](https://github.com/spring-projects/spring-data-examples/tree/master/web/projection) in the canonical [Spring Data Examples repository](https://github.com/spring-projects/spring-data-examples). + +##### Querydsl Web Support + +For those stores that have [QueryDSL](http://www.querydsl.com/) integration, you can derive queries from the attributes contained in a `Request` query string. + +Consider the following query string: + +``` +?firstname=Dave&lastname=Matthews +``` + +Given the `User` object from the previous examples, you can resolve a query string to the following value by using the `QuerydslPredicateArgumentResolver`, as follows: + +``` +QUser.user.firstname.eq("Dave").and(QUser.user.lastname.eq("Matthews")) +``` + +| |The feature is automatically enabled, along with `@EnableSpringDataWebSupport`, when Querydsl is found on the classpath.| +|---|------------------------------------------------------------------------------------------------------------------------| + +Adding a `@QuerydslPredicate` to the method signature provides a ready-to-use `Predicate`, which you can run by using the `QuerydslPredicateExecutor`. + +| |Type information is typically resolved from the method’s return type.
Since that information does not necessarily match the domain type, it might be a good idea to use the `root` attribute of `QuerydslPredicate`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to use `@QuerydslPredicate` in a method signature: + +``` +@Controller +class UserController { + + @Autowired UserRepository repository; + + @RequestMapping(value = "/", method = RequestMethod.GET) + String index(Model model, @QuerydslPredicate(root = User.class) Predicate predicate, (1) + Pageable pageable, @RequestParam MultiValueMap parameters) { + + model.addAttribute("users", repository.findAll(predicate, pageable)); + + return "index"; + } +} +``` + +|**1**|Resolve query string arguments to matching `Predicate` for `User`.| +|-----|------------------------------------------------------------------| + +The default binding is as follows: + +* `Object` on simple properties as `eq`. + +* `Object` on collection like properties as `contains`. + +* `Collection` on simple properties as `in`. + +You can customize those bindings through the `bindings` attribute of `@QuerydslPredicate` or by making use of Java 8 `default methods` and adding the `QuerydslBinderCustomizer` method to the repository interface, as follows: + +``` +interface UserRepository extends CrudRepository, + QuerydslPredicateExecutor, (1) + QuerydslBinderCustomizer { (2) + + @Override + default void customize(QuerydslBindings bindings, QUser user) { + + bindings.bind(user.username).first((path, value) -> path.contains(value)) (3) + bindings.bind(String.class) + .first((StringPath path, String value) -> path.containsIgnoreCase(value)); (4) + bindings.excluding(user.password); (5) + } +} +``` + +|**1**| `QuerydslPredicateExecutor` provides access to specific finder methods for `Predicate`. 
| +|-----|------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|`QuerydslBinderCustomizer` defined on the repository interface is automatically picked up and shortcuts `@QuerydslPredicate(bindings=…​)`.| +|**3**| Define the binding for the `username` property to be a simple `contains` binding. | +|**4**| Define the default binding for `String` properties to be a case-insensitive `contains` match. | +|**5**| Exclude the `password` property from `Predicate` resolution. | + +| |You can register a `QuerydslBinderCustomizerDefaults` bean holding default Querydsl bindings before applying specific bindings from the repository or `@QuerydslPredicate`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.8.3. Repository Populators + +If you work with the Spring JDBC module, you are probably familiar with the support for populating a `DataSource` with SQL scripts. +A similar abstraction is available on the repositories level, although it does not use SQL as the data definition language because it must be store-independent. +Thus, the populators support XML (through Spring’s OXM abstraction) and JSON (through Jackson) to define data with which to populate the repositories. + +Assume you have a file called `data.json` with the following content: + +Example 53. Data defined in JSON + +``` +[ { "_class" : "com.acme.Person", + "firstname" : "Dave", + "lastname" : "Matthews" }, + { "_class" : "com.acme.Person", + "firstname" : "Carter", + "lastname" : "Beauford" } ] +``` + +You can populate your repositories by using the populator elements of the repository namespace provided in Spring Data Commons. +To populate the preceding data to your `PersonRepository`, declare a populator similar to the following: + +Example 54. 
Declaring a Jackson repository populator + +``` + + + + + + +``` + +The preceding declaration causes the `data.json` file to be read and deserialized by a Jackson `ObjectMapper`. + +The type to which the JSON object is unmarshalled is determined by inspecting the `_class` attribute of the JSON document. +The infrastructure eventually selects the appropriate repository to handle the object that was deserialized. + +To instead use XML to define the data the repositories should be populated with, you can use the `unmarshaller-populator` element. +You configure it to use one of the XML marshaller options available in Spring OXM. See the [Spring reference documentation](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/data-access.html#oxm) for details. +The following example shows how to unmarshall a repository populator with JAXB: + +Example 55. Declaring an unmarshalling repository populator (using JAXB) + +``` + + + + + + + + +``` + +## 5. Projections + +Spring Data query methods usually return one or multiple instances of the aggregate root managed by the repository. +However, it might sometimes be desirable to create projections based on certain attributes of those types. +Spring Data allows modeling dedicated return types, to more selectively retrieve partial views of the managed aggregates. + +Imagine a repository and aggregate root type such as the following example: + +Example 56. A sample aggregate and repository + +``` +class Person { + + @Id UUID id; + String firstname, lastname; + Address address; + + static class Address { + String zipCode, city, street; + } +} + +interface PersonRepository extends Repository { + + Collection findByLastname(String lastname); +} +``` + +Now imagine that we want to retrieve the person’s name attributes only. +What means does Spring Data offer to achieve this? The rest of this chapter answers that question. + +### 5.1. 
Interface-based Projections + +The easiest way to limit the result of the queries to only the name attributes is by declaring an interface that exposes accessor methods for the properties to be read, as shown in the following example: + +Example 57. A projection interface to retrieve a subset of attributes + +``` +interface NamesOnly { + + String getFirstname(); + String getLastname(); +} +``` + +The important bit here is that the properties defined here exactly match properties in the aggregate root. +Doing so lets a query method be added as follows: + +Example 58. A repository using an interface based projection with a query method + +``` +interface PersonRepository extends Repository { + + Collection findByLastname(String lastname); +} +``` + +The query execution engine creates proxy instances of that interface at runtime for each element returned and forwards calls to the exposed methods to the target object. + +| |Declaring a method in your `Repository` that overrides a base method (e.g. declared in `CrudRepository`, a store-specific repository interface, or the `Simple…Repository`) results in a call to the base method regardless of the declared return type. Make sure to use a compatible return type as base methods cannot be used for projections. Some store modules support `@Query` annotations to turn an overridden base method into a query method that then can be used to return projections.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Projections can be used recursively. 
If you want to include some of the `Address` information as well, create a projection interface for that and return that interface from the declaration of `getAddress()`, as shown in the following example: + +Example 59. A projection interface to retrieve a subset of attributes + +``` +interface PersonSummary { + + String getFirstname(); + String getLastname(); + AddressSummary getAddress(); + + interface AddressSummary { + String getCity(); + } +} +``` + +On method invocation, the `address` property of the target instance is obtained and wrapped into a projecting proxy in turn. + +#### 5.1.1. Closed Projections + +A projection interface whose accessor methods all match properties of the target aggregate is considered to be a closed projection. The following example (which we used earlier in this chapter, too) is a closed projection: + +Example 60. A closed projection + +``` +interface NamesOnly { + + String getFirstname(); + String getLastname(); +} +``` + +If you use a closed projection, Spring Data can optimize the query execution, because we know about all the attributes that are needed to back the projection proxy. +For more details on that, see the module-specific part of the reference documentation. + +#### 5.1.2. Open Projections + +Accessor methods in projection interfaces can also be used to compute new values by using the `@Value` annotation, as shown in the following example: + +Example 61. An Open Projection + +``` +interface NamesOnly { + + @Value("#{target.firstname + ' ' + target.lastname}") + String getFullName(); + … +} +``` + +The aggregate root backing the projection is available in the `target` variable. +A projection interface using `@Value` is an open projection. +Spring Data cannot apply query execution optimizations in this case, because the SpEL expression could use any attribute of the aggregate root. + +The expressions used in `@Value` should not be too complex — you want to avoid programming in `String` variables. 
+For very simple expressions, one option might be to resort to default methods (introduced in Java 8), as shown in the following example: + +Example 62. A projection interface using a default method for custom logic + +``` +interface NamesOnly { + + String getFirstname(); + String getLastname(); + + default String getFullName() { + return getFirstname().concat(" ").concat(getLastname()); + } +} +``` + +This approach requires you to be able to implement logic purely based on the other accessor methods exposed on the projection interface. +A second, more flexible, option is to implement the custom logic in a Spring bean and then invoke that from the SpEL expression, as shown in the following example: + +Example 63. Sample Person object + +``` +@Component +class MyBean { + + String getFullName(Person person) { + … + } +} + +interface NamesOnly { + + @Value("#{@myBean.getFullName(target)}") + String getFullName(); + … +} +``` + +Notice how the SpEL expression refers to `myBean` and invokes the `getFullName(…)` method and forwards the projection target as a method parameter. +Methods backed by SpEL expression evaluation can also use method parameters, which can then be referred to from the expression. +The method parameters are available through an `Object` array named `args`. The following example shows how to get a method parameter from the `args` array: + +Example 64. Sample Person object + +``` +interface NamesOnly { + + @Value("#{args[0] + ' ' + target.firstname + '!'}") + String getSalutation(String prefix); +} +``` + +Again, for more complex expressions, you should use a Spring bean and let the expression invoke a method, as described [earlier](#projections.interfaces.open.bean-reference). + +#### 5.1.3. Nullable Wrappers + +Getters in projection interfaces can make use of nullable wrappers for improved null-safety. 
Currently supported wrapper types are: + +* `java.util.Optional` + +* `com.google.common.base.Optional` + +* `scala.Option` + +* `io.vavr.control.Option` + +Example 65. A projection interface using nullable wrappers + +``` +interface NamesOnly { + + Optional getFirstname(); +} +``` + +If the underlying projection value is not `null`, then values are returned using the present-representation of the wrapper type. +In case the backing value is `null`, then the getter method returns the empty representation of the used wrapper type. + +### 5.2. Class-based Projections (DTOs) + +Another way of defining projections is by using value type DTOs (Data Transfer Objects) that hold properties for the fields that are supposed to be retrieved. +These DTO types can be used in exactly the same way projection interfaces are used, except that no proxying happens and no nested projections can be applied. + +If the store optimizes the query execution by limiting the fields to be loaded, the fields to be loaded are determined from the parameter names of the constructor that is exposed. + +The following example shows a projecting DTO: + +Example 66. A projecting DTO + +``` +class NamesOnly { + + private final String firstname, lastname; + + NamesOnly(String firstname, String lastname) { + + this.firstname = firstname; + this.lastname = lastname; + } + + String getFirstname() { + return this.firstname; + } + + String getLastname() { + return this.lastname; + } + + // equals(…) and hashCode() implementations +} +``` + +| |Avoid boilerplate code for projection DTOs

You can dramatically simplify the code for a DTO by using [Project Lombok](https://projectlombok.org), which provides an `@Value` annotation (not to be confused with Spring’s `@Value` annotation shown in the earlier interface examples).
If you use Project Lombok’s `@Value` annotation, the sample DTO shown earlier would become the following:

```
@Value
class NamesOnly {
String firstname, lastname;
}
```

Fields are `private final` by default, and the class exposes a constructor that takes all fields and automatically gets `equals(…)` and `hashCode()` methods implemented.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 5.3. Dynamic Projections + +So far, we have used the projection type as the return type or element type of a collection. +However, you might want to select the type to be used at invocation time (which makes it dynamic). +To apply dynamic projections, use a query method such as the one shown in the following example: + +Example 67. A repository using a dynamic projection parameter + +``` +interface PersonRepository extends Repository { + + Collection findByLastname(String lastname, Class type); +} +``` + +This way, the method can be used to obtain the aggregates as is or with a projection applied, as shown in the following example: + +Example 68. Using a repository with dynamic projections + +``` +void someMethod(PersonRepository people) { + + Collection aggregates = + people.findByLastname("Matthews", Person.class); + + Collection aggregates = + people.findByLastname("Matthews", NamesOnly.class); +} +``` + +| |Query parameters of type `Class` are inspected whether they qualify as dynamic projection parameter.
If the actual return type of the query equals the generic parameter type of the `Class` parameter, then the matching `Class` parameter is not available for use within the query or SpEL expressions.
If you want to use a `Class` parameter as query argument then make sure to use a different generic parameter, for example `Class`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 6. Query by Example + +### 6.1. Introduction + +This chapter provides an introduction to Query by Example and explains how to use it. + +Query by Example (QBE) is a user-friendly querying technique with a simple interface. +It allows dynamic query creation and does not require you to write queries that contain field names. +In fact, Query by Example does not require you to write queries by using store-specific query languages at all. + +### 6.2. Usage + +The Query by Example API consists of three parts: + +* Probe: The actual example of a domain object with populated fields. + +* `ExampleMatcher`: The `ExampleMatcher` carries details on how to match particular fields. + It can be reused across multiple Examples. + +* `Example`: An `Example` consists of the probe and the `ExampleMatcher`. + It is used to create the query. + +Query by Example is well suited for several use cases: + +* Querying your data store with a set of static or dynamic constraints. + +* Frequent refactoring of the domain objects without worrying about breaking existing queries. + +* Working independently from the underlying data store API. + +Query by Example also has several limitations: + +* No support for nested or grouped property constraints, such as `firstname = ?0 or (firstname = ?1 and lastname = ?2)`. 
+ +* Only supports starts/contains/ends/regex matching for strings and exact matching for other property types. + +Before getting started with Query by Example, you need to have a domain object. +To get started, create an interface for your repository, as shown in the following example: + +Example 69. Sample Person object + +``` +public class Person { + + @Id + private String id; + private String firstname; + private String lastname; + private Address address; + + // … getters and setters omitted +} +``` + +The preceding example shows a simple domain object. +You can use it to create an `Example`. +By default, fields having `null` values are ignored, and strings are matched by using the store specific defaults. + +| |Inclusion of properties into a Query by Example criteria is based on nullability. Properties using primitive types (`int`, `double`, …) are always included unless [ignoring the property path](#query-by-example.matchers).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Examples can be built by either using the `of` factory method or by using [`ExampleMatcher`](#query-by-example.matchers). `Example` is immutable. +The following listing shows a simple Example: + +Example 70. Simple Example + +``` +Person person = new Person(); (1) +person.setFirstname("Dave"); (2) + +Example example = Example.of(person); (3) +``` + +|**1**|Create a new instance of the domain object.| +|-----|-------------------------------------------| +|**2**| Set the properties to query. | +|**3**| Create the `Example`. | + +You can run the example queries by using repositories. +To do so, let your repository interface extend `QueryByExampleExecutor`. +The following listing shows an excerpt from the `QueryByExampleExecutor` interface: + +Example 71. 
The `QueryByExampleExecutor` + +``` +public interface QueryByExampleExecutor { + + S findOne(Example example); + + Iterable findAll(Example example); + + // … more functionality omitted. +} +``` + +### 6.3. Example Matchers + +Examples are not limited to default settings. +You can specify your own defaults for string matching, null handling, and property-specific settings by using the `ExampleMatcher`, as shown in the following example: + +Example 72. Example matcher with customized matching + +``` +Person person = new Person(); (1) +person.setFirstname("Dave"); (2) + +ExampleMatcher matcher = ExampleMatcher.matching() (3) + .withIgnorePaths("lastname") (4) + .withIncludeNullValues() (5) + .withStringMatcher(StringMatcher.ENDING); (6) + +Example example = Example.of(person, matcher); (7) +``` + +|**1**| Create a new instance of the domain object. | +|-----|---------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Set properties. | +|**3**| Create an `ExampleMatcher` to expect all values to match.
It is usable at this stage even without further configuration. | +|**4**| Construct a new `ExampleMatcher` to ignore the `lastname` property path. | +|**5**| Construct a new `ExampleMatcher` to ignore the `lastname` property path and to include null values. | +|**6**|Construct a new `ExampleMatcher` to ignore the `lastname` property path, to include null values, and to perform suffix string matching.| +|**7**| Create a new `Example` based on the domain object and the configured `ExampleMatcher`. | + +By default, the `ExampleMatcher` expects all values set on the probe to match. +If you want to get results matching any of the predicates defined implicitly, use `ExampleMatcher.matchingAny()`. + +You can specify behavior for individual properties (such as "firstname" and "lastname" or, for nested properties, "address.city"). +You can tune it with matching options and case sensitivity, as shown in the following example: + +Example 73. Configuring matcher options + +``` +ExampleMatcher matcher = ExampleMatcher.matching() + .withMatcher("firstname", endsWith()) + .withMatcher("lastname", startsWith().ignoreCase()); +} +``` + +Another way to configure matcher options is to use lambdas (introduced in Java 8). +This approach creates a callback that asks the implementor to modify the matcher. +You need not return the matcher, because configuration options are held within the matcher instance. +The following example shows a matcher that uses lambdas: + +Example 74. Configuring matcher options with lambdas + +``` +ExampleMatcher matcher = ExampleMatcher.matching() + .withMatcher("firstname", match -> match.endsWith()) + .withMatcher("firstname", match -> match.startsWith()); +} +``` + +Queries created by `Example` use a merged view of the configuration. +Default matching settings can be set at the `ExampleMatcher` level, while individual settings can be applied to particular property paths. 
+Settings that are set on `ExampleMatcher` are inherited by property path settings unless they are defined explicitly. +Settings on a property path have higher precedence than default settings. +The following table describes the scope of the various `ExampleMatcher` settings: + +| Setting | Scope | +|--------------------|----------------------------------| +| Null-handling | `ExampleMatcher` | +| String matching |`ExampleMatcher` and property path| +|Ignoring properties | Property path | +| Case sensitivity |`ExampleMatcher` and property path| +|Value transformation| Property path | + +## 7. Auditing + +### 7.1. Basics + +Spring Data provides sophisticated support to transparently keep track of who created or changed an entity and when the change happened. To benefit from that functionality, you have to equip your entity classes with auditing metadata that can be defined either using annotations or by implementing an interface. +Additionally, auditing has to be enabled either through Annotation configuration or XML configuration to register the required infrastructure components. +Please refer to the store-specific section for configuration samples. + +| |Applications that only track creation and modification dates do not need to specify an [`AuditorAware`](#auditing.auditor-aware).| +|---|---------------------------------------------------------------------------------------------------------------------------------| + +#### 7.1.1. Annotation-based Auditing Metadata + +We provide `@CreatedBy` and `@LastModifiedBy` to capture the user who created or modified the entity as well as `@CreatedDate` and `@LastModifiedDate` to capture when the change happened. + +Example 75. An audited entity + +``` +class Customer { + + @CreatedBy + private User user; + + @CreatedDate + private Instant createdDate; + + // … further properties omitted +} +``` + +As you can see, the annotations can be applied selectively, depending on which information you want to capture. 
The annotations capturing when changes were made can be used on properties of type Joda-Time, `DateTime`, legacy Java `Date` and `Calendar`, JDK8 date and time types, and `long` or `Long`. + +Auditing metadata does not necessarily need to live in the root level entity but can be added to an embedded one (depending on the actual store in use), as shown in the snippet below. + +Example 76. Audit metadata in embedded entity + +``` +class Customer { + + private AuditMetadata auditingMetadata; + + // … further properties omitted +} + +class AuditMetadata { + + @CreatedBy + private User user; + + @CreatedDate + private Instant createdDate; + +} +``` + +#### 7.1.2. Interface-based Auditing Metadata + +In case you do not want to use annotations to define auditing metadata, you can let your domain class implement the `Auditable` interface. It exposes setter methods for all of the auditing properties. + +#### 7.1.3. `AuditorAware` + +In case you use either `@CreatedBy` or `@LastModifiedBy`, the auditing infrastructure somehow needs to become aware of the current principal. To do so, we provide an `AuditorAware` SPI interface that you have to implement to tell the infrastructure who the current user or system interacting with the application is. The generic type `T` defines what type the properties annotated with `@CreatedBy` or `@LastModifiedBy` have to be. + +The following example shows an implementation of the interface that uses Spring Security’s `Authentication` object: + +Example 77. 
Implementation of `AuditorAware` based on Spring Security + +``` +class SpringSecurityAuditorAware implements AuditorAware { + + @Override + public Optional getCurrentAuditor() { + + return Optional.ofNullable(SecurityContextHolder.getContext()) + .map(SecurityContext::getAuthentication) + .filter(Authentication::isAuthenticated) + .map(Authentication::getPrincipal) + .map(User.class::cast); + } +} +``` + +The implementation accesses the `Authentication` object provided by Spring Security and looks up the custom `UserDetails` instance that you have created in your `UserDetailsService` implementation. We assume here that you are exposing the domain user through the `UserDetails` implementation but that, based on the `Authentication` found, you could also look it up from anywhere. + +#### 7.1.4. `ReactiveAuditorAware` + +When using reactive infrastructure you might want to make use of contextual information to provide `@CreatedBy` or `@LastModifiedBy` information. +We provide a `ReactiveAuditorAware` SPI interface that you have to implement to tell the infrastructure who the current user or system interacting with the application is. The generic type `T` defines what type the properties annotated with `@CreatedBy` or `@LastModifiedBy` have to be. + +The following example shows an implementation of the interface that uses reactive Spring Security’s `Authentication` object: + +Example 78. 
Implementation of `ReactiveAuditorAware` based on Spring Security + +``` +class SpringSecurityAuditorAware implements ReactiveAuditorAware { + + @Override + public Mono getCurrentAuditor() { + + return ReactiveSecurityContextHolder.getContext() + .map(SecurityContext::getAuthentication) + .filter(Authentication::isAuthenticated) + .map(Authentication::getPrincipal) + .map(User.class::cast); + } +} +``` + +The implementation accesses the `Authentication` object provided by Spring Security and looks up the custom `UserDetails` instance that you have created in your `UserDetailsService` implementation. We assume here that you are exposing the domain user through the `UserDetails` implementation but that, based on the `Authentication` found, you could also look it up from anywhere. + +## Appendices + +## Appendix A: Namespace reference + +### The `` Element + +The `` element triggers the setup of the Spring Data repository infrastructure. The most important attribute is `base-package`, which defines the package to scan for Spring Data repository interfaces. See “[XML Configuration](#repositories.create-instances.spring)”. The following table describes the attributes of the `` element: + +| Name | Description | +|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `base-package` |Defines the package to be scanned for repository interfaces that extend `*Repository` (the actual interface is determined by the specific Spring Data module) in auto-detection mode. All packages below the configured package are scanned, too. Wildcards are allowed.| +| `repository-impl-postfix` | Defines the postfix to autodetect custom repository implementations. Classes whose names end with the configured postfix are considered as candidates. 
Defaults to `Impl`. | +| `query-lookup-strategy` | Determines the strategy to be used to create finder queries. See “[Query Lookup Strategies](#repositories.query-methods.query-lookup-strategies)” for details. Defaults to `create-if-not-found`. | +| `named-queries-location` | Defines the location to search for a Properties file containing externally defined queries. | +|`consider-nested-repositories`| Whether nested repository interface definitions should be considered. Defaults to `false`. | + +## Appendix B: Populators namespace reference + +### The \ element + +The `` element allows to populate the a data store via the Spring Data repository infrastructure.[1] + +| Name | Description | +|-----------|----------------------------------------------------------------------------------------| +|`locations`|Where to find the files to read the objects from the repository shall be populated with.| + +## Appendix C: Repository query keywords + +### Supported query method subject keywords + +The following table lists the subject keywords generally supported by the Spring Data repository query derivation mechanism to express the predicate. +Consult the store-specific documentation for the exact list of supported keywords, because some keywords listed here might not be supported in a particular store. + +| Keyword | Description | +|--------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`find…By`, `read…By`, `get…By`, `query…By`, `search…By`, `stream…By`|General query method returning typically the repository type, a `Collection` or `Streamable` subtype or a result wrapper such as `Page`, `GeoResults` or any other store-specific result wrapper. 
Can be used as `findBy…`, `findMyDomainTypeBy…` or in combination with additional keywords.| +| `exists…By` | Exists projection, returning typically a `boolean` result. | +| `count…By` | Count projection returning a numeric result. | +| `delete…By`, `remove…By` | Delete query method returning either no result (`void`) or the delete count. | +| `…First…`, `…Top…` | Limit the query results to the first `` of results. This keyword can occur in any place of the subject between `find` (and the other keywords) and `by`. | +| `…Distinct…` | Use a distinct query to return only unique results. Consult the store-specific documentation whether that feature is supported. This keyword can occur in any place of the subject between `find` (and the other keywords) and `by`. | + +### Supported query method predicate keywords and modifiers + +The following table lists the predicate keywords generally supported by the Spring Data repository query derivation mechanism. +However, consult the store-specific documentation for the exact list of supported keywords, because some keywords listed here might not be supported in a particular store. 
+ +| Logical keyword | Keyword expressions | +|---------------------|----------------------------------------------| +| `AND` | `And` | +| `OR` | `Or` | +| `AFTER` | `After`, `IsAfter` | +| `BEFORE` | `Before`, `IsBefore` | +| `CONTAINING` | `Containing`, `IsContaining`, `Contains` | +| `BETWEEN` | `Between`, `IsBetween` | +| `ENDING_WITH` | `EndingWith`, `IsEndingWith`, `EndsWith` | +| `EXISTS` | `Exists` | +| `FALSE` | `False`, `IsFalse` | +| `GREATER_THAN` | `GreaterThan`, `IsGreaterThan` | +|`GREATER_THAN_EQUALS`| `GreaterThanEqual`, `IsGreaterThanEqual` | +| `IN` | `In`, `IsIn` | +| `IS` | `Is`, `Equals`, (or no keyword) | +| `IS_EMPTY` | `IsEmpty`, `Empty` | +| `IS_NOT_EMPTY` | `IsNotEmpty`, `NotEmpty` | +| `IS_NOT_NULL` | `NotNull`, `IsNotNull` | +| `IS_NULL` | `Null`, `IsNull` | +| `LESS_THAN` | `LessThan`, `IsLessThan` | +| `LESS_THAN_EQUAL` | `LessThanEqual`, `IsLessThanEqual` | +| `LIKE` | `Like`, `IsLike` | +| `NEAR` | `Near`, `IsNear` | +| `NOT` | `Not`, `IsNot` | +| `NOT_IN` | `NotIn`, `IsNotIn` | +| `NOT_LIKE` | `NotLike`, `IsNotLike` | +| `REGEX` | `Regex`, `MatchesRegex`, `Matches` | +| `STARTING_WITH` |`StartingWith`, `IsStartingWith`, `StartsWith`| +| `TRUE` | `True`, `IsTrue` | +| `WITHIN` | `Within`, `IsWithin` | + +In addition to filter predicates, the following list of modifiers is supported: + +| Keyword | Description | +|----------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `IgnoreCase`, `IgnoringCase` | Used with a predicate keyword for case-insensitive comparison. | +|`AllIgnoreCase`, `AllIgnoringCase`| Ignore case for all suitable properties. Used somewhere in the query method predicate. | +| `OrderBy…` |Specify a static sorting order followed by the property path and direction (e. g. 
`OrderByFirstnameAscLastnameDesc`).| + +## Appendix D: Repository query return types + +### Supported Query Return Types + +The following table lists the return types generally supported by Spring Data repositories. +However, consult the store-specific documentation for the exact list of supported return types, because some types listed here might not be supported in a particular store. + +| |Geospatial types (such as `GeoResult`, `GeoResults`, and `GeoPage`) are available only for data stores that support geospatial queries.
Some store modules may define their own result wrapper types.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| Return type | Description | +|------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `void` | Denotes no return value. | +| Primitives | Java primitives. | +| Wrapper types | Java wrapper types. | +| `T` | A unique entity. Expects the query method to return one result at most. If no result is found, `null` is returned. More than one result triggers an `IncorrectResultSizeDataAccessException`. | +| `Iterator` | An `Iterator`. | +| `Collection` | A `Collection`. | +| `List` | A `List`. | +| `Optional` | A Java 8 or Guava `Optional`. Expects the query method to return one result at most. If no result is found, `Optional.empty()` or `Optional.absent()` is returned. More than one result triggers an `IncorrectResultSizeDataAccessException`. | +| `Option` | Either a Scala or Vavr `Option` type. Semantically the same behavior as Java 8’s `Optional`, described earlier. | +| `Stream` | A Java 8 `Stream`. | +| `Streamable` | A convenience extension of `Iterable` that directy exposes methods to stream, map and filter results, concatenate them etc. | +|Types that implement `Streamable` and take a `Streamable` constructor or factory method argument| Types that expose a constructor or `….of(…)`/`….valueOf(…)` factory method taking a `Streamable` as argument. See [Returning Custom Streamable Wrapper Types](#repositories.collections-and-iterables.streamable-wrapper) for details. 
| +| Vavr `Seq`, `List`, `Map`, `Set` | Vavr collection types. See [Support for Vavr Collections](#repositories.collections-and-iterables.vavr) for details. | +| `Future` | A `Future`. Expects a method to be annotated with `@Async` and requires Spring’s asynchronous method execution capability to be enabled. | +| `CompletableFuture` | A Java 8 `CompletableFuture`. Expects a method to be annotated with `@Async` and requires Spring’s asynchronous method execution capability to be enabled. | +| `ListenableFuture` | A `org.springframework.util.concurrent.ListenableFuture`. Expects a method to be annotated with `@Async` and requires Spring’s asynchronous method execution capability to be enabled. | +| `Slice` | A sized chunk of data with an indication of whether there is more data available. Requires a `Pageable` method parameter. | +| `Page` | A `Slice` with additional information, such as the total number of results. Requires a `Pageable` method parameter. | +| `GeoResult` | A result entry with additional information, such as the distance to a reference location. | +| `GeoResults` | A list of `GeoResult` with additional information, such as the average distance to a reference location. | +| `GeoPage` | A `Page` with `GeoResult`, such as the average distance to a reference location. | +| `Mono` |A Project Reactor `Mono` emitting zero or one element using reactive repositories. Expects the query method to return one result at most. If no result is found, `Mono.empty()` is returned. More than one result triggers an `IncorrectResultSizeDataAccessException`.| +| `Flux` | A Project Reactor `Flux` emitting zero, one, or many elements using reactive repositories. Queries returning `Flux` can emit also an infinite number of elements. | +| `Single` | A RxJava `Single` emitting a single element using reactive repositories. Expects the query method to return one result at most. If no result is found, `Mono.empty()` is returned. 
More than one result triggers an `IncorrectResultSizeDataAccessException`. | +| `Maybe` | A RxJava `Maybe` emitting zero or one element using reactive repositories. Expects the query method to return one result at most. If no result is found, `Mono.empty()` is returned. More than one result triggers an `IncorrectResultSizeDataAccessException`. | +| `Flowable` | A RxJava `Flowable` emitting zero, one, or many elements using reactive repositories. Queries returning `Flowable` can emit also an infinite number of elements. | + +--- + +[1](#_footnoteref_1). see [XML Configuration](#repositories.create-instances.spring) + diff --git a/docs/en/spring-flo/README.md b/docs/en/spring-flo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-flo/spring-flo.md b/docs/en/spring-flo/spring-flo.md new file mode 100644 index 0000000000000000000000000000000000000000..c3d6a0ca910a8ee92cb63287412c17d0155b7585 --- /dev/null +++ b/docs/en/spring-flo/spring-flo.md @@ -0,0 +1,378 @@ +# Welcome to the Spring Flo wiki! +Spring Flo is a set of [Angular JS](https://angularjs.org/) directives for a diagram editor able to represent a DSL graphically and synchronize graphical and textual representation of that DSL. Graphical representation is done with a [Joint JS](http://jointjs.com/) graph object, textual representation can be either a plain HTML element (such as ` + +``` + +The HTML above translates into a page with toolbar for buttons (Layout and Show/Hide Palette), text area for DSL and the Flo editor for graph representation of the DSL. + +## All the extension points: + +### Metamodel Service +This service enables the domain in which Flo is being used to specify what kinds of element are being connected together in the graph and also how the graph should be converted to-and-from a textual representation. 
[Sample metamodel service is here](https://github.com/spring-projects/spring-flo/blob/master/samples/spring-flo-sample/src/main/resources/static/js/metamodel-service.js). +#### textToGraph(flo, definition) +Sets the graph contents for the `flo` object based on the textual representation of the DSL from `definition` object. Text is transformed into the corresponding Joint JS graph content. The graph is to be populated via `flo` objects functions such as `flo.createLink()` and `flo.createNode()` and cleared with `flo.clearGraph` +#### graphToText(flo, definition) +Convert the current graph available from the `flo` object into a textual representation which is then set (as the `text` property) on the `definition` object. +#### load() +Returns a promise that resolves to a `metamodel` object. The `metamodel` object layout is a map of element `group` names to a map of elements that belong to this `group`. The map of elements that belong to the `group` is a mapping between element's `name` and element's [Metadata Object](https://github.com/spring-projects/spring-flo/wiki#element-metadata) +#### refresh() _(Optional)_ +Refreshes the meta-model and returns a promise that is resolved to the same result as [load()](#load). Refresh should also fire event to `metamodel` change listeners. +#### encodeTextToDSL(text) _(Optional)_ +Encodes DSL element property value text to the DSL required format. Example is converting multiline text into a single line required by the DSL format. Used to display the property value in a human readable format. +#### decodeTextFromDSL(dsl) _(Optional)_ +Decodes DSL element property value text from DSL format. Example is converting single line text into a multiline text, i.e. replacing escaped line breaks. Used to set a property value for DSL element entered by the user via UI. +#### subscribe(listener) _(Optional)_ +Adds a listener to `metamodel` events. 
(See [Metamodel Listener](#metamodel-listener)) +#### unsubscribe(listener) _(Optional)_ +Removes `metamodel` events listener. (See [Metamodel Listener](#metamodel-listener)) +#### isValidPropertyValue(element, key, value) _(Optional)_ +Check if the value being specified for the key on the specified element is allowed. For example: if the key takes an integer, don't allow alphabetic characters. + +### Render Service +The service is responsible for visual representation of graph elements based on the metadata (coming from [Metamodel Service](#metamodel-service)). This service is **optional**. [Sample render service is here](https://github.com/spring-projects/spring-flo/blob/master/samples/spring-flo-sample/src/main/resources/static/js/render-service.js). +#### createNode(metadata, properties) _(Optional)_ +Creates an instance of Joint JS graph node model object (`joint.dia.Element`). Parameters that may affect the kind of node model object are element's [metadata](#element-metadata) and map of properties (if any passed in). +#### createLink(source, target, metadata, properties) _(Optional)_ +Creates an instance of Joint JS graph link model object (`joint.dia.Link`). Parameters that may affect the kind of link model object are element's [metadata](#element-metadata), map of properties (if any passed in), source and target elements +#### createHandle(kind, parent) _(Optional)_ +Creates an instance of Joint JS graph node model object (`joint.dia.Element`). An example of a handle is a shape shown next to the parent shape interacting with which results in some editing action over the parent shape. Parameters that may affect the kind of handle model object are `kind` of type `string` (user defined, i.e. `delete`, `resize`, etc.) and handle's `parent` element. This function is only called by the framework if Editor Service `createHandles()` function is implemented. 
+#### createDecoration(kind, parent) _(Optional)_ +Creates an instance of Joint JS graph node model object (`joint.dia.Element`). An example of decoration is a validation marker displayed over the parent shape. Parameters that may affect the kind of decoration model object are `kind` of type `string` and decoration's `parent` element. Note that `kind` parameter is coming from the framework (unlike for `createHandle` function). This function is only called by the framework if Editor Service `validateNode()` function is implemented. (At the moment decorations are only the validation error markers). +#### initializeNewNode(node, context) _(Optional)_ +Performs any additional initialization of a newly created graph `node` when `node` is already added to the Joint JS graph and rendered on the canvas, e.g. element's SVG DOM structure is available. The `context` parameter is an object with `paper` and `graph` properties applicable for the `node`. Useful to perform any kind of initialization on a node when it's SVG DOM is appended to the page DOM. Examples: fit string label inside a shape, use angular directive on a shape, add DOM listeners etc. +#### initializeNewLink(link, context) _(Optional)_ +Performs any additional initialization of a newly created graph `link` when `link` is already added to the Joint JS graph and rendered on the canvas, e.g. element's SVG DOM structure is available. The `context` parameter is an object with `paper` and `graph` properties applicable for the `link`. Useful to perform any kind of initialization on a link when it's SVG DOM is appended to the page DOM. Examples: use angular directive on a shape, add DOM listeners etc. +#### initializeNewHandle(handle, context) _(Optional)_ +Performs any additional initialization of a newly created graph `handle` when `handle` is already added to the Joint JS graph and rendered on the canvas, e.g. element's SVG DOM structure is available. 
The `context` parameter is an object with `paper` and `graph` properties applicable for the `handle`. Useful to perform any kind of initialization on a handle shape when it's SVG DOM is appended to the page DOM. Examples: fit string label inside a shape, use angular directive on a shape, add DOM listeners etc. +#### initializeNewDecoration(decoration, context) _(Optional)_ +Performs any additional initialization of a newly created graph `decoration` when `decoration` is already added to the Joint JS graph and rendered on the canvas, e.g. element's SVG DOM structure is available. The `context` parameter is an object with `paper` and `graph` properties applicable for the `decoration`. Useful to perform any kind of initialization on a decoration shape when it's SVG DOM is appended to the page DOM. Examples: fit string label inside a shape, use angular directive on a shape, add DOM listeners etc. +#### getNodeView() _(Optional)_ +Returns instance of `joint.dia.ElementView`. It can also be a function of the form `function(element)` that takes an element model and should return an object responsible for rendering that model onto the screen. Under normal circumstances this function does not need to be implemented and the Joint JS view object created by the framework should be enough. Implement this function if different nodes require different Joint Js views or view has some special rendering (i.e. embedded HTML elements). See [Joint JS Paper Options](http://jointjs.com/api#joint.dia.Paper:options) +#### getLinkView() _(Optional)_ +Returns instance of Joint JS `joint.dia.LinkView`. Default is `joint.dia.LinkView`. It can also be a function of the form `function(link)` that takes a link model and should return an object responsible for rendering that model onto the screen. Under normal circumstances this function does not need to be implemented and the Joint JS view object created by the framework should be enough. 
Implement this function if different links require different Joint JS views or view has some special rendering (i.e. pattern applied to a line - `joint.shapes.flo.PatternLinkView`). See [Joint JS Paper Options](http://jointjs.com/api#joint.dia.Paper:options) +#### layout(paper) _(Optional)_ +Responsible for laying out the Joint JS graph that can be derived from passed in `paper` parameter (`paper.model`). +#### handleLinkEvent(paper, event, link) _(Optional)_ +Responsible for handling `event` that occurred on the `link` that belong to passed in Joint JS `paper` object. The `event` parameter is a `string` with possible values: `'add'`, `'remove'` or Joint JS native link change events such as `'change:source'`, `'change:target'`, etc. see [Joint JS Link Events](http://jointjs.com/api#joint.dia.Link:events) +#### isSemanticProperty(propertyPath, element) _(Optional)_ +Returns `true` for `string` property attribute path `propertyPath` on an `element` if graphs needs to perform some visual update based on `propertyPath` value change (Not needed for properties under `props` on an `element`). Visual update is performed by [refreshVisuals()](#refreshVisuals). The property path `propertyPath` is relative to Joint JS element `attrs` property +#### refreshVisuals(element, propertyPath, paper) _(Optional)_ +Performs some visual update of the graph or, which is more likely, the passed in `element` displayed on Joint JS `paper` based on the changed property specified by `propertyPath` +#### getLinkAnchorPoint(linkView, view, port, reference) _(Optional)_ +This function allows you to customize what are the anchor points of links. The function must return a point (with `x` and `y` properties) where the link anchors to the element. The function takes the link view, element view, the `port` (SVG element) the link should stick to and a reference point (either the closest vertex or the anchor point on the other side of the link). 
+ +### Editor Service +The service responsible for providing Flo editor with rich editing capabilities such as handles around selected shapes, custom drag and drop behaviour, live and static validation. This service is **optional**. [Sample editor service is here](https://github.com/spring-projects/spring-flo/blob/master/samples/spring-flo-sample/src/main/resources/static/js/editor-service.js) +#### createHandles(flo, createHandle, selected) _(Optional)_ +Called when node is selected and handles can be displayed. Handles are usually small shapes around the `selected` Joint JS node in `flo` editor interactions with which modify properties on `selected` node, i.e. resize or delete handles. Call `createHandle(selected, kind, clickHandlerFunction, coordinate)` function to create a handle. The `kind` parameter is a `string` kind of a handle, `clickHandlerFunction` is performed when handle has been clicked on and `coordinate` is the place to put the handle shape. Note that if this function is implemented then Render Service `createHandle(...)` function must be implemented as well. The framework will remove handles automatically when needed, hence no need to worry about this on the client side. +#### validatePort(paper, view, portView) _(Optional)_ +Decide whether to create a link if the user clicks a port. The `portView` is the DOM element representing the port, `view` is the port's parent Joint JS view object shown in Joint JS `paper` +#### validateLink(flo, cellViewS, portS, cellViewT, portT, end, linkView) _(Optional)_ +Decide whether to allow or disallow a connection between the source view/port (`cellViewS`/`portS`) and target view/port (`cellViewT`/`portT`). The `end` is either `'source'` or `'target'` and tells which end of the link is being dragged. This is useful for defining whether, for example, a link starting in a port POut of element A can lead to a port PIn of element B. 
+#### calculateDragDescriptor(flo, draggedView, targetUnderMouse, coordinate, context) _(Optional)_ +Called when dragging of a node `draggedView` is in progress over `targetUnderMouse` Joint JS graph element (node or link) at `coordinate`. There are also `flo` object parameter and `context` object, which currently just has a `boolean` property `palette` to denote whether drag and drop occurring on the palette or canvas. The function should return a [Drag Descriptor Object](#drag-descriptor). +#### handleNodeDropping(flo, dragDescriptor) _(Optional)_ +Performs necessary graph manipulations when the node being dragged is dropped. The `dragDescriptor` [Drag Descriptor](#drag-descriptor) should have the mandatory information on what is being dragged and where it's being dropped. The `flo` object parameter would help to make necessary graph modifications +#### showDragFeedback(flo, dragDescriptor) _(Optional)_ +Any custom visual feedback when dragging a node over some graph element (node or link) can be drawn by this function. `dragDescriptor` parameter has a [Drag Descriptor Object](#drag-descriptor) that has complete information about dragging in progress and `flo` object would help with drawing feedback using Joint JS +#### hideDragFeedback(flo, dragDescriptor) _(Optional)_ +Removes any custom visual feedback drawn by [showDragFeedback()](#show-drag-feedback). Has the same parameters. +#### validateNode(flo, node) _(Optional)_ +Returns a `javascript` array of `string` error messages that are the result of validating `node` Joint JS graph node on the canvas in `flo` editor +#### preDelete(flo, deletedElement) _(Optional)_ +Called prior to removal of the specified `deletedElement` allowing extra tidyup before that happens. For example: removes any dependent Joint JS graph elements related to the element about to be deleted. +#### interactive _(Optional)_ +If set to `false`, interaction with elements and links is disabled. 
If it is a function, it will be called with the cell view in action and the name of the method it is evaluated in (`'pointerdown'`, `'pointermove'`, ...). If the returned value of such a function is false interaction will be disabled for the action. For links, there are special properties of the interaction object that are useful to disable the default behaviour. These properties are: `vertexAdd`, `vertexMove`, `vertexRemove` and `arrowheadMove`. By setting any of these properties to false, you can disable the related default action on links. +#### allowLinkVertexEdit _(Optional)_ +If set to `false` link vertex (or bend point) creation or editing (e.g. movement) is not allowed in the editor. + +## Data structure reference: +### Flo +This object is created by the `flo-editor` directive controller and it contains various editor specific properties and functions. +#### scheduleUpdateGraphRepresentation() +Schedules an asynchronous update of the graph DSL representation based on the text DSL representation. +#### updateGraphRepresentation() +Asynchronously update the graph DSL representation based on the text DSL representation. A promise is returned which gets resolved when the update completes. +#### updateTextRepresentation() +Asynchronously update the text DSL representation (`definition` object) based on the graph DSL representation. A promise is returned which gets resolved when the update completes. +#### performLayout() +Arranges nodes and links of the graph on the canvas. +#### clearGraph() +Clears out canvas of all nodes and links. With syncing on this also causes the text DSL representation to clear. 
+#### getGraph() +Returns a reference to `joint.dia.Graph` object instance of the canvas contents (The graph model, see [Joint JS Graph API](http://jointjs.com/api#joint.dia.Graph) +#### getPaper() +Returns a reference to joint.dia.Paper object instance of the canvas (The graph view object, see [Joint JS Paper API](http://jointjs.com/api#joint.dia.Paper) +#### enableSyncing(enable) +Enables or disables textual and graph DSL representation synchronization mechanism based on the passed `boolean` parameter `enable`. Useful when textual DSL representation UI is collapsed. +#### getSelection() +Returns currently selected graph model element (node or link) on the canvas +#### zoomPercent(percent) +Angular getter/setter function for the zoom value on the canvas. Sets zoom percent value if the integer `number` parameter is supplied. Returns the integer percent value if parameter is missing (getter mode) +#### gridSize(gridSize) +Angular getter/setter function for the canvas grid size in pixels. Sets grid width value if the integer `number` parameter `gridSize` is supplied. Returns the current grid size value if parameter is missing (getter mode). Note that setting grid width to `1` turns the grid off. Invalid values for `gridSize` are ignored +#### getMinZoom() +Returns integer `number` minimum allowed value for the zoom percent. Useful to set the proper range for zoom controls. Needed by the zoom control on the canvas (if it is set to be shown). The value equals `5` by default (5%). +#### getMaxZoom() +Returns integer `number` maximum allowed value for the zoom percent. Useful to set the proper range for zoom controls. Needed by the zoom control on the canvas (if it is set to be shown). The value equals `400` by default (400%). +#### getZoomStep() +Returns integer `number` zoom percent increment/decrement step. Needed by the zoom control on the canvas (if it is set to be shown). The value equals `5` by default (5% increment/decrement value). 
+#### fitToPage() +Fits the whole graph into canvas's viewport (i.e. no need to scroll to look for content on the canvas). Adjusts the zoom level and scroll position appropriately +#### readOnlyCanvas(newValue) +Angular getter/setter function for the canvas "read-only" property. Read-only canvas does not allow for any user editing interaction of any shapes on the canvas. Sets the read-only property based on the passed in `newValue` parameter as the result the canvas toggles the behaviour for read-only state right away. Returns the current "read-only" state value if parameter is missing (getter mode). +#### createNode(metadata, properties, location) +Creates and returns the newly created Joint JS graph node (instance of `joint.dia.Element`) based on the graph node `metadata` object (see [Element Metadata](#element-metadata)), `properties` key-value pairs map, and location on the canvas (object with `x` and `y` properties). The new node is also added to the Flo canvas Joint JS `graph` and hence to the Joint JS `paper` and appears right away on the canvas before this function returns the result. +#### createLink(source, target, metadata, properties); +Creates and returns the newly created Joint JS graph link (instance of `joint.dia.Link`) between `source` and `target` nodes (of type `joint.dia.Element`) based on the graph link `metadata` object (see [Element Metadata](#element-metadata)), `properties` key-value pairs map. The new link is also added to the Flo canvas Joint JS `graph` and hence to the Joint JS `paper` and appears right away on the canvas before this function returns the result. + +### Definition +This object holds data related to DSL's textual representation. Typically this object should at least have `text` property of type `string` for the DSL text, but it can also have other properties that might be added by client's Metamodel Service graph-text conversion functions. 
+ +### Metamodel Listener +Typically Metamodel object is loaded asynchronously via HTTP request. If metadata is cached by the service then it might be useful to register listeners. Flo editor palette would automatically rebuild itself if metamodel has changed +```javascript +{ + metadataError: function(data) { + /* Error loading metadata has occurred */ + }, + metadataRefresh: function() { + /* Metadata is about to be refreshed */ + }, + metadataChanged: function(data) { + /* New metadata is available */ + } +} +``` +### Drag Descriptor +API client is free to add extra properties to this object (i.e. may help drawing visual feedback) +```javascript +{ + context: context, /* String 'palette' or 'canvas' */ + source: { + cell: draggedNode, /* Joint JS graph node being dragged */ + selector: selector, /* Optional. Joint JS CSS class selector for the subelement of the dragged node*/, + port: portType /* Optional. Involved port DOM element type attribute value == port Joint JS markup 'type' property */ + }, + target: { + cell: targetNode, /* Joint JS graph node target under mouse element */ + selector: selector, /* Optional. Joint JS CSS class selector for the element under mouse within the targetNode */ + port: portType /* Optional. Sub-element under mouse is a port. Port DOM element type attribute value == port Joint JS markup 'type' property */ + }, +}; +``` + +### Joint JS Graph Node Markup +```javascript +model: /* Joint JS model object for a module shape */ + ... + attributes: + ... 
+ angle: 0, /* Joint JS property - rotation angle */ + + id: "02be8001-ea1e-4f30-a94e-9503da5964b5" /* Joint JS property - element model UUID + + position: /* Joint JS property - coordinates of the shape's bounding rectangle */ + x: 119 + y: 46 + + size: /* Joint JS property - size of the shape's bounding rectangle */ + height: 40 + width: 120 + + type: "sinspctr.IntNode" /* Flo property - internal, type (node, link, handle, decoration, etc) */ + + z: 1 /* Joint JS property - z-index of the shape + + ports: /* Joint JS property - internal, ports available on the shape */ + input: + id: "input" + output: + id: "output" + tap: + id: "tap" + + attrs: /* Joint JS property - user defined rendering constructs and semantic properties */ + + . /*\ */ + .border /* \ */ + .box /* \ */ + .input-port /* \ */ + .label1 /* \___User defined rendering constructs implied by the markup */ + .label2 /* / */ + .output-port /* / */ + .shape /* / */ + .stream-label /* / */ + .tap-port /*/ */ + + metadata: /* Flo property. Node metadata supplied by Metamodel Service */ + + props: /* Flo property. Semantic properties of the element. Name <-> value pair map */ + dir: "/Users/x/tmp" + file: "temp.tmp" + debug: true + + ... + ... +... +``` + +### Element Metadata +Graphical element metadata supplied by Metamodel Service +```javascript +metadata: { + + get: function(), /* function taking property key string as a parameter */ + /* Returns promise that resolves to the metadata object of the property */ + /* See snippet below showing the format of a property metadata */ + + group: "source", /* Category/Group of an element. 
Translates into palette groups of elements */ + + name: "file", /* Name or Type of an element (should be unique within its group) */ + + metadata: { /* Additional metadata for the element */ + titleProperty: 'props/title', /* Property to be displayed at the top of all properties in properties Div */ + noEditableProps: false, /* If true then element doesn't have properties to edit and properties Div is not shown */ + allow-additional-properties: true, /* Allows user to create new properties for element in the properties Div */ + } + +} +``` +Element's property metadata is expected to be as follows +```javascript + properties: { + info: { + defaultValue: null, + description: "General information about the file", + id: "info", + name: "info", + shortDescription: "File Info" + }, + + language: { + defaultValue: "English" + description: "Language of the file contents", + id: "language", + name: "language", + shortDescription: "Text Language" + }, + ... +``` diff --git a/docs/en/spring-for-apache-kafka/README.md b/docs/en/spring-for-apache-kafka/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-for-apache-kafka/spring-kafka.md b/docs/en/spring-for-apache-kafka/spring-kafka.md new file mode 100644 index 0000000000000000000000000000000000000000..f8f4ad8c9ac116cf3ab384413136953f735f872e --- /dev/null +++ b/docs/en/spring-for-apache-kafka/spring-kafka.md @@ -0,0 +1,7929 @@ +# Spring for Apache Kafka + +## 1. Preface + +The Spring for Apache Kafka project applies core Spring concepts to the development of Kafka-based messaging solutions. +We provide a “template” as a high-level abstraction for sending messages. +We also provide support for Message-driven POJOs. + +## 2. What’s new? + +### 2.1. What’s New in 2.8 Since 2.7 + +This section covers the changes made from version 2.7 to version 2.8. +For changes in earlier version, see [[history]](#history). + +#### 2.1.1. 
Kafka Client Version + +This version requires the 3.0.0 `kafka-clients` + +| |When using transactions, `kafka-clients` 3.0.0 and later no longer support `EOSMode.V2` (aka `BETA`) (and automatic fallback to `V1` - aka `ALPHA`) with brokers earlier than 2.5; you must therefore override the default `EOSMode` (`V2`) with `V1` if your brokers are older (or upgrade your brokers).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [Exactly Once Semantics](#exactly-once) and [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics) for more information. + +#### 2.1.2. Package Changes + +Classes and interfaces related to type mapping have been moved from `…​support.converter` to `…​support.mapping`. + +* `AbstractJavaTypeMapper` + +* `ClassMapper` + +* `DefaultJackson2JavaTypeMapper` + +* `Jackson2JavaTypeMapper` + +#### 2.1.3. Out of Order Manual Commits + +The listener container can now be configured to accept manual offset commits out of order (usually asynchronously). +The container will defer the commit until the missing offset is acknowledged. +See [Manually Committing Offsets](#ooo-commits) for more information. + +#### 2.1.4. `@KafkaListener` Changes + +It is now possible to specify whether the listener method is a batch listener on the method itself. +This allows the same container factory to be used for both record and batch listeners. + +See [Batch Listeners](#batch-listeners) for more information. + +Batch listeners can now handle conversion exceptions. + +See [Conversion Errors with Batch Error Handlers](#batch-listener-conv-errors) for more information. 
+ +`RecordFilterStrategy`, when used with batch listeners, can now filter the entire batch in one call. +See the note at the end of [Batch Listeners](#batch-listeners) for more information. + +#### 2.1.5. `KafkaTemplate` Changes + +You can now receive a single record, given the topic, partition and offset. +See [Using `KafkaTemplate` to Receive](#kafka-template-receive) for more information. + +#### 2.1.6. `CommonErrorHandler` Added + +The legacy `GenericErrorHandler` and its sub-interface hierarchies for record and batch listeners have been replaced by a new single interface `CommonErrorHandler` with implementations corresponding to most legacy implementations of `GenericErrorHandler`. +See [Container Error Handlers](#error-handlers) for more information. + +#### 2.1.7. Listener Container Changes + +The `interceptBeforeTx` container property is now `true` by default. + +The `authorizationExceptionRetryInterval` property has been renamed to `authExceptionRetryInterval` and now applies to `AuthenticationException` s in addition to `AuthorizationException` s previously. +Both exceptions are considered fatal and the container will stop by default, unless this property is set. + +See [Using `KafkaMessageListenerContainer`](#kafka-container) and [Listener Container Properties](#container-props) for more information. + +#### 2.1.8. Serializer/Deserializer Changes + +The `DelegatingByTopicSerializer` and `DelegatingByTopicDeserializer` are now provided. +See [Delegating Serializer and Deserializer](#delegating-serialization) for more information. + +#### 2.1.9. `DeadLetterPublishingRecover` Changes + +The property `stripPreviousExceptionHeaders` is now `true` by default. + +See [Managing Dead Letter Record Headers](#dlpr-headers) for more information. + +#### 2.1.10. Retryable Topics Changes + +Now you can use the same factory for retryable and non-retryable topics. +See [Specifying a ListenerContainerFactory](#retry-topic-lcf) for more information. 
+ +There’s now a manageable global list of fatal exceptions that will make the failed record go straight to the DLT. +Refer to [Exception Classifier](#retry-topic-ex-classifier) to see how to manage it. + +The KafkaBackOffException thrown when using the retryable topics feature is now logged at DEBUG level. +See [[change-kboe-logging-level]](#change-kboe-logging-level) if you need to change the logging level back to WARN or set it to any other level. + +## 3. Introduction + +This first part of the reference documentation is a high-level overview of Spring for Apache Kafka and the underlying concepts and some code snippets that can help you get up and running as quickly as possible. + +### 3.1. Quick Tour + +Prerequisites: You must install and run Apache Kafka. +Then you must put the Spring for Apache Kafka (`spring-kafka`) JAR and all of its dependencies on your class path. +The easiest way to do that is to declare a dependency in your build tool. + +If you are not using Spring Boot, declare the `spring-kafka` jar as a dependency in your project. 
+ +Maven + +``` + + org.springframework.kafka + spring-kafka + 2.8.3 + +``` + +Gradle + +``` +compile 'org.springframework.kafka:spring-kafka:2.8.3' +``` + +| |When using Spring Boot, (and you haven’t used start.spring.io to create your project), omit the version and Boot will automatically bring in the correct version that is compatible with your Boot version:| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Maven + +``` + + org.springframework.kafka + spring-kafka + +``` + +Gradle + +``` +compile 'org.springframework.kafka:spring-kafka' +``` + +However, the quickest way to get started is to use [start.spring.io](https://start.spring.io) (or the wizards in Spring Tool Suits and Intellij IDEA) and create a project, selecting 'Spring for Apache Kafka' as a dependency. + +#### 3.1.1. Compatibility + +This quick tour works with the following versions: + +* Apache Kafka Clients 3.0.0 + +* Spring Framework 5.3.x + +* Minimum Java version: 8 + +#### 3.1.2. Getting Started + +The simplest way to get started is to use [start.spring.io](https://start.spring.io) (or the wizards in Spring Tool Suits and Intellij IDEA) and create a project, selecting 'Spring for Apache Kafka' as a dependency. +Refer to the [Spring Boot documentation](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-kafka) for more information about its opinionated auto configuration of the infrastructure beans. + +Here is a minimal consumer application. + +##### Spring Boot Consumer App + +Example 1. 
Application + +Java + +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + public NewTopic topic() { + return TopicBuilder.name("topic1") + .partitions(10) + .replicas(1) + .build(); + } + + @KafkaListener(id = "myId", topics = "topic1") + public void listen(String in) { + System.out.println(in); + } + +} +``` + +Kotlin + +``` +@SpringBootApplication +class Application { + + @Bean + fun topic() = NewTopic("topic1", 10, 1) + + @KafkaListener(id = "myId", topics = ["topic1"]) + fun listen(value: String?) { + println(value) + } + +} + +fun main(args: Array) = runApplication(*args) +``` + +Example 2. application.properties + +``` +spring.kafka.consumer.auto-offset-reset=earliest +``` + +The `NewTopic` bean causes the topic to be created on the broker; it is not needed if the topic already exists. + +##### Spring Boot Producer App + +Example 3. Application + +Java + +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + public NewTopic topic() { + return TopicBuilder.name("topic1") + .partitions(10) + .replicas(1) + .build(); + } + + @Bean + public ApplicationRunner runner(KafkaTemplate template) { + return args -> { + template.send("topic1", "test"); + }; + } + +} +``` + +Kotlin + +``` +@SpringBootApplication +class Application { + + @Bean + fun topic() = NewTopic("topic1", 10, 1) + + @Bean + fun runner(template: KafkaTemplate) = + ApplicationRunner { template.send("topic1", "test") } + + companion object { + @JvmStatic + fun main(args: Array) = runApplication(*args) + } + +} +``` + +##### + +| |Spring for Apache Kafka is designed to be used in a Spring Application Context.
For example, if you create the listener container yourself outside of a Spring context, not all functions will work unless you satisfy all of the `…​Aware` interfaces that the container implements.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Here is an example of an application that does not use Spring Boot; it has both a `Consumer` and `Producer`. + +Example 4. Without Boot + +Java + +``` +public class Sender { + + public static void main(String[] args) { + AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(Config.class); + context.getBean(Sender.class).send("test", 42); + } + + private final KafkaTemplate template; + + public Sender(KafkaTemplate template) { + this.template = template; + } + + public void send(String toSend, int key) { + this.template.send("topic1", key, toSend); + } + +} + +public class Listener { + + @KafkaListener(id = "listen1", topics = "topic1") + public void listen1(String in) { + System.out.println(in); + } + +} + +@Configuration +@EnableKafka +public class Config { + + @Bean + ConcurrentKafkaListenerContainerFactory + kafkaListenerContainerFactory(ConsumerFactory consumerFactory) { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + return factory; + } + + @Bean + public ConsumerFactory consumerFactory() { + return new DefaultKafkaConsumerFactory<>(consumerProps()); + } + + private Map consumerProps() { + Map props = new HashMap<>(); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "group"); + props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class); + 
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); + props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + // ... + return props; + } + + @Bean + public Sender sender(KafkaTemplate template) { + return new Sender(template); + } + + @Bean + public Listener listener() { + return new Listener(); + } + + @Bean + public ProducerFactory producerFactory() { + return new DefaultKafkaProducerFactory<>(senderProps()); + } + + private Map senderProps() { + Map props = new HashMap<>(); + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(ProducerConfig.LINGER_MS_CONFIG, 10); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class); + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + //... + return props; + } + + @Bean + public KafkaTemplate kafkaTemplate(ProducerFactory producerFactory) { + return new KafkaTemplate(producerFactory); + } + +} +``` + +Kotlin + +``` +class Sender(private val template: KafkaTemplate) { + + fun send(toSend: String, key: Int) { + template.send("topic1", key, toSend) + } + +} + +class Listener { + + @KafkaListener(id = "listen1", topics = ["topic1"]) + fun listen1(`in`: String) { + println(`in`) + } + +} + +@Configuration +@EnableKafka +class Config { + + @Bean + fun kafkaListenerContainerFactory(consumerFactory: ConsumerFactory) = + ConcurrentKafkaListenerContainerFactory().also { it.consumerFactory = consumerFactory } + + @Bean + fun consumerFactory() = DefaultKafkaConsumerFactory(consumerProps) + + val consumerProps = mapOf( + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092", + ConsumerConfig.GROUP_ID_CONFIG to "group", + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG to IntegerDeserializer::class.java, + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG to StringDeserializer::class.java, + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG to "earliest" + ) + + @Bean + fun sender(template: KafkaTemplate) = 
Sender(template) + + @Bean + fun listener() = Listener() + + @Bean + fun producerFactory() = DefaultKafkaProducerFactory(senderProps) + + val senderProps = mapOf( + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092", + ProducerConfig.LINGER_MS_CONFIG to 10, + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG to IntegerSerializer::class.java, + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG to StringSerializer::class.java + ) + + @Bean + fun kafkaTemplate(producerFactory: ProducerFactory) = KafkaTemplate(producerFactory) + +} +``` + +As you can see, you have to define several infrastructure beans when not using Spring Boot. + +## 4. Reference + +This part of the reference documentation details the various components that comprise Spring for Apache Kafka. +The [main chapter](#kafka) covers the core classes to develop a Kafka application with Spring. + +### 4.1. Using Spring for Apache Kafka + +This section offers detailed explanations of the various concerns that impact using Spring for Apache Kafka. +For a quick but less detailed introduction, see [Quick Tour](#quick-tour). + +#### 4.1.1. Connecting to Kafka + +* `KafkaAdmin` - see [Configuring Topics](#configuring-topics) + +* `ProducerFactory` - see [Sending Messages](#sending-messages) + +* `ConsumerFactory` - see [Receiving Messages](#receiving-messages) + +Starting with version 2.5, each of these extends `KafkaResourceFactory`. +This allows changing the bootstrap servers at runtime by adding a `Supplier` to their configuration: `setBootstrapServersSupplier(() → …​)`. +This will be called for all new connections to get the list of servers. +Consumers and Producers are generally long-lived. +To close existing Producers, call `reset()` on the `DefaultKafkaProducerFactory`. +To close existing Consumers, call `stop()` (and then `start()`) on the `KafkaListenerEndpointRegistry` and/or `stop()` and `start()` on any other listener container beans. 
+ +For convenience, the framework also provides an `ABSwitchCluster` which supports two sets of bootstrap servers; one of which is active at any time. +Configure the `ABSwitchCluster` and add it to the producer and consumer factories, and the `KafkaAdmin`, by calling `setBootstrapServersSupplier()`. +When you want to switch, call `primary()` or `secondary()` and call `reset()` on the producer factory to establish new connection(s); for consumers, `stop()` and `start()` all listener containers. +When using `@KafkaListener` s, `stop()` and `start()` the `KafkaListenerEndpointRegistry` bean. + +See the Javadocs for more information. + +##### Factory Listeners + +Starting with version 2.5, the `DefaultKafkaProducerFactory` and `DefaultKafkaConsumerFactory` can be configured with a `Listener` to receive notifications whenever a producer or consumer is created or closed. + +Producer Factory Listener + +``` +interface Listener { + + default void producerAdded(String id, Producer producer) { + } + + default void producerRemoved(String id, Producer producer) { + } + +} +``` + +Consumer Factory Listener + +``` +interface Listener { + + default void consumerAdded(String id, Consumer consumer) { + } + + default void consumerRemoved(String id, Consumer consumer) { + } + +} +``` + +In each case, the `id` is created by appending the `client-id` property (obtained from the `metrics()` after creation) to the factory `beanName` property, separated by `.`. + +These listeners can be used, for example, to create and bind a Micrometer `KafkaClientMetrics` instance when a new client is created (and close it when the client is closed). + +The framework provides listeners that do exactly that; see [Micrometer Native Metrics](#micrometer-native). + +#### 4.1.2. Configuring Topics + +If you define a `KafkaAdmin` bean in your application context, it can automatically add topics to the broker. +To do so, you can add a `NewTopic` `@Bean` for each topic to the application context. 
+Version 2.3 introduced a new class `TopicBuilder` to make creation of such beans more convenient. +The following example shows how to do so: + +Java + +``` +@Bean +public KafkaAdmin admin() { + Map configs = new HashMap<>(); + configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + return new KafkaAdmin(configs); +} + +@Bean +public NewTopic topic1() { + return TopicBuilder.name("thing1") + .partitions(10) + .replicas(3) + .compact() + .build(); +} + +@Bean +public NewTopic topic2() { + return TopicBuilder.name("thing2") + .partitions(10) + .replicas(3) + .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd") + .build(); +} + +@Bean +public NewTopic topic3() { + return TopicBuilder.name("thing3") + .assignReplicas(0, Arrays.asList(0, 1)) + .assignReplicas(1, Arrays.asList(1, 2)) + .assignReplicas(2, Arrays.asList(2, 0)) + .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd") + .build(); +} +``` + +Kotlin + +``` +@Bean +fun admin() = KafkaAdmin(mapOf(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092")) + +@Bean +fun topic1() = + TopicBuilder.name("thing1") + .partitions(10) + .replicas(3) + .compact() + .build() + +@Bean +fun topic2() = + TopicBuilder.name("thing2") + .partitions(10) + .replicas(3) + .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd") + .build() + +@Bean +fun topic3() = + TopicBuilder.name("thing3") + .assignReplicas(0, Arrays.asList(0, 1)) + .assignReplicas(1, Arrays.asList(1, 2)) + .assignReplicas(2, Arrays.asList(2, 0)) + .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd") + .build() +``` + +Starting with version 2.6, you can omit `.partitions()` and/or `replicas()` and the broker defaults will be applied to those properties. +The broker version must be at least 2.4.0 to support this feature - see [KIP-464](https://cwiki.apache.org/confluence/display/KAFKA/KIP-464%3A+Defaults+for+AdminClient%23createTopic). 
+ +Java + +``` +@Bean +public NewTopic topic4() { + return TopicBuilder.name("defaultBoth") + .build(); +} + +@Bean +public NewTopic topic5() { + return TopicBuilder.name("defaultPart") + .replicas(1) + .build(); +} + +@Bean +public NewTopic topic6() { + return TopicBuilder.name("defaultRepl") + .partitions(3) + .build(); +} +``` + +Kotlin + +``` +@Bean +fun topic4() = TopicBuilder.name("defaultBoth").build() + +@Bean +fun topic5() = TopicBuilder.name("defaultPart").replicas(1).build() + +@Bean +fun topic6() = TopicBuilder.name("defaultRepl").partitions(3).build() +``` + +Starting with version 2.7, you can declare multiple `NewTopic` s in a single `KafkaAdmin.NewTopics` bean definition: + +Java + +``` +@Bean +public KafkaAdmin.NewTopics topics456() { + return new NewTopics( + TopicBuilder.name("defaultBoth") + .build(), + TopicBuilder.name("defaultPart") + .replicas(1) + .build(), + TopicBuilder.name("defaultRepl") + .partitions(3) + .build()); +} +``` + +Kotlin + +``` +@Bean +fun topics456() = KafkaAdmin.NewTopics( + TopicBuilder.name("defaultBoth") + .build(), + TopicBuilder.name("defaultPart") + .replicas(1) + .build(), + TopicBuilder.name("defaultRepl") + .partitions(3) + .build() +) +``` + +| |When using Spring Boot, a `KafkaAdmin` bean is automatically registered so you only need the `NewTopic` (and/or `NewTopics`) `@Bean` s.| +|---|---------------------------------------------------------------------------------------------------------------------------------------| + +By default, if the broker is not available, a message is logged, but the context continues to load. +You can programmatically invoke the admin’s `initialize()` method to try again later. +If you wish this condition to be considered fatal, set the admin’s `fatalIfBrokerNotAvailable` property to `true`. +The context then fails to initialize. 
+ +| |If the broker supports it (1.0.0 or higher), the admin increases the number of partitions if it is found that an existing topic has fewer partitions than the `NewTopic.numPartitions`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.7, the `KafkaAdmin` provides methods to create and examine topics at runtime. + +* `createOrModifyTopics` + +* `describeTopics` + +For more advanced features, you can use the `AdminClient` directly. +The following example shows how to do so: + +``` +@Autowired +private KafkaAdmin admin; + +... + + AdminClient client = AdminClient.create(admin.getConfigurationProperties()); + ... + client.close(); +``` + +#### 4.1.3. Sending Messages + +This section covers how to send messages. + +##### Using `KafkaTemplate` + +This section covers how to use `KafkaTemplate` to send messages. + +###### Overview + +The `KafkaTemplate` wraps a producer and provides convenience methods to send data to Kafka topics. +The following listing shows the relevant methods from `KafkaTemplate`: + +``` +ListenableFuture> sendDefault(V data); + +ListenableFuture> sendDefault(K key, V data); + +ListenableFuture> sendDefault(Integer partition, K key, V data); + +ListenableFuture> sendDefault(Integer partition, Long timestamp, K key, V data); + +ListenableFuture> send(String topic, V data); + +ListenableFuture> send(String topic, K key, V data); + +ListenableFuture> send(String topic, Integer partition, K key, V data); + +ListenableFuture> send(String topic, Integer partition, Long timestamp, K key, V data); + +ListenableFuture> send(ProducerRecord record); + +ListenableFuture> send(Message message); + +Map metrics(); + +List partitionsFor(String topic); + + T execute(ProducerCallback callback); + +// Flush the producer. 
+ +void flush(); + +interface ProducerCallback { + + T doInKafka(Producer producer); + +} +``` + +See the [Javadoc](https://docs.spring.io/spring-kafka/api/org/springframework/kafka/core/KafkaTemplate.html) for more detail. + +The `sendDefault` API requires that a default topic has been provided to the template. + +The API takes in a `timestamp` as a parameter and stores this timestamp in the record. +How the user-provided timestamp is stored depends on the timestamp type configured on the Kafka topic. +If the topic is configured to use `CREATE_TIME`, the user specified timestamp is recorded (or generated if not specified). +If the topic is configured to use `LOG_APPEND_TIME`, the user-specified timestamp is ignored and the broker adds in the local broker time. + +The `metrics` and `partitionsFor` methods delegate to the same methods on the underlying [`Producer`](https://kafka.apache.org/20/javadoc/org/apache/kafka/clients/producer/Producer.html). +The `execute` method provides direct access to the underlying [`Producer`](https://kafka.apache.org/20/javadoc/org/apache/kafka/clients/producer/Producer.html). + +To use the template, you can configure a producer factory and provide it in the template’s constructor. 
+The following example shows how to do so: + +``` +@Bean +public ProducerFactory producerFactory() { + return new DefaultKafkaProducerFactory<>(producerConfigs()); +} + +@Bean +public Map producerConfigs() { + Map props = new HashMap<>(); + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + // See https://kafka.apache.org/documentation/#producerconfigs for more properties + return props; +} + +@Bean +public KafkaTemplate kafkaTemplate() { + return new KafkaTemplate(producerFactory()); +} +``` + +Starting with version 2.5, you can now override the factory’s `ProducerConfig` properties to create templates with different producer configurations from the same factory. + +``` +@Bean +public KafkaTemplate stringTemplate(ProducerFactory pf) { + return new KafkaTemplate<>(pf); +} + +@Bean +public KafkaTemplate bytesTemplate(ProducerFactory pf) { + return new KafkaTemplate<>(pf, + Collections.singletonMap(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class)); +} +``` + +Note that a bean of type `ProducerFactory` (such as the one auto-configured by Spring Boot) can be referenced with different narrowed generic types. + +You can also configure the template by using standard `` definitions. + +Then, to use the template, you can invoke one of its methods. + +When you use the methods with a `Message` parameter, the topic, partition, and key information is provided in a message header that includes the following items: + +* `KafkaHeaders.TOPIC` + +* `KafkaHeaders.PARTITION_ID` + +* `KafkaHeaders.MESSAGE_KEY` + +* `KafkaHeaders.TIMESTAMP` + +The message payload is the data. + +Optionally, you can configure the `KafkaTemplate` with a `ProducerListener` to get an asynchronous callback with the results of the send (success or failure) instead of waiting for the `Future` to complete. 
+The following listing shows the definition of the `ProducerListener` interface: + +``` +public interface ProducerListener { + + void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata); + + void onError(ProducerRecord producerRecord, RecordMetadata recordMetadata, + Exception exception); + +} +``` + +By default, the template is configured with a `LoggingProducerListener`, which logs errors and does nothing when the send is successful. + +For convenience, default method implementations are provided in case you want to implement only one of the methods. + +Notice that the send methods return a `ListenableFuture`. +You can register a callback with the listener to receive the result of the send asynchronously. +The following example shows how to do so: + +``` +ListenableFuture> future = template.send("myTopic", "something"); +future.addCallback(new ListenableFutureCallback>() { + + @Override + public void onSuccess(SendResult result) { + ... + } + + @Override + public void onFailure(Throwable ex) { + ... + } + +}); +``` + +`SendResult` has two properties, a `ProducerRecord` and `RecordMetadata`. +See the Kafka API documentation for information about those objects. + +The `Throwable` in `onFailure` can be cast to a `KafkaProducerException`; its `failedProducerRecord` property contains the failed record. + +Starting with version 2.5, you can use a `KafkaSendCallback` instead of a `ListenableFutureCallback`, making it easier to extract the failed `ProducerRecord`, avoiding the need to cast the `Throwable`: + +``` +ListenableFuture> future = template.send("topic", 1, "thing"); +future.addCallback(new KafkaSendCallback() { + + @Override + public void onSuccess(SendResult result) { + ... + } + + @Override + public void onFailure(KafkaProducerException ex) { + ProducerRecord failed = ex.getFailedProducerRecord(); + ... 
+ } + +}); +``` + +You can also use a pair of lambdas: + +``` +ListenableFuture> future = template.send("topic", 1, "thing"); +future.addCallback(result -> { + ... + }, (KafkaFailureCallback) ex -> { + ProducerRecord failed = ex.getFailedProducerRecord(); + ... + }); +``` + +If you wish to block the sending thread to await the result, you can invoke the future’s `get()` method; using the method with a timeout is recommended. +You may wish to invoke `flush()` before waiting or, for convenience, the template has a constructor with an `autoFlush` parameter that causes the template to `flush()` on each send. +Flushing is only needed if you have set the `linger.ms` producer property and want to immediately send a partial batch. + +###### Examples + +This section shows examples of sending messages to Kafka: + +Example 5. Non Blocking (Async) + +``` +public void sendToKafka(final MyOutputData data) { + final ProducerRecord record = createRecord(data); + + ListenableFuture> future = template.send(record); + future.addCallback(new KafkaSendCallback() { + + @Override + public void onSuccess(SendResult result) { + handleSuccess(data); + } + + @Override + public void onFailure(KafkaProducerException ex) { + handleFailure(data, record, ex); + } + + }); +} +``` + +Blocking (Sync) + +``` +public void sendToKafka(final MyOutputData data) { + final ProducerRecord record = createRecord(data); + + try { + template.send(record).get(10, TimeUnit.SECONDS); + handleSuccess(data); + } + catch (ExecutionException e) { + handleFailure(data, record, e.getCause()); + } + catch (TimeoutException | InterruptedException e) { + handleFailure(data, record, e); + } +} +``` + +Note that the cause of the `ExecutionException` is `KafkaProducerException` with the `failedProducerRecord` property. + +##### Using `RoutingKafkaTemplate` + +Starting with version 2.5, you can use a `RoutingKafkaTemplate` to select the producer at runtime, based on the destination `topic` name. 
+ +| |The routing template does **not** support transactions, `execute`, `flush`, or `metrics` operations because the topic is not known for those operations.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------| + +The template requires a map of `java.util.regex.Pattern` to `ProducerFactory` instances. +This map should be ordered (e.g. a `LinkedHashMap`) because it is traversed in order; you should add more specific patterns at the beginning. + +The following simple Spring Boot application provides an example of how to use the same template to send to different topics, each using a different value serializer. + +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + public RoutingKafkaTemplate routingTemplate(GenericApplicationContext context, + ProducerFactory pf) { + + // Clone the PF with a different Serializer, register with Spring for shutdown + Map configs = new HashMap<>(pf.getConfigurationProperties()); + configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); + DefaultKafkaProducerFactory bytesPF = new DefaultKafkaProducerFactory<>(configs); + context.registerBean(DefaultKafkaProducerFactory.class, "bytesPF", bytesPF); + + Map> map = new LinkedHashMap<>(); + map.put(Pattern.compile("two"), bytesPF); + map.put(Pattern.compile(".+"), pf); // Default PF with StringSerializer + return new RoutingKafkaTemplate(map); + } + + @Bean + public ApplicationRunner runner(RoutingKafkaTemplate routingTemplate) { + return args -> { + routingTemplate.send("one", "thing1"); + routingTemplate.send("two", "thing2".getBytes()); + }; + } + +} +``` + +The corresponding `@KafkaListener` s for this example are shown in [Annotation Properties](#annotation-properties). 
+ +For another technique to achieve similar results, but with the additional capability of sending different types to the same topic, see [Delegating Serializer and Deserializer](#delegating-serialization). + +##### Using `DefaultKafkaProducerFactory` + +As seen in [Using `KafkaTemplate`](#kafka-template), a `ProducerFactory` is used to create the producer. + +When not using [Transactions](#transactions), by default, the `DefaultKafkaProducerFactory` creates a singleton producer used by all clients, as recommended in the `KafkaProducer` javadocs. +However, if you call `flush()` on the template, this can cause delays for other threads using the same producer. +Starting with version 2.3, the `DefaultKafkaProducerFactory` has a new property `producerPerThread`. +When set to `true`, the factory will create (and cache) a separate producer for each thread, to avoid this issue. + +| |When `producerPerThread` is `true`, user code **must** call `closeThreadBoundProducer()` on the factory when the producer is no longer needed.
This will physically close the producer and remove it from the `ThreadLocal`.
Calling `reset()` or `destroy()` will not clean up these producers.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Also see [`KafkaTemplate` Transactional and non-Transactional Publishing](#tx-template-mixed). + +When creating a `DefaultKafkaProducerFactory`, key and/or value `Serializer` classes can be picked up from configuration by calling the constructor that only takes in a Map of properties (see example in [Using `KafkaTemplate`](#kafka-template)), or `Serializer` instances may be passed to the `DefaultKafkaProducerFactory` constructor (in which case all `Producer` s share the same instances). +Alternatively you can provide `Supplier` s (starting with version 2.3) that will be used to obtain separate `Serializer` instances for each `Producer`: + +``` +@Bean +public ProducerFactory producerFactory() { + return new DefaultKafkaProducerFactory<>(producerConfigs(), null, () -> new CustomValueSerializer()); +} + +@Bean +public KafkaTemplate kafkaTemplate() { + return new KafkaTemplate(producerFactory()); +} +``` + +Starting with version 2.5.10, you can now update the producer properties after the factory is created. +This might be useful, for example, if you have to update SSL key/trust store locations after a credentials change. +The changes will not affect existing producer instances; call `reset()` to close any existing producers so that new producers will be created using the new properties. +NOTE: You cannot change a transactional producer factory to non-transactional, and vice-versa. 
+ +Two new methods are now provided: + +``` +void updateConfigs(Map updates); + +void removeConfig(String configKey); +``` + +Starting with version 2.8, if you provide serializers as objects (in the constructor or via the setters), the factory will invoke the `configure()` method to configure them with the configuration properties. + +##### Using `ReplyingKafkaTemplate` + +Version 2.1.3 introduced a subclass of `KafkaTemplate` to provide request/reply semantics. +The class is named `ReplyingKafkaTemplate` and has two additional methods; the following shows the method signatures: + +``` +RequestReplyFuture sendAndReceive(ProducerRecord record); + +RequestReplyFuture sendAndReceive(ProducerRecord record, + Duration replyTimeout); +``` + +(Also see [Request/Reply with `Message` s](#exchanging-messages)). + +The result is a `ListenableFuture` that is asynchronously populated with the result (or an exception, for a timeout). +The result also has a `sendFuture` property, which is the result of calling `KafkaTemplate.send()`. +You can use this future to determine the result of the send operation. + +If the first method is used, or the `replyTimeout` argument is `null`, the template’s `defaultReplyTimeout` property is used (5 seconds by default). 
+ +The following Spring Boot application shows an example of how to use the feature: + +``` +@SpringBootApplication +public class KRequestingApplication { + + public static void main(String[] args) { + SpringApplication.run(KRequestingApplication.class, args).close(); + } + + @Bean + public ApplicationRunner runner(ReplyingKafkaTemplate template) { + return args -> { + ProducerRecord record = new ProducerRecord<>("kRequests", "foo"); + RequestReplyFuture replyFuture = template.sendAndReceive(record); + SendResult sendResult = replyFuture.getSendFuture().get(10, TimeUnit.SECONDS); + System.out.println("Sent ok: " + sendResult.getRecordMetadata()); + ConsumerRecord consumerRecord = replyFuture.get(10, TimeUnit.SECONDS); + System.out.println("Return value: " + consumerRecord.value()); + }; + } + + @Bean + public ReplyingKafkaTemplate replyingTemplate( + ProducerFactory pf, + ConcurrentMessageListenerContainer repliesContainer) { + + return new ReplyingKafkaTemplate<>(pf, repliesContainer); + } + + @Bean + public ConcurrentMessageListenerContainer repliesContainer( + ConcurrentKafkaListenerContainerFactory containerFactory) { + + ConcurrentMessageListenerContainer repliesContainer = + containerFactory.createContainer("kReplies"); + repliesContainer.getContainerProperties().setGroupId("repliesGroup"); + repliesContainer.setAutoStartup(false); + return repliesContainer; + } + + @Bean + public NewTopic kRequests() { + return TopicBuilder.name("kRequests") + .partitions(10) + .replicas(2) + .build(); + } + + @Bean + public NewTopic kReplies() { + return TopicBuilder.name("kReplies") + .partitions(10) + .replicas(2) + .build(); + } + +} +``` + +Note that we can use Boot’s auto-configured container factory to create the reply container. + +If a non-trivial deserializer is being used for replies, consider using an [`ErrorHandlingDeserializer`](#error-handling-deserializer) that delegates to your configured deserializer. 
+When so configured, the `RequestReplyFuture` will be completed exceptionally and you can catch the `ExecutionException`, with the `DeserializationException` in its `cause` property.
+
+Starting with version 2.6.7, in addition to detecting `DeserializationException` s, the template will call the `replyErrorChecker` function, if provided.
+If it returns an exception, the future will be completed exceptionally.
+
+Here is an example:
+
+```
+template.setReplyErrorChecker(record -> {
+    Header error = record.headers().lastHeader("serverSentAnError");
+    if (error != null) {
+        return new MyException(new String(error.value()));
+    }
+    else {
+        return null;
+    }
+});
+
+...
+
+RequestReplyFuture future = template.sendAndReceive(record);
+try {
+    future.getSendFuture().get(10, TimeUnit.SECONDS); // send ok
+    ConsumerRecord consumerRecord = future.get(10, TimeUnit.SECONDS);
+    ...
+}
+catch (InterruptedException e) {
+    ...
+}
+catch (ExecutionException e) {
+    if (e.getCause() instanceof MyException) {
+        ...
+    }
+}
+catch (TimeoutException e) {
+    ...
+}
+```
+
+The template sets a header (named `KafkaHeaders.CORRELATION_ID` by default), which must be echoed back by the server side.
+ +In this case, the following `@KafkaListener` application responds: + +``` +@SpringBootApplication +public class KReplyingApplication { + + public static void main(String[] args) { + SpringApplication.run(KReplyingApplication.class, args); + } + + @KafkaListener(id="server", topics = "kRequests") + @SendTo // use default replyTo expression + public String listen(String in) { + System.out.println("Server received: " + in); + return in.toUpperCase(); + } + + @Bean + public NewTopic kRequests() { + return TopicBuilder.name("kRequests") + .partitions(10) + .replicas(2) + .build(); + } + + @Bean // not required if Jackson is on the classpath + public MessagingMessageConverter simpleMapperConverter() { + MessagingMessageConverter messagingMessageConverter = new MessagingMessageConverter(); + messagingMessageConverter.setHeaderMapper(new SimpleKafkaHeaderMapper()); + return messagingMessageConverter; + } + +} +``` + +The `@KafkaListener` infrastructure echoes the correlation ID and determines the reply topic. + +See [Forwarding Listener Results using `@SendTo`](#annotation-send-to) for more information about sending replies. +The template uses the default header `KafKaHeaders.REPLY_TOPIC` to indicate the topic to which the reply goes. + +Starting with version 2.2, the template tries to detect the reply topic or partition from the configured reply container. +If the container is configured to listen to a single topic or a single `TopicPartitionOffset`, it is used to set the reply headers. +If the container is configured otherwise, the user must set up the reply headers. +In this case, an `INFO` log message is written during initialization. 
+The following example uses `KafkaHeaders.REPLY_TOPIC`: + +``` +record.headers().add(new RecordHeader(KafkaHeaders.REPLY_TOPIC, "kReplies".getBytes())); +``` + +When you configure with a single reply `TopicPartitionOffset`, you can use the same reply topic for multiple templates, as long as each instance listens on a different partition. +When configuring with a single reply topic, each instance must use a different `group.id`. +In this case, all instances receive each reply, but only the instance that sent the request finds the correlation ID. +This may be useful for auto-scaling, but with the overhead of additional network traffic and the small cost of discarding each unwanted reply. +When you use this setting, we recommend that you set the template’s `sharedReplyTopic` to `true`, which reduces the logging level of unexpected replies to DEBUG instead of the default ERROR. + +The following is an example of configuring the reply container to use the same shared reply topic: + +``` +@Bean +public ConcurrentMessageListenerContainer replyContainer( + ConcurrentKafkaListenerContainerFactory containerFactory) { + + ConcurrentMessageListenerContainer container = containerFactory.createContainer("topic2"); + container.getContainerProperties().setGroupId(UUID.randomUUID().toString()); // unique + Properties props = new Properties(); + props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"); // so the new group doesn't get old replies + container.getContainerProperties().setKafkaConsumerProperties(props); + return container; +} +``` + +| |If you have multiple client instances and you do not configure them as discussed in the preceding paragraph, each instance needs a dedicated reply topic.
An alternative is to set the `KafkaHeaders.REPLY_PARTITION` and use a dedicated partition for each instance.
The `Header` contains a four-byte int (big-endian).
The server must use this header to route the reply to the correct partition (`@KafkaListener` does this).
In this case, though, the reply container must not use Kafka’s group management feature and must be configured to listen on a fixed partition (by using a `TopicPartitionOffset` in its `ContainerProperties` constructor).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The `DefaultKafkaHeaderMapper` requires Jackson to be on the classpath (for the `@KafkaListener`).
If it is not available, the message converter has no header mapper, so you must configure a `MessagingMessageConverter` with a `SimpleKafkaHeaderMapper`, as shown earlier.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, 3 headers are used: + +* `KafkaHeaders.CORRELATION_ID` - used to correlate the reply to a request + +* `KafkaHeaders.REPLY_TOPIC` - used to tell the server where to reply + +* `KafkaHeaders.REPLY_PARTITION` - (optional) used to tell the server which partition to reply to + +These header names are used by the `@KafkaListener` infrastructure to route the reply. + +Starting with version 2.3, you can customize the header names - the template has 3 properties `correlationHeaderName`, `replyTopicHeaderName`, and `replyPartitionHeaderName`. +This is useful if your server is not a Spring application (or does not use the `@KafkaListener`). + +###### Request/Reply with `Message` s + +Version 2.7 added methods to the `ReplyingKafkaTemplate` to send and receive `spring-messaging` 's `Message` abstraction: + +``` +RequestReplyMessageFuture sendAndReceive(Message message); + +

RequestReplyTypedMessageFuture sendAndReceive(Message message, + ParameterizedTypeReference

returnType); +``` + +These will use the template’s default `replyTimeout`, there are also overloaded versions that can take a timeout in the method call. + +Use the first method if the consumer’s `Deserializer` or the template’s `MessageConverter` can convert the payload without any additional information, either via configuration or type metadata in the reply message. + +Use the second method if you need to provide type information for the return type, to assist the message converter. +This also allows the same template to receive different types, even if there is no type metadata in the replies, such as when the server side is not a Spring application. +The following is an example of the latter: + +Example 6. Template Bean + +Java + +``` +@Bean +ReplyingKafkaTemplate template( + ProducerFactory pf, + ConcurrentKafkaListenerContainerFactory factory) { + + ConcurrentMessageListenerContainer replyContainer = + factory.createContainer("replies"); + replyContainer.getContainerProperties().setGroupId("request.replies"); + ReplyingKafkaTemplate template = + new ReplyingKafkaTemplate<>(pf, replyContainer); + template.setMessageConverter(new ByteArrayJsonMessageConverter()); + template.setDefaultTopic("requests"); + return template; +} +``` + +Kotlin + +``` +@Bean +fun template( + pf: ProducerFactory?, + factory: ConcurrentKafkaListenerContainerFactory +): ReplyingKafkaTemplate { + val replyContainer = factory.createContainer("replies") + replyContainer.containerProperties.groupId = "request.replies" + val template = ReplyingKafkaTemplate(pf, replyContainer) + template.messageConverter = ByteArrayJsonMessageConverter() + template.defaultTopic = "requests" + return template +} +``` + +Example 7. 
Using the template + +Java + +``` +RequestReplyTypedMessageFuture future1 = + template.sendAndReceive(MessageBuilder.withPayload("getAThing").build(), + new ParameterizedTypeReference() { }); +log.info(future1.getSendFuture().get(10, TimeUnit.SECONDS).getRecordMetadata().toString()); +Thing thing = future1.get(10, TimeUnit.SECONDS).getPayload(); +log.info(thing.toString()); + +RequestReplyTypedMessageFuture> future2 = + template.sendAndReceive(MessageBuilder.withPayload("getThings").build(), + new ParameterizedTypeReference>() { }); +log.info(future2.getSendFuture().get(10, TimeUnit.SECONDS).getRecordMetadata().toString()); +List things = future2.get(10, TimeUnit.SECONDS).getPayload(); +things.forEach(thing1 -> log.info(thing1.toString())); +``` + +Kotlin + +``` +val future1: RequestReplyTypedMessageFuture? = + template.sendAndReceive(MessageBuilder.withPayload("getAThing").build(), + object : ParameterizedTypeReference() {}) +log.info(future1?.sendFuture?.get(10, TimeUnit.SECONDS)?.recordMetadata?.toString()) +val thing = future1?.get(10, TimeUnit.SECONDS)?.payload +log.info(thing.toString()) + +val future2: RequestReplyTypedMessageFuture?>? = + template.sendAndReceive(MessageBuilder.withPayload("getThings").build(), + object : ParameterizedTypeReference?>() {}) +log.info(future2?.sendFuture?.get(10, TimeUnit.SECONDS)?.recordMetadata.toString()) +val things = future2?.get(10, TimeUnit.SECONDS)?.payload +things?.forEach(Consumer { thing1: Thing? -> log.info(thing1.toString()) }) +``` + +##### Reply Type Message\ + +When the `@KafkaListener` returns a `Message`, with versions before 2.5, it was necessary to populate the reply topic and correlation id headers. 
+In this example, we use the reply topic header from the request: + +``` +@KafkaListener(id = "requestor", topics = "request") +@SendTo +public Message messageReturn(String in) { + return MessageBuilder.withPayload(in.toUpperCase()) + .setHeader(KafkaHeaders.TOPIC, replyTo) + .setHeader(KafkaHeaders.MESSAGE_KEY, 42) + .setHeader(KafkaHeaders.CORRELATION_ID, correlation) + .build(); +} +``` + +This also shows how to set a key on the reply record. + +Starting with version 2.5, the framework will detect if these headers are missing and populate them with the topic - either the topic determined from the `@SendTo` value or the incoming `KafkaHeaders.REPLY_TOPIC` header (if present). +It will also echo the incoming `KafkaHeaders.CORRELATION_ID` and `KafkaHeaders.REPLY_PARTITION`, if present. + +``` +@KafkaListener(id = "requestor", topics = "request") +@SendTo // default REPLY_TOPIC header +public Message messageReturn(String in) { + return MessageBuilder.withPayload(in.toUpperCase()) + .setHeader(KafkaHeaders.MESSAGE_KEY, 42) + .build(); +} +``` + +##### Aggregating Multiple Replies + +The template in [Using `ReplyingKafkaTemplate`](#replying-template) is strictly for a single request/reply scenario. +For cases where multiple receivers of a single message return a reply, you can use the `AggregatingReplyingKafkaTemplate`. +This is an implementation of the client-side of the [Scatter-Gather Enterprise Integration Pattern](https://www.enterpriseintegrationpatterns.com/patterns/messaging/BroadcastAggregate.html). + +Like the `ReplyingKafkaTemplate`, the `AggregatingReplyingKafkaTemplate` constructor takes a producer factory and a listener container to receive the replies; it has a third parameter `BiPredicate>, Boolean> releaseStrategy` which is consulted each time a reply is received; when the predicate returns `true`, the collection of `ConsumerRecord` s is used to complete the `Future` returned by the `sendAndReceive` method. 
+ +There is an additional property `returnPartialOnTimeout` (default false). +When this is set to `true`, instead of completing the future with a `KafkaReplyTimeoutException`, a partial result completes the future normally (as long as at least one reply record has been received). + +Starting with version 2.3.5, the predicate is also called after a timeout (if `returnPartialOnTimeout` is `true`). +The first argument is the current list of records; the second is `true` if this call is due to a timeout. +The predicate can modify the list of records. + +``` +AggregatingReplyingKafkaTemplate template = + new AggregatingReplyingKafkaTemplate<>(producerFactory, container, + coll -> coll.size() == releaseSize); +... +RequestReplyFuture>> future = + template.sendAndReceive(record); +future.getSendFuture().get(10, TimeUnit.SECONDS); // send ok +ConsumerRecord>> consumerRecord = + future.get(30, TimeUnit.SECONDS); +``` + +Notice that the return type is a `ConsumerRecord` with a value that is a collection of `ConsumerRecord` s. +The "outer" `ConsumerRecord` is not a "real" record, it is synthesized by the template, as a holder for the actual reply records received for the request. +When a normal release occurs (release strategy returns true), the topic is set to `aggregatedResults`; if `returnPartialOnTimeout` is true, and timeout occurs (and at least one reply record has been received), the topic is set to `partialResultsAfterTimeout`. +The template provides constant static variables for these "topic" names: + +``` +/** + * Pseudo topic name for the "outer" {@link ConsumerRecords} that has the aggregated + * results in its value after a normal release by the release strategy. + */ +public static final String AGGREGATED_RESULTS_TOPIC = "aggregatedResults"; + +/** + * Pseudo topic name for the "outer" {@link ConsumerRecords} that has the aggregated + * results in its value after a timeout. 
+ */ +public static final String PARTIAL_RESULTS_AFTER_TIMEOUT_TOPIC = "partialResultsAfterTimeout"; +``` + +The real `ConsumerRecord` s in the `Collection` contain the actual topic(s) from which the replies are received. + +| |The listener container for the replies MUST be configured with `AckMode.MANUAL` or `AckMode.MANUAL_IMMEDIATE`; the consumer property `enable.auto.commit` must be `false` (the default since version 2.3).
To avoid any possibility of losing messages, the template only commits offsets when there are zero requests outstanding, i.e. when the last outstanding request is released by the release strategy.
After a rebalance, it is possible for duplicate reply deliveries; these will be ignored for any in-flight requests; you may see error log messages when duplicate replies are received for already released replies.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you use an [`ErrorHandlingDeserializer`](#error-handling-deserializer) with this aggregating template, the framework will not automatically detect `DeserializationException` s.
Instead, the record (with a `null` value) will be returned intact, with the deserialization exception(s) in headers.
It is recommended that applications call the utility method `ReplyingKafkaTemplate.checkDeserialization()` to determine if a deserialization exception occurred.
See its javadocs for more information.
The `replyErrorChecker` is also not called for this aggregating template; you should perform the checks on each element of the reply.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.4. Receiving Messages + +You can receive messages by configuring a `MessageListenerContainer` and providing a message listener or by using the `@KafkaListener` annotation. + +##### Message Listeners + +When you use a [message listener container](#message-listener-container), you must provide a listener to receive data. +There are currently eight supported interfaces for message listeners. 
+The following listing shows these interfaces: + +``` +public interface MessageListener { (1) + + void onMessage(ConsumerRecord data); + +} + +public interface AcknowledgingMessageListener { (2) + + void onMessage(ConsumerRecord data, Acknowledgment acknowledgment); + +} + +public interface ConsumerAwareMessageListener extends MessageListener { (3) + + void onMessage(ConsumerRecord data, Consumer consumer); + +} + +public interface AcknowledgingConsumerAwareMessageListener extends MessageListener { (4) + + void onMessage(ConsumerRecord data, Acknowledgment acknowledgment, Consumer consumer); + +} + +public interface BatchMessageListener { (5) + + void onMessage(List> data); + +} + +public interface BatchAcknowledgingMessageListener { (6) + + void onMessage(List> data, Acknowledgment acknowledgment); + +} + +public interface BatchConsumerAwareMessageListener extends BatchMessageListener { (7) + + void onMessage(List> data, Consumer consumer); + +} + +public interface BatchAcknowledgingConsumerAwareMessageListener extends BatchMessageListener { (8) + + void onMessage(List> data, Acknowledgment acknowledgment, Consumer consumer); + +} +``` + +|**1**| Use this interface for processing individual `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using auto-commit or one of the container-managed [commit methods](#committing-offsets). | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Use this interface for processing individual `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using one of the manual [commit methods](#committing-offsets). 
| +|**3**| Use this interface for processing individual `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using auto-commit or one of the container-managed [commit methods](#committing-offsets).
Access to the `Consumer` object is provided. | +|**4**| Use this interface for processing individual `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using one of the manual [commit methods](#committing-offsets).
Access to the `Consumer` object is provided. | +|**5**| Use this interface for processing all `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using auto-commit or one of the container-managed [commit methods](#committing-offsets).`AckMode.RECORD` is not supported when you use this interface, since the listener is given the complete batch. | +|**6**| Use this interface for processing all `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using one of the manual [commit methods](#committing-offsets). | +|**7**|Use this interface for processing all `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using auto-commit or one of the container-managed [commit methods](#committing-offsets).`AckMode.RECORD` is not supported when you use this interface, since the listener is given the complete batch.
Access to the `Consumer` object is provided.| +|**8**| Use this interface for processing all `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using one of the manual [commit methods](#committing-offsets).
Access to the `Consumer` object is provided. | + +| |The `Consumer` object is not thread-safe.
You must only invoke its methods on the thread that calls the listener.| +|---|---------------------------------------------------------------------------------------------------------------------| + +| |You should not execute any `Consumer` methods that affect the consumer’s positions and or committed offsets in your listener; the container needs to manage such information.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Message Listener Containers + +Two `MessageListenerContainer` implementations are provided: + +* `KafkaMessageListenerContainer` + +* `ConcurrentMessageListenerContainer` + +The `KafkaMessageListenerContainer` receives all message from all topics or partitions on a single thread. +The `ConcurrentMessageListenerContainer` delegates to one or more `KafkaMessageListenerContainer` instances to provide multi-threaded consumption. + +Starting with version 2.2.7, you can add a `RecordInterceptor` to the listener container; it will be invoked before calling the listener allowing inspection or modification of the record. +If the interceptor returns null, the listener is not called. +Starting with version 2.7, it has additional methods which are called after the listener exits (normally, or by throwing an exception). +Also, starting with version 2.7, there is now a `BatchInterceptor`, providing similar functionality for [Batch Listeners](#batch-listeners). +In addition, the `ConsumerAwareRecordInterceptor` (and `BatchInterceptor`) provide access to the `Consumer`. +This might be used, for example, to access the consumer metrics in the interceptor. 
+ +| |You should not execute any methods that affect the consumer’s positions and or committed offsets in these interceptors; the container needs to manage such information.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `CompositeRecordInterceptor` and `CompositeBatchInterceptor` can be used to invoke multiple interceptors. + +By default, starting with version 2.8, when using transactions, the interceptor is invoked before the transaction has started. +You can set the listener container’s `interceptBeforeTx` property to `false` to invoke the interceptor after the transaction has started instead. + +Starting with versions 2.3.8, 2.4.6, the `ConcurrentMessageListenerContainer` now supports [Static Membership](https://kafka.apache.org/documentation/#static_membership) when the concurrency is greater than one. +The `group.instance.id` is suffixed with `-n` with `n` starting at `1`. +This, together with an increased `session.timeout.ms`, can be used to reduce rebalance events, for example, when application instances are restarted. + +###### Using `KafkaMessageListenerContainer` + +The following constructor is available: + +``` +public KafkaMessageListenerContainer(ConsumerFactory consumerFactory, + ContainerProperties containerProperties) +``` + +It receives a `ConsumerFactory` and information about topics and partitions, as well as other configuration, in a `ContainerProperties`object.`ContainerProperties` has the following constructors: + +``` +public ContainerProperties(TopicPartitionOffset... topicPartitions) + +public ContainerProperties(String... topics) + +public ContainerProperties(Pattern topicPattern) +``` + +The first constructor takes an array of `TopicPartitionOffset` arguments to explicitly instruct the container about which partitions to use (using the consumer `assign()` method) and with an optional initial offset. 
+A positive value is an absolute offset by default. +A negative value is relative to the current last offset within a partition by default. +A constructor for `TopicPartitionOffset` that takes an additional `boolean` argument is provided. +If this is `true`, the initial offsets (positive or negative) are relative to the current position for this consumer. +The offsets are applied when the container is started. +The second takes an array of topics, and Kafka allocates the partitions based on the `group.id` property — distributing partitions across the group. +The third uses a regex `Pattern` to select the topics. + +To assign a `MessageListener` to a container, you can use the `ContainerProps.setMessageListener` method when creating the Container. +The following example shows how to do so: + +``` +ContainerProperties containerProps = new ContainerProperties("topic1", "topic2"); +containerProps.setMessageListener(new MessageListener() { + ... +}); +DefaultKafkaConsumerFactory cf = + new DefaultKafkaConsumerFactory<>(consumerProps()); +KafkaMessageListenerContainer container = + new KafkaMessageListenerContainer<>(cf, containerProps); +return container; +``` + +Note that when creating a `DefaultKafkaConsumerFactory`, using the constructor that just takes in the properties as above means that key and value `Deserializer` classes are picked up from configuration. +Alternatively, `Deserializer` instances may be passed to the `DefaultKafkaConsumerFactory` constructor for key and/or value, in which case all Consumers share the same instances. 
+Another option is to provide `Supplier` s (starting with version 2.3) that will be used to obtain separate `Deserializer` instances for each `Consumer`: + +``` +DefaultKafkaConsumerFactory cf = + new DefaultKafkaConsumerFactory<>(consumerProps(), null, () -> new CustomValueDeserializer()); +KafkaMessageListenerContainer container = + new KafkaMessageListenerContainer<>(cf, containerProps); +return container; +``` + +Refer to the [Javadoc](https://docs.spring.io/spring-kafka/api/org/springframework/kafka/listener/ContainerProperties.html) for `ContainerProperties` for more information about the various properties that you can set. + +Since version 2.1.1, a new property called `logContainerConfig` is available. +When `true` and `INFO` logging is enabled each listener container writes a log message summarizing its configuration properties. + +By default, logging of topic offset commits is performed at the `DEBUG` logging level. +Starting with version 2.1.2, a property in `ContainerProperties` called `commitLogLevel` lets you specify the log level for these messages. +For example, to change the log level to `INFO`, you can use `containerProperties.setCommitLogLevel(LogIfLevelEnabled.Level.INFO);`. + +Starting with version 2.2, a new container property called `missingTopicsFatal` has been added (default: `false` since 2.3.4). +This prevents the container from starting if any of the configured topics are not present on the broker. +It does not apply if the container is configured to listen to a topic pattern (regex). +Previously, the container threads looped within the `consumer.poll()` method waiting for the topic to appear while logging many messages. +Aside from the logs, there was no indication that there was a problem. + +As of version 2.8, a new container property `authExceptionRetryInterval` has been introduced. +This causes the container to retry fetching messages after getting any `AuthenticationException` or `AuthorizationException` from the `KafkaConsumer`. 
+This can happen when, for example, the configured user is denied access to read a certain topic or credentials are incorrect. +Defining `authExceptionRetryInterval` allows the container to recover when proper permissions are granted. + +| |By default, no interval is configured - authentication and authorization errors are considered fatal, which causes the container to stop.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.8, when creating the consumer factory, if you provide deserializers as objects (in the constructor or via the setters), the factory will invoke the `configure()` method to configure them with the configuration properties. + +###### Using `ConcurrentMessageListenerContainer` + +The single constructor is similar to the `KafkaListenerContainer` constructor. +The following listing shows the constructor’s signature: + +``` +public ConcurrentMessageListenerContainer(ConsumerFactory consumerFactory, + ContainerProperties containerProperties) +``` + +It also has a `concurrency` property. +For example, `container.setConcurrency(3)` creates three `KafkaMessageListenerContainer` instances. + +For the first constructor, Kafka distributes the partitions across the consumers using its group management capabilities. + +| |When listening to multiple topics, the default partition distribution may not be what you expect.
For example, if you have three topics with five partitions each and you want to use `concurrency=15`, you see only five active consumers, each assigned one partition from each topic, with the other 10 consumers being idle.
This is because the default Kafka `PartitionAssignor` is the `RangeAssignor` (see its Javadoc).
For this scenario, you may want to consider using the `RoundRobinAssignor` instead, which distributes the partitions across all of the consumers.
Then, each consumer is assigned one topic or partition.
To change the `PartitionAssignor`, you can set the `partition.assignment.strategy` consumer property (`ConsumerConfigs.PARTITION_ASSIGNMENT_STRATEGY_CONFIG`) in the properties provided to the `DefaultKafkaConsumerFactory`.

When using Spring Boot, you can set the strategy as follows:

```
spring.kafka.consumer.properties.partition.assignment.strategy=\
org.apache.kafka.clients.consumer.RoundRobinAssignor
```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When the container properties are configured with `TopicPartitionOffset` s, the `ConcurrentMessageListenerContainer` distributes the `TopicPartitionOffset` instances across the delegate `KafkaMessageListenerContainer` instances. + +If, say, six `TopicPartitionOffset` instances are provided and the `concurrency` is `3`; each container gets two partitions. +For five `TopicPartitionOffset` instances, two containers get two partitions, and the third gets one. +If the `concurrency` is greater than the number of `TopicPartitions`, the `concurrency` is adjusted down such that each container gets one partition. + +| |The `client.id` property (if set) is appended with `-n` where `n` is the consumer instance that corresponds to the concurrency.
This is required to provide unique names for MBeans when JMX is enabled.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 1.3, the `MessageListenerContainer` provides access to the metrics of the underlying `KafkaConsumer`. +In the case of `ConcurrentMessageListenerContainer`, the `metrics()` method returns the metrics for all the target `KafkaMessageListenerContainer` instances. +The metrics are grouped into the `Map` by the `client-id` provided for the underlying `KafkaConsumer`. + +Starting with version 2.3, the `ContainerProperties` provides an `idleBetweenPolls` option to let the main loop in the listener container to sleep between `KafkaConsumer.poll()` calls. +An actual sleep interval is selected as the minimum from the provided option and difference between the `max.poll.interval.ms` consumer config and the current records batch processing time. + +###### Committing Offsets + +Several options are provided for committing offsets. +If the `enable.auto.commit` consumer property is `true`, Kafka auto-commits the offsets according to its configuration. +If it is `false`, the containers support several `AckMode` settings (described in the next list). +The default `AckMode` is `BATCH`. +Starting with version 2.3, the framework sets `enable.auto.commit` to `false` unless explicitly set in the configuration. +Previously, the Kafka default (`true`) was used if the property was not set. + +The consumer `poll()` method returns one or more `ConsumerRecords`. +The `MessageListener` is called for each record. +The following lists describes the action taken by the container for each `AckMode` (when transactions are not being used): + +* `RECORD`: Commit the offset when the listener returns after processing the record. 
+ +* `BATCH`: Commit the offset when all the records returned by the `poll()` have been processed. + +* `TIME`: Commit the offset when all the records returned by the `poll()` have been processed, as long as the `ackTime` since the last commit has been exceeded. + +* `COUNT`: Commit the offset when all the records returned by the `poll()` have been processed, as long as `ackCount` records have been received since the last commit. + +* `COUNT_TIME`: Similar to `TIME` and `COUNT`, but the commit is performed if either condition is `true`. + +* `MANUAL`: The message listener is responsible to `acknowledge()` the `Acknowledgment`. + After that, the same semantics as `BATCH` are applied. + +* `MANUAL_IMMEDIATE`: Commit the offset immediately when the `Acknowledgment.acknowledge()` method is called by the listener. + +When using [transactions](#transactions), the offset(s) are sent to the transaction and the semantics are equivalent to `RECORD` or `BATCH`, depending on the listener type (record or batch). + +| |`MANUAL`, and `MANUAL_IMMEDIATE` require the listener to be an `AcknowledgingMessageListener` or a `BatchAcknowledgingMessageListener`.
See [Message Listeners](#message-listeners).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Depending on the `syncCommits` container property, the `commitSync()` or `commitAsync()` method on the consumer is used.`syncCommits` is `true` by default; also see `setSyncCommitTimeout`. +See `setCommitCallback` to get the results of asynchronous commits; the default callback is the `LoggingCommitCallback` which logs errors (and successes at debug level). + +Because the listener container has it’s own mechanism for committing offsets, it prefers the Kafka `ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG` to be `false`. +Starting with version 2.3, it unconditionally sets it to false unless specifically set in the consumer factory or the container’s consumer property overrides. + +The `Acknowledgment` has the following method: + +``` +public interface Acknowledgment { + + void acknowledge(); + +} +``` + +This method gives the listener control over when offsets are committed. + +Starting with version 2.3, the `Acknowledgment` interface has two additional methods `nack(long sleep)` and `nack(int index, long sleep)`. +The first one is used with a record listener, the second with a batch listener. +Calling the wrong method for your listener type will throw an `IllegalStateException`. 
+ +| |If you want to commit a partial batch, using `nack()`, When using transactions, set the `AckMode` to `MANUAL`; invoking `nack()` will send the offsets of the successfully processed records to the transaction.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`nack()` can only be called on the consumer thread that invokes your listener.| +|---|------------------------------------------------------------------------------| + +With a record listener, when `nack()` is called, any pending offsets are committed, the remaing records from the last poll are discarded, and seeks are performed on their partitions so that the failed record and unprocessed records are redelivered on the next `poll()`. +The consumer thread can be paused before redelivery, by setting the `sleep` argument. +This is similar functionality to throwing an exception when the container is configured with a `DefaultErrorHandler`. + +When using a batch listener, you can specify the index within the batch where the failure occurred. +When `nack()` is called, offsets will be committed for records before the index and seeks are performed on the partitions for the failed and discarded records so that they will be redelivered on the next `poll()`. + +See [Container Error Handlers](#error-handlers) for more information. 
+
+| |When using partition assignment via group management, it is important to ensure the `sleep` argument (plus the time spent processing records from the previous poll) is less than the consumer `max.poll.interval.ms` property.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+###### Listener Container Auto Startup
+
+The listener containers implement `SmartLifecycle`, and `autoStartup` is `true` by default.
+The containers are started in a late phase (`Integer.MAX_VALUE - 100`).
+Other components that implement `SmartLifecycle`, to handle data from listeners, should be started in an earlier phase.
+The `- 100` leaves room for later phases to enable components to be auto-started after the containers.
+
+##### Manually Committing Offsets
+
+Normally, when using `AckMode.MANUAL` or `AckMode.MANUAL_IMMEDIATE`, the acknowledgments must be acknowledged in order, because Kafka does not maintain state for each record, only a committed offset for each group/partition.
+Starting with version 2.8, you can now set the container property `asyncAcks`, which allows the acknowledgments for records returned by the poll to be acknowledged in any order.
+The listener container will defer the out-of-order commits until the missing acknowledgments are received.
+The consumer will be paused (no new records delivered) until all the offsets for the previous poll have been committed. 
+ +| |While this feature allows applications to process records asynchronously, it should be understood that it increases the possibility of duplicate deliveries after a failure.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `@KafkaListener` Annotation + +The `@KafkaListener` annotation is used to designate a bean method as a listener for a listener container. +The bean is wrapped in a `MessagingMessageListenerAdapter` configured with various features, such as converters to convert the data, if necessary, to match the method parameters. + +You can configure most attributes on the annotation with SpEL by using `#{…​}` or property placeholders (`${…​}`). +See the [Javadoc](https://docs.spring.io/spring-kafka/api/org/springframework/kafka/annotation/KafkaListener.html) for more information. + +###### Record Listeners + +The `@KafkaListener` annotation provides a mechanism for simple POJO listeners. +The following example shows how to use it: + +``` +public class Listener { + + @KafkaListener(id = "foo", topics = "myTopic", clientIdPrefix = "myClientId") + public void listen(String data) { + ... + } + +} +``` + +This mechanism requires an `@EnableKafka` annotation on one of your `@Configuration` classes and a listener container factory, which is used to configure the underlying `ConcurrentMessageListenerContainer`. +By default, a bean with name `kafkaListenerContainerFactory` is expected. 
+The following example shows how to use `ConcurrentMessageListenerContainer`:
+
+```
+@Configuration
+@EnableKafka
+public class KafkaConfig {
+
+    @Bean
+    KafkaListenerContainerFactory>
+                        kafkaListenerContainerFactory() {
+        ConcurrentKafkaListenerContainerFactory factory =
+                                new ConcurrentKafkaListenerContainerFactory<>();
+        factory.setConsumerFactory(consumerFactory());
+        factory.setConcurrency(3);
+        factory.getContainerProperties().setPollTimeout(3000);
+        return factory;
+    }
+
+    @Bean
+    public ConsumerFactory consumerFactory() {
+        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
+    }
+
+    @Bean
+    public Map consumerConfigs() {
+        Map props = new HashMap<>();
+        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafka.getBrokersAsString());
+        ...
+        return props;
+    }
+}
+```
+
+Notice that, to set container properties, you must use the `getContainerProperties()` method on the factory.
+It is used as a template for the actual properties injected into the container.
+
+Starting with version 2.1.1, you can now set the `client.id` property for consumers created by the annotation.
+The `clientIdPrefix` is suffixed with `-n`, where `n` is an integer representing the container number when using concurrency.
+
+Starting with version 2.2, you can now override the container factory’s `concurrency` and `autoStartup` properties by using properties on the annotation itself.
+The properties can be simple values, property placeholders, or SpEL expressions.
+The following example shows how to do so:
+
+```
+@KafkaListener(id = "myListener", topics = "myTopic",
+    autoStartup = "${listen.auto.start:true}", concurrency = "${listen.concurrency:3}")
+public void listen(String data) {
+    ...
+}
+```
+
+###### Explicit Partition Assignment
+
+You can also configure POJO listeners with explicit topics and partitions (and, optionally, their initial offsets). 
+The following example shows how to do so: + +``` +@KafkaListener(id = "thing2", topicPartitions = + { @TopicPartition(topic = "topic1", partitions = { "0", "1" }), + @TopicPartition(topic = "topic2", partitions = "0", + partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "100")) + }) +public void listen(ConsumerRecord record) { + ... +} +``` + +You can specify each partition in the `partitions` or `partitionOffsets` attribute but not both. + +As with most annotation properties, you can use SpEL expressions; for an example of how to generate a large list of partitions, see [[tip-assign-all-parts]](#tip-assign-all-parts). + +Starting with version 2.5.5, you can apply an initial offset to all assigned partitions: + +``` +@KafkaListener(id = "thing3", topicPartitions = + { @TopicPartition(topic = "topic1", partitions = { "0", "1" }, + partitionOffsets = @PartitionOffset(partition = "*", initialOffset = "0")) + }) +public void listen(ConsumerRecord record) { + ... +} +``` + +The `*` wildcard represents all partitions in the `partitions` attribute. +There must only be one `@PartitionOffset` with the wildcard in each `@TopicPartition`. + +In addition, when the listener implements `ConsumerSeekAware`, `onPartitionsAssigned` is now called, even when using manual assignment. +This allows, for example, any arbitrary seek operations at that time. + +Starting with version 2.6.4, you can specify a comma-delimited list of partitions, or partition ranges: + +``` +@KafkaListener(id = "pp", autoStartup = "false", + topicPartitions = @TopicPartition(topic = "topic1", + partitions = "0-5, 7, 10-15")) +public void process(String in) { + ... +} +``` + +The range is inclusive; the example above will assign partitions `0, 1, 2, 3, 4, 5, 7, 10, 11, 12, 13, 14, 15`. 
+ +The same technique can be used when specifying initial offsets: + +``` +@KafkaListener(id = "thing3", topicPartitions = + { @TopicPartition(topic = "topic1", + partitionOffsets = @PartitionOffset(partition = "0-5", initialOffset = "0")) + }) +public void listen(ConsumerRecord record) { + ... +} +``` + +The initial offset will be applied to all 6 partitions. + +###### Manual Acknowledgment + +When using manual `AckMode`, you can also provide the listener with the `Acknowledgment`. +The following example also shows how to use a different container factory. + +``` +@KafkaListener(id = "cat", topics = "myTopic", + containerFactory = "kafkaManualAckListenerContainerFactory") +public void listen(String data, Acknowledgment ack) { + ... + ack.acknowledge(); +} +``` + +###### Consumer Record Metadata + +Finally, metadata about the record is available from message headers. +You can use the following header names to retrieve the headers of the message: + +* `KafkaHeaders.OFFSET` + +* `KafkaHeaders.RECEIVED_MESSAGE_KEY` + +* `KafkaHeaders.RECEIVED_TOPIC` + +* `KafkaHeaders.RECEIVED_PARTITION_ID` + +* `KafkaHeaders.RECEIVED_TIMESTAMP` + +* `KafkaHeaders.TIMESTAMP_TYPE` + +Starting with version 2.5 the `RECEIVED_MESSAGE_KEY` is not present if the incoming record has a `null` key; previously the header was populated with a `null` value. +This change is to make the framework consistent with `spring-messaging` conventions where `null` valued headers are not present. + +The following example shows how to use the headers: + +``` +@KafkaListener(id = "qux", topicPattern = "myTopic1") +public void listen(@Payload String foo, + @Header(name = KafkaHeaders.RECEIVED_MESSAGE_KEY, required = false) Integer key, + @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition, + @Header(KafkaHeaders.RECEIVED_TOPIC) String topic, + @Header(KafkaHeaders.RECEIVED_TIMESTAMP) long ts + ) { + ... 
+} +``` + +Starting with version 2.5, instead of using discrete headers, you can receive record metadata in a `ConsumerRecordMetadata` parameter. + +``` +@KafkaListener(...) +public void listen(String str, ConsumerRecordMetadata meta) { + ... +} +``` + +This contains all the data from the `ConsumerRecord` except the key and value. + +###### Batch Listeners + +Starting with version 1.1, you can configure `@KafkaListener` methods to receive the entire batch of consumer records received from the consumer poll. +To configure the listener container factory to create batch listeners, you can set the `batchListener` property. +The following example shows how to do so: + +``` +@Bean +public KafkaListenerContainerFactory batchFactory() { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory()); + factory.setBatchListener(true); // <<<<<<<<<<<<<<<<<<<<<<<<< + return factory; +} +``` + +| |Starting with version 2.8, you can override the factory’s `batchListener` propery using the `batch` property on the `@KafkaListener` annotation.
This, together with the changes to [Container Error Handlers](#error-handlers) allows the same factory to be used for both record and batch listeners.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to receive a list of payloads: + +``` +@KafkaListener(id = "list", topics = "myTopic", containerFactory = "batchFactory") +public void listen(List list) { + ... +} +``` + +The topic, partition, offset, and so on are available in headers that parallel the payloads. +The following example shows how to use the headers: + +``` +@KafkaListener(id = "list", topics = "myTopic", containerFactory = "batchFactory") +public void listen(List list, + @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) List keys, + @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List partitions, + @Header(KafkaHeaders.RECEIVED_TOPIC) List topics, + @Header(KafkaHeaders.OFFSET) List offsets) { + ... +} +``` + +Alternatively, you can receive a `List` of `Message` objects with each offset and other details in each message, but it must be the only parameter (aside from optional `Acknowledgment`, when using manual commits, and/or `Consumer` parameters) defined on the method. +The following example shows how to do so: + +``` +@KafkaListener(id = "listMsg", topics = "myTopic", containerFactory = "batchFactory") +public void listen14(List> list) { + ... +} + +@KafkaListener(id = "listMsgAck", topics = "myTopic", containerFactory = "batchFactory") +public void listen15(List> list, Acknowledgment ack) { + ... +} + +@KafkaListener(id = "listMsgAckConsumer", topics = "myTopic", containerFactory = "batchFactory") +public void listen16(List> list, Acknowledgment ack, Consumer consumer) { + ... 
+} +``` + +No conversion is performed on the payloads in this case. + +If the `BatchMessagingMessageConverter` is configured with a `RecordMessageConverter`, you can also add a generic type to the `Message` parameter and the payloads are converted. +See [Payload Conversion with Batch Listeners](#payload-conversion-with-batch) for more information. + +You can also receive a list of `ConsumerRecord` objects, but it must be the only parameter (aside from optional `Acknowledgment`, when using manual commits and `Consumer` parameters) defined on the method. +The following example shows how to do so: + +``` +@KafkaListener(id = "listCRs", topics = "myTopic", containerFactory = "batchFactory") +public void listen(List> list) { + ... +} + +@KafkaListener(id = "listCRsAck", topics = "myTopic", containerFactory = "batchFactory") +public void listen(List> list, Acknowledgment ack) { + ... +} +``` + +Starting with version 2.2, the listener can receive the complete `ConsumerRecords` object returned by the `poll()` method, letting the listener access additional methods, such as `partitions()` (which returns the `TopicPartition` instances in the list) and `records(TopicPartition)` (which gets selective records). +Again, this must be the only parameter (aside from optional `Acknowledgment`, when using manual commits or `Consumer` parameters) on the method. +The following example shows how to do so: + +``` +@KafkaListener(id = "pollResults", topics = "myTopic", containerFactory = "batchFactory") +public void pollResults(ConsumerRecords records) { + ... +} +``` + +| |If the container factory has a `RecordFilterStrategy` configured, it is ignored for `ConsumerRecords` listeners, with a `WARN` log message emitted.
Records can only be filtered with a batch listener if the `>` form of listener is used.
By default, records are filtered one-at-a-time; starting with version 2.8, you can override `filterBatch` to filter the entire batch in one call.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Annotation Properties + +Starting with version 2.0, the `id` property (if present) is used as the Kafka consumer `group.id` property, overriding the configured property in the consumer factory, if present. +You can also set `groupId` explicitly or set `idIsGroup` to false to restore the previous behavior of using the consumer factory `group.id`. + +You can use property placeholders or SpEL expressions within most annotation properties, as the following example shows: + +``` +@KafkaListener(topics = "${some.property}") + +@KafkaListener(topics = "#{someBean.someProperty}", + groupId = "#{someBean.someProperty}.group") +``` + +Starting with version 2.1.2, the SpEL expressions support a special token: `__listener`. +It is a pseudo bean name that represents the current bean instance within which this annotation exists. + +Consider the following example: + +``` +@Bean +public Listener listener1() { + return new Listener("topic1"); +} + +@Bean +public Listener listener2() { + return new Listener("topic2"); +} +``` + +Given the beans in the previous example, we can then use the following: + +``` +public class Listener { + + private final String topic; + + public Listener(String topic) { + this.topic = topic; + } + + @KafkaListener(topics = "#{__listener.topic}", + groupId = "#{__listener.topic}.group") + public void listen(...) { + ... 
+ } + + public String getTopic() { + return this.topic; + } + +} +``` + +If, in the unlikely event that you have an actual bean called `__listener`, you can change the expression token byusing the `beanRef` attribute. +The following example shows how to do so: + +``` +@KafkaListener(beanRef = "__x", topics = "#{__x.topic}", + groupId = "#{__x.topic}.group") +``` + +Starting with version 2.2.4, you can specify Kafka consumer properties directly on the annotation, these will override any properties with the same name configured in the consumer factory. You **cannot** specify the `group.id` and `client.id` properties this way; they will be ignored; use the `groupId` and `clientIdPrefix` annotation properties for those. + +The properties are specified as individual strings with the normal Java `Properties` file format: `foo:bar`, `foo=bar`, or `foo bar`. + +``` +@KafkaListener(topics = "myTopic", groupId = "group", properties = { + "max.poll.interval.ms:60000", + ConsumerConfig.MAX_POLL_RECORDS_CONFIG + "=100" +}) +``` + +The following is an example of the corresponding listeners for the example in [Using `RoutingKafkaTemplate`](#routing-template). + +``` +@KafkaListener(id = "one", topics = "one") +public void listen1(String in) { + System.out.println("1: " + in); +} + +@KafkaListener(id = "two", topics = "two", + properties = "value.deserializer:org.apache.kafka.common.serialization.ByteArrayDeserializer") +public void listen2(byte[] in) { + System.out.println("2: " + new String(in)); +} +``` + +##### Obtaining the Consumer `group.id` + +When running the same listener code in multiple containers, it may be useful to be able to determine which container (identified by its `group.id` consumer property) that a record came from. + +You can call `KafkaUtils.getConsumerGroupId()` on the listener thread to do this. +Alternatively, you can access the group id in a method parameter. 
+ +``` +@KafkaListener(id = "bar", topicPattern = "${topicTwo:annotated2}", exposeGroupId = "${always:true}") +public void listener(@Payload String foo, + @Header(KafkaHeaders.GROUP_ID) String groupId) { +... +} +``` + +| |This is available in record listeners and batch listeners that receive a `List` of records.
It is **not** available in a batch listener that receives a `ConsumerRecords` argument.
Use the `KafkaUtils` mechanism in that case.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Container Thread Naming + +Listener containers currently use two task executors, one to invoke the consumer and another that is used to invoke the listener when the kafka consumer property `enable.auto.commit` is `false`. +You can provide custom executors by setting the `consumerExecutor` and `listenerExecutor` properties of the container’s `ContainerProperties`. +When using pooled executors, be sure that enough threads are available to handle the concurrency across all the containers in which they are used. +When using the `ConcurrentMessageListenerContainer`, a thread from each is used for each consumer (`concurrency`). + +If you do not provide a consumer executor, a `SimpleAsyncTaskExecutor` is used. +This executor creates threads with names similar to `-C-1` (consumer thread). +For the `ConcurrentMessageListenerContainer`, the `` part of the thread name becomes `-m`, where `m` represents the consumer instance.`n` increments each time the container is started. +So, with a bean name of `container`, threads in this container will be named `container-0-C-1`, `container-1-C-1` etc., after the container is started the first time; `container-0-C-2`, `container-1-C-2` etc., after a stop and subsequent start. + +##### `@KafkaListener` as a Meta Annotation + +Starting with version 2.2, you can now use `@KafkaListener` as a meta annotation. 
+The following example shows how to do so: + +``` +@Target(ElementType.METHOD) +@Retention(RetentionPolicy.RUNTIME) +@KafkaListener +public @interface MyThreeConsumersListener { + + @AliasFor(annotation = KafkaListener.class, attribute = "id") + String id(); + + @AliasFor(annotation = KafkaListener.class, attribute = "topics") + String[] topics(); + + @AliasFor(annotation = KafkaListener.class, attribute = "concurrency") + String concurrency() default "3"; + +} +``` + +You must alias at least one of `topics`, `topicPattern`, or `topicPartitions` (and, usually, `id` or `groupId` unless you have specified a `group.id` in the consumer factory configuration). +The following example shows how to do so: + +``` +@MyThreeConsumersListener(id = "my.group", topics = "my.topic") +public void listen1(String in) { + ... +} +``` + +##### `@KafkaListener` on a Class + +When you use `@KafkaListener` at the class-level, you must specify `@KafkaHandler` at the method level. +When messages are delivered, the converted message payload type is used to determine which method to call. +The following example shows how to do so: + +``` +@KafkaListener(id = "multi", topics = "myTopic") +static class MultiListenerBean { + + @KafkaHandler + public void listen(String foo) { + ... + } + + @KafkaHandler + public void listen(Integer bar) { + ... + } + + @KafkaHandler(isDefault = true) + public void listenDefault(Object object) { + ... + } + +} +``` + +Starting with version 2.1.3, you can designate a `@KafkaHandler` method as the default method that is invoked if there is no match on other methods. +At most, one method can be so designated. +When using `@KafkaHandler` methods, the payload must have already been converted to the domain object (so the match can be performed). +Use a custom deserializer, the `JsonDeserializer`, or the `JsonMessageConverter` with its `TypePrecedence` set to `TYPE_ID`. +See [Serialization, Deserialization, and Message Conversion](#serdes) for more information. 
+ +| |Due to some limitations in the way Spring resolves method arguments, a default `@KafkaHandler` cannot receive discrete headers; it must use the `ConsumerRecordMetadata` as discussed in [Consumer Record Metadata](#consumer-record-metadata).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For example: + +``` +@KafkaHandler(isDefault = true) +public void listenDefault(Object object, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) { + ... +} +``` + +This won’t work if the object is a `String`; the `topic` parameter will also get a reference to `object`. + +If you need metadata about the record in a default method, use this: + +``` +@KafkaHandler(isDefault = true) +void listen(Object in, @Header(KafkaHeaders.RECORD_METADATA) ConsumerRecordMetadata meta) { + String topic = meta.topic(); + ... +} +``` + +##### `@KafkaListener` Attribute Modification + +Starting with version 2.7.2, you can now programmatically modify annotation attributes before the container is created. +To do so, add one or more `KafkaListenerAnnotationBeanPostProcessor.AnnotationEnhancer` to the application context.`AnnotationEnhancer` is a `BiFunction, AnnotatedElement, Map` and must return a map of attributes. +The attribute values can contain SpEL and/or property placeholders; the enhancer is called before any resolution is performed. +If more than one enhancer is present, and they implement `Ordered`, they will be invoked in order. 
+ +| |`AnnotationEnhancer` bean definitions must be declared `static` because they are required very early in the application context’s lifecycle.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------| + +An example follows: + +``` +@Bean +public static AnnotationEnhancer groupIdEnhancer() { + return (attrs, element) -> { + attrs.put("groupId", attrs.get("id") + "." + (element instanceof Class + ? ((Class) element).getSimpleName() + : ((Method) element).getDeclaringClass().getSimpleName() + + "." + ((Method) element).getName())); + return attrs; + }; +} +``` + +##### `@KafkaListener` Lifecycle Management + +The listener containers created for `@KafkaListener` annotations are not beans in the application context. +Instead, they are registered with an infrastructure bean of type `KafkaListenerEndpointRegistry`. +This bean is automatically declared by the framework and manages the containers' lifecycles; it will auto-start any containers that have `autoStartup` set to `true`. +All containers created by all container factories must be in the same `phase`. +See [Listener Container Auto Startup](#container-auto-startup) for more information. +You can manage the lifecycle programmatically by using the registry. +Starting or stopping the registry will start or stop all the registered containers. +Alternatively, you can get a reference to an individual container by using its `id` attribute. +You can set `autoStartup` on the annotation, which overrides the default setting configured into the container factory. +You can get a reference to the bean from the application context, such as auto-wiring, to manage its registered containers. +The following examples show how to do so: + +``` +@KafkaListener(id = "myContainer", topics = "myTopic", autoStartup = "false") +public void listen(...) { ... } +``` + +``` +@Autowired +private KafkaListenerEndpointRegistry registry; + +... 
+ + this.registry.getListenerContainer("myContainer").start(); + +... +``` + +The registry only maintains the life cycle of containers it manages; containers declared as beans are not managed by the registry and can be obtained from the application context. +A collection of managed containers can be obtained by calling the registry’s `getListenerContainers()` method. +Version 2.2.5 added a convenience method `getAllListenerContainers()`, which returns a collection of all containers, including those managed by the registry and those declared as beans. +The collection returned will include any prototype beans that have been initialized, but it will not initialize any lazy bean declarations. + +##### `@KafkaListener` `@Payload` Validation + +Starting with version 2.2, it is now easier to add a `Validator` to validate `@KafkaListener` `@Payload` arguments. +Previously, you had to configure a custom `DefaultMessageHandlerMethodFactory` and add it to the registrar. +Now, you can add the validator to the registrar itself. +The following code shows how to do so: + +``` +@Configuration +@EnableKafka +public class Config implements KafkaListenerConfigurer { + + ... + + @Override + public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) { + registrar.setValidator(new MyValidator()); + } + +} +``` + +| |When you use Spring Boot with the validation starter, a `LocalValidatorFactoryBean` is auto-configured, as the following example shows:| +|---|---------------------------------------------------------------------------------------------------------------------------------------| + +``` +@Configuration +@EnableKafka +public class Config implements KafkaListenerConfigurer { + + @Autowired + private LocalValidatorFactoryBean validator; + ... 
+ + @Override + public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) { + registrar.setValidator(this.validator); + } +} +``` + +The following examples show how to validate: + +``` +public static class ValidatedClass { + + @Max(10) + private int bar; + + public int getBar() { + return this.bar; + } + + public void setBar(int bar) { + this.bar = bar; + } + +} +``` + +``` +@KafkaListener(id="validated", topics = "annotated35", errorHandler = "validationErrorHandler", + containerFactory = "kafkaJsonListenerContainerFactory") +public void validatedListener(@Payload @Valid ValidatedClass val) { + ... +} + +@Bean +public KafkaListenerErrorHandler validationErrorHandler() { + return (m, e) -> { + ... + }; +} +``` + +Starting with version 2.5.11, validation now works on payloads for `@KafkaHandler` methods in a class-level listener. +See [`@KafkaListener` on a Class](#class-level-kafkalistener). + +##### Rebalancing Listeners + +`ContainerProperties` has a property called `consumerRebalanceListener`, which takes an implementation of the Kafka client’s `ConsumerRebalanceListener` interface. +If this property is not provided, the container configures a logging listener that logs rebalance events at the `INFO` level. +The framework also adds a sub-interface `ConsumerAwareRebalanceListener`. +The following listing shows the `ConsumerAwareRebalanceListener` interface definition: + +``` +public interface ConsumerAwareRebalanceListener extends ConsumerRebalanceListener { + + void onPartitionsRevokedBeforeCommit(Consumer consumer, Collection partitions); + + void onPartitionsRevokedAfterCommit(Consumer consumer, Collection partitions); + + void onPartitionsAssigned(Consumer consumer, Collection partitions); + + void onPartitionsLost(Consumer consumer, Collection partitions); + +} +``` + +Notice that there are two callbacks when partitions are revoked. +The first is called immediately. +The second is called after any pending offsets are committed. 
+This is useful if you wish to maintain offsets in some external repository, as the following example shows: + +``` +containerProperties.setConsumerRebalanceListener(new ConsumerAwareRebalanceListener() { + + @Override + public void onPartitionsRevokedBeforeCommit(Consumer consumer, Collection partitions) { + // acknowledge any pending Acknowledgments (if using manual acks) + } + + @Override + public void onPartitionsRevokedAfterCommit(Consumer consumer, Collection partitions) { + // ... + store(consumer.position(partition)); + // ... + } + + @Override + public void onPartitionsAssigned(Collection partitions) { + // ... + consumer.seek(partition, offsetTracker.getOffset() + 1); + // ... + } +}); +``` + +| |Starting with version 2.4, a new method `onPartitionsLost()` has been added (similar to a method with the same name in `ConsumerRebalanceLister`).
The default implementation on `ConsumerRebalanceListener` simply calls `onPartitionsRevoked`.
The default implementation on `ConsumerAwareRebalanceListener` does nothing.
When supplying the listener container with a custom listener (of either type), it is important that your implementation not call `onPartitionsRevoked` from `onPartitionsLost`.
If you implement `ConsumerRebalanceListener` you should override the default method.
This is because the listener container will call its own `onPartitionsRevoked` from its implementation of `onPartitionsLost` after calling the method on your implementation.
If your implementation delegates to the default behavior, `onPartitionsRevoked` will be called twice each time the `Consumer` calls that method on the container’s listener.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+##### Forwarding Listener Results using `@SendTo`
+
+Starting with version 2.0, if you also annotate a `@KafkaListener` with a `@SendTo` annotation and the method invocation returns a result, the result is forwarded to the topic specified by the `@SendTo`.
+
+The `@SendTo` value can have several forms:
+
+* `@SendTo("someTopic")` routes to the literal topic
+
+* `@SendTo("#{someExpression}")` routes to the topic determined by evaluating the expression once during application context initialization.
+
+* `@SendTo("!{someExpression}")` routes to the topic determined by evaluating the expression at runtime.
+  The `#root` object for the evaluation has three properties:
+
+  * `request`: The inbound `ConsumerRecord` (or `ConsumerRecords` object for a batch listener)
+
+  * `source`: The `org.springframework.messaging.Message` converted from the `request`. 
+ + * `result`: The method return result. + +* `@SendTo` (no properties): This is treated as `!{source.headers['kafka_replyTopic']}` (since version 2.1.3). + +Starting with versions 2.1.11 and 2.2.1, property placeholders are resolved within `@SendTo` values. + +The result of the expression evaluation must be a `String` that represents the topic name. +The following examples show the various ways to use `@SendTo`: + +``` +@KafkaListener(topics = "annotated21") +@SendTo("!{request.value()}") // runtime SpEL +public String replyingListener(String in) { + ... +} + +@KafkaListener(topics = "${some.property:annotated22}") +@SendTo("#{myBean.replyTopic}") // config time SpEL +public Collection replyingBatchListener(List in) { + ... +} + +@KafkaListener(topics = "annotated23", errorHandler = "replyErrorHandler") +@SendTo("annotated23reply") // static reply topic definition +public String replyingListenerWithErrorHandler(String in) { + ... +} +... +@KafkaListener(topics = "annotated25") +@SendTo("annotated25reply1") +public class MultiListenerSendTo { + + @KafkaHandler + public String foo(String in) { + ... + } + + @KafkaHandler + @SendTo("!{'annotated25reply2'}") + public String bar(@Payload(required = false) KafkaNull nul, + @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) int key) { + ... + } + +} +``` + +| |In order to support `@SendTo`, the listener container factory must be provided with a `KafkaTemplate` (in its `replyTemplate` property), which is used to send the reply.
This should be a `KafkaTemplate` and not a `ReplyingKafkaTemplate` which is used on the client-side for request/reply processing.
When using Spring Boot, boot will auto-configure the template into the factory; when configuring your own factory, it must be set as shown in the examples below.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.2, you can add a `ReplyHeadersConfigurer` to the listener container factory. +This is consulted to determine which headers you want to set in the reply message. +The following example shows how to add a `ReplyHeadersConfigurer`: + +``` +@Bean +public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(cf()); + factory.setReplyTemplate(template()); + factory.setReplyHeadersConfigurer((k, v) -> k.equals("cat")); + return factory; +} +``` + +You can also add more headers if you wish. 
+The following example shows how to do so: + +``` +@Bean +public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(cf()); + factory.setReplyTemplate(template()); + factory.setReplyHeadersConfigurer(new ReplyHeadersConfigurer() { + + @Override + public boolean shouldCopy(String headerName, Object headerValue) { + return false; + } + + @Override + public Map additionalHeaders() { + return Collections.singletonMap("qux", "fiz"); + } + + }); + return factory; +} +``` + +When you use `@SendTo`, you must configure the `ConcurrentKafkaListenerContainerFactory` with a `KafkaTemplate` in its `replyTemplate` property to perform the send. + +| |Unless you use [request/reply semantics](#replying-template) only the simple `send(topic, value)` method is used, so you may wish to create a subclass to generate the partition or key.
The following example shows how to do so:| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +@Bean +public KafkaTemplate myReplyingTemplate() { + return new KafkaTemplate(producerFactory()) { + + @Override + public ListenableFuture> send(String topic, String data) { + return super.send(topic, partitionForData(data), keyForData(data), data); + } + + ... + + }; +} +``` + +| |If the listener method returns `Message` or `Collection>`, the listener method is responsible for setting up the message headers for the reply.
For example, when handling a request from a `ReplyingKafkaTemplate`, you might do the following:

```
@KafkaListener(id = "messageReturned", topics = "someTopic")
public Message<?> listen(String in, @Header(KafkaHeaders.REPLY_TOPIC) byte[] replyTo,
@Header(KafkaHeaders.CORRELATION_ID) byte[] correlation) {
return MessageBuilder.withPayload(in.toUpperCase())
.setHeader(KafkaHeaders.TOPIC, replyTo)
.setHeader(KafkaHeaders.MESSAGE_KEY, 42)
.setHeader(KafkaHeaders.CORRELATION_ID, correlation)
.setHeader("someOtherHeader", "someValue")
.build();
}
```| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When using request/reply semantics, the target partition can be requested by the sender. + +| |You can annotate a `@KafkaListener` method with `@SendTo` even if no result is returned.
This is to allow the configuration of an `errorHandler` that can forward information about a failed message delivery to some topic.
The following example shows how to do so:

```
@KafkaListener(id = "voidListenerWithReplyingErrorHandler", topics = "someTopic",
errorHandler = "voidSendToErrorHandler")
@SendTo("failures")
public void voidListenerWithReplyingErrorHandler(String in) {
throw new RuntimeException("fail");
}

@Bean
public KafkaListenerErrorHandler voidSendToErrorHandler() {
return (m, e) -> {
return ... // some information about the failure and input data
};
}
```

See [Handling Exceptions](#annotation-error-handling) for more information.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If a listener method returns an `Iterable`, by default a record for each element as the value is sent.
Starting with version 2.3.5, set the `splitIterables` property on `@KafkaListener` to `false` and the entire result will be sent as the value of a single `ProducerRecord`.
This requires a suitable serializer in the reply template’s producer configuration.
However, if the reply is `Iterable>` the property is ignored and each message is sent separately.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Filtering Messages + +In certain scenarios, such as rebalancing, a message that has already been processed may be redelivered. +The framework cannot know whether such a message has been processed or not. +That is an application-level function. +This is known as the [Idempotent Receiver](https://www.enterpriseintegrationpatterns.com/patterns/messaging/IdempotentReceiver.html) pattern and Spring Integration provides an [implementation of it](https://docs.spring.io/spring-integration/reference/html/#idempotent-receiver). + +The Spring for Apache Kafka project also provides some assistance by means of the `FilteringMessageListenerAdapter` class, which can wrap your `MessageListener`. +This class takes an implementation of `RecordFilterStrategy` in which you implement the `filter` method to signal that a message is a duplicate and should be discarded. +This has an additional property called `ackDiscarded`, which indicates whether the adapter should acknowledge the discarded record. +It is `false` by default. + +When you use `@KafkaListener`, set the `RecordFilterStrategy` (and optionally `ackDiscarded`) on the container factory so that the listener is wrapped in the appropriate filtering adapter. + +In addition, a `FilteringBatchMessageListenerAdapter` is provided, for when you use a batch [message listener](#message-listeners). 
+ +| |The `FilteringBatchMessageListenerAdapter` is ignored if your `@KafkaListener` receives a `ConsumerRecords` instead of `List>`, because `ConsumerRecords` is immutable.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Retrying Deliveries + +See the `DefaultErrorHandler` in [Handling Exceptions](#annotation-error-handling). + +##### Starting `@KafkaListener` s in Sequence + +A common use case is to start a listener after another listener has consumed all the records in a topic. +For example, you may want to load the contents of one or more compacted topics into memory before processing records from other topics. +Starting with version 2.7.3, a new component `ContainerGroupSequencer` has been introduced. +It uses the `@KafkaListener` `containerGroup` property to group containers together and start the containers in the next group, when all the containers in the current group have gone idle. + +It is best illustrated with an example. + +``` +@KafkaListener(id = "listen1", topics = "topic1", containerGroup = "g1", concurrency = "2") +public void listen1(String in) { +} + +@KafkaListener(id = "listen2", topics = "topic2", containerGroup = "g1", concurrency = "2") +public void listen2(String in) { +} + +@KafkaListener(id = "listen3", topics = "topic3", containerGroup = "g2", concurrency = "2") +public void listen3(String in) { +} + +@KafkaListener(id = "listen4", topics = "topic4", containerGroup = "g2", concurrency = "2") +public void listen4(String in) { +} + +@Bean +ContainerGroupSequencer sequencer(KafkaListenerEndpointRegistry registry) { + return new ContainerGroupSequencer(registry, 5000, "g1", "g2"); +} +``` + +Here, we have 4 listeners in two groups, `g1` and `g2`. 
+ +During application context initialization, the sequencer, sets the `autoStartup` property of all the containers in the provided groups to `false`. +It also sets the `idleEventInterval` for any containers (that do not already have one set) to the supplied value (5000ms in this case). +Then, when the sequencer is started by the application context, the containers in the first group are started. +As `ListenerContainerIdleEvent` s are received, each individual child container in each container is stopped. +When all child containers in a `ConcurrentMessageListenerContainer` are stopped, the parent container is stopped. +When all containers in a group have been stopped, the containers in the next group are started. +There is no limit to the number of groups or containers in a group. + +By default, the containers in the final group (`g2` above) are not stopped when they go idle. +To modify that behavior, set `stopLastGroupWhenIdle` to `true` on the sequencer. + +As an aside; previously, containers in each group were added to a bean of type `Collection` with the bean name being the `containerGroup`. +These collections are now deprecated in favor of beans of type `ContainerGroup` with a bean name that is the group name, suffixed with `.group`; in the example above, there would be 2 beans `g1.group` and `g2.group`. +The `Collection` beans will be removed in a future release. + +##### Using `KafkaTemplate` to Receive + +This section covers how to use `KafkaTemplate` to receive messages. 
+ +Starting with version 2.8, the template has four `receive()` methods: + +``` +ConsumerRecord receive(String topic, int partition, long offset); + +ConsumerRecord receive(String topic, int partition, long offset, Duration pollTimeout); + +ConsumerRecords receive(Collection requested); + +ConsumerRecords receive(Collection requested, Duration pollTimeout); +``` + +As you can see, you need to know the partition and offset of the record(s) you need to retrieve; a new `Consumer` is created (and closed) for each operation. + +With the last two methods, each record is retrieved individually and the results assembled into a `ConsumerRecords` object. +When creating the `TopicPartitionOffset` s for the request, only positive, absolute offsets are supported. + +#### 4.1.5. Listener Container Properties + +| Property | Default | Description | +|---------------------------------------------------------------|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| | 1 | The number of records before committing pending offsets when the `ackMode` is `COUNT` or `COUNT_TIME`. | +| wrapping the message listener, invoked in order. | +| . 
| +| `] | +| | 5000 | The time in milliseconds after which pending offsets are committed when the `ackMode` is `TIME` or `COUNT_TIME`. | +| | LATEST\_ONLY \_NO\_TX | Whether or not to commit the initial position on assignment; by default, the initial offset will only be committed if the `ConsumerConfig.AUTO_OFFSET_RESET_CONFIG` is `latest` and it won’t run in a transaction even if there is a transaction manager present.
See the javadocs for `ContainerProperties.AssignmentCommitOption` for more information about the available options. | +|| `null` | When not null, a `Duration` to sleep between polls when an `AuthenticationException` or `AuthorizationException` is thrown by the Kafka client.
When null, such exceptions are considered fatal and the container will stop. | +| | A prefix for the `client.id` consumer property.
Overrides the consumer factory `client.id` property; in a concurrent container, `-n` is added as a suffix for each consumer instance. | +| | false | Set to `true` to always check for a `DeserializationException` header when a `null` `key` is received.
Useful when the consumer code cannot determine that an `ErrorHandlingDeserializer` has been configured, such as when using a delegating deserializer. | +| | false | Set to `true` to always check for a `DeserializationException` header when a `null` `value` is received.
Useful when the consumer code cannot determine that an `ErrorHandlingDeserializer` has been configured, such as when using a delegating deserializer. | +| | `null` | When present and `syncCommits` is `false` a callback invoked after the commit completes. | +| | DEBUG | The logging level for logs pertaining to committing offsets. | +| . | +| | 30s | The time to wait for the consumer to start before logging an error; this might happen if, say, you use a task executor with insufficient threads. | +| |`SimpleAsyncTaskExecutor`| A task executor to run the consumer threads.
The default executor creates threads named `-C-n`; with the `KafkaMessageListenerContainer`, the name is the bean name; with the `ConcurrentMessageListenerContainer` the name is the bean name suffixed with `-n` where n is incremented for each child container. | +| . | +| . | +| for more information.| +| | `null` | Overrides the consumer `group.id` property; automatically set by the `@KafkaListener` `id` or `groupId` property. | +| | 5.0 | Multiplier for `idleEventInterval` that is applied before any records are received.
After a record is received, the multiplier is no longer applied.
Available since version 2.8. | +| | 0 | Used to slow down deliveries by sleeping the thread between polls.
The time to process a batch of records plus this value must be less than the `max.poll.interval.ms` consumer property. | +| .
Also see `idleBeforeDataMultiplier`. | +|. | +| | None | Used to override any arbitrary consumer properties configured on the consumer factory. | +| | `false` | Set to true to log at INFO level all container properties. | +| | `null` | The message listener. | +| | `true` | Whether or not to maintain Micrometer timers for the consumer threads. | +| are not present on the broker. | +| | 30s | How often to check the state of the consumer threads for `NonResponsiveConsumerEvent` s.
See `noPollThreshold` and `pollTimeout`. | +| | 3.0 | Multiplied by `pollTimeOut` to determine whether to publish a `NonResponsiveConsumerEvent`.
See `monitorInterval`. | +| `. | +| `. | +| |`ThreadPoolTaskScheduler`| A scheduler on which to run the consumer monitor task. | +| ` method until all consumers stop and before publishing the container stopped event. | +| for more information. | +| | `false` | When the container is stopped, stop processing after the current record instead of after processing all the records from the previous poll. | +| . | +| | `null` | The timeout to use when `syncCommits` is `true`.
When not set, the container will attempt to determine the `default.api.timeout.ms` consumer property and use that; otherwise it will use 60 seconds. | +| | `true` | Whether to use sync or async commits for offsets; see `commitCallback`. | +| | n/a | The configured topics, topic pattern or explicitly assigned topics/partitions.
Mutually exclusive; at least one must be provided; enforced by `ContainerProperties` constructors. | +| . | + +| Property | Default | Description | +|-------------------------------------------------------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| |`DefaultAfterRollbackProcessor`| An `AfterRollbackProcessor` to invoke after a transaction is rolled back. | +|| application context | The event publisher. | +| | See desc. | Deprecated - see `commonErrorHandler`. | +| | `null` | Set a `BatchInterceptor` to call before invoking the batch listener; does not apply to record listeners.
Also see `interceptBeforeTx`. | +| | bean name | The bean name of the container; suffixed with `-n` for child containers. | +| .| +| | `ContainerProperties` | The container properties instance. | +| | See desc. | Deprecated - see `commonErrorHandler`. | +| | See desc. | Deprecated - see `commonErrorHandler`. | +| | See desc. | The `containerProperties.groupId`, if present, otherwise the `group.id` property from the consumer factory. | +| | `true` | Determines whether the `recordInterceptor` is called before or after a transaction starts. | +| | See desc. | The bean name for user-configured containers or the `id` attribute of `@KafkaListener` s. | +| | True if a consumer pause has been requested. | +| | `null` | Set a `RecordInterceptor` to call before invoking the record listener; does not apply to batch listeners.
Also see `interceptBeforeTx`. | +| | 30s | When the `missingTopicsFatal` container property is `true`, how long to wait, in seconds, for the `describeTopics` operation to complete. | + +| Property | Default | Description | +|-------------------------------------------------------------------|-----------|----------------------------------------------------------------------------------------------| +| . | +|. | +| | `null` |Used by the concurrent container to give each child container’s consumer a unique `client.id`.| +| | n/a | True if pause has been requested and the consumer has actually paused. | + +| Property | Default | Description | +|-------------------------------------------------------------------|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| | `true` | Set to false to suppress adding a suffix to the `client.id` consumer property, when the `concurrency` is only 1. | +| . | +|, keyed by the child container’s consumer’s `client.id` property.| +| | 1 | The number of child `KafkaMessageListenerContainer` s to manage. | +| | n/a | True if pause has been requested and all child containers' consumer has actually paused. | +| | n/a | A reference to all child `KafkaMessageListenerContainer` s. | + +#### 4.1.6. Application Events + +The following Spring application events are published by listener containers and their consumers: + +* `ConsumerStartingEvent` - published when a consumer thread is first started, before it starts polling. + +* `ConsumerStartedEvent` - published when a consumer is about to start polling. + +* `ConsumerFailedToStartEvent` - published if no `ConsumerStartingEvent` is published within the `consumerStartTimeout` container property. + This event might signal that the configured task executor has insufficient threads to support the containers it is used in and their concurrency. 
+ An error message is also logged when this condition occurs. + +* `ListenerContainerIdleEvent`: published when no messages have been received in `idleInterval` (if configured). + +* `ListenerContainerNoLongerIdleEvent`: published when a record is consumed after previously publishing a `ListenerContainerIdleEvent`. + +* `ListenerContainerPartitionIdleEvent`: published when no messages have been received from that partition in `idlePartitionEventInterval` (if configured). + +* `ListenerContainerPartitionNoLongerIdleEvent`: published when a record is consumed from a partition that has previously published a `ListenerContainerPartitionIdleEvent`. + +* `NonResponsiveConsumerEvent`: published when the consumer appears to be blocked in the `poll` method. + +* `ConsumerPartitionPausedEvent`: published by each consumer when a partition is paused. + +* `ConsumerPartitionResumedEvent`: published by each consumer when a partition is resumed. + +* `ConsumerPausedEvent`: published by each consumer when the container is paused. + +* `ConsumerResumedEvent`: published by each consumer when the container is resumed. + +* `ConsumerStoppingEvent`: published by each consumer just before stopping. + +* `ConsumerStoppedEvent`: published after the consumer is closed. + See [Thread Safety](#thread-safety). + +* `ContainerStoppedEvent`: published when all consumers have stopped. + +| |By default, the application context’s event multicaster invokes event listeners on the calling thread.
If you change the multicaster to use an async executor, you must not invoke any `Consumer` methods when the event contains a reference to the consumer.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `ListenerContainerIdleEvent` has the following properties: + +* `source`: The listener container instance that published the event. + +* `container`: The listener container or the parent listener container, if the source container is a child. + +* `id`: The listener ID (or container bean name). + +* `idleTime`: The time the container had been idle when the event was published. + +* `topicPartitions`: The topics and partitions that the container was assigned at the time the event was generated. + +* `consumer`: A reference to the Kafka `Consumer` object. + For example, if the consumer’s `pause()` method was previously called, it can `resume()` when the event is received. + +* `paused`: Whether the container is currently paused. + See [Pausing and Resuming Listener Containers](#pause-resume) for more information. + +The `ListenerContainerNoLongerIdleEvent` has the same properties, except `idleTime` and `paused`. + +The `ListenerContainerPartitionIdleEvent` has the following properties: + +* `source`: The listener container instance that published the event. + +* `container`: The listener container or the parent listener container, if the source container is a child. + +* `id`: The listener ID (or container bean name). + +* `idleTime`: The time partition consumption had been idle when the event was published. + +* `topicPartition`: The topic and partition that triggered the event. + +* `consumer`: A reference to the Kafka `Consumer` object. 
+ For example, if the consumer’s `pause()` method was previously called, it can `resume()` when the event is received. + +* `paused`: Whether that partition consumption is currently paused for that consumer. + See [Pausing and Resuming Listener Containers](#pause-resume) for more information. + +The `ListenerContainerPartitionNoLongerIdleEvent` has the same properties, except `idleTime` and `paused`. + +The `NonResponsiveConsumerEvent` has the following properties: + +* `source`: The listener container instance that published the event. + +* `container`: The listener container or the parent listener container, if the source container is a child. + +* `id`: The listener ID (or container bean name). + +* `timeSinceLastPoll`: The time just before the container last called `poll()`. + +* `topicPartitions`: The topics and partitions that the container was assigned at the time the event was generated. + +* `consumer`: A reference to the Kafka `Consumer` object. + For example, if the consumer’s `pause()` method was previously called, it can `resume()` when the event is received. + +* `paused`: Whether the container is currently paused. + See [Pausing and Resuming Listener Containers](#pause-resume) for more information. + +The `ConsumerPausedEvent`, `ConsumerResumedEvent`, and `ConsumerStopping` events have the following properties: + +* `source`: The listener container instance that published the event. + +* `container`: The listener container or the parent listener container, if the source container is a child. + +* `partitions`: The `TopicPartition` instances involved. + +The `ConsumerPartitionPausedEvent`, `ConsumerPartitionResumedEvent` events have the following properties: + +* `source`: The listener container instance that published the event. + +* `container`: The listener container or the parent listener container, if the source container is a child. + +* `partition`: The `TopicPartition` instance involved. 
+ +The `ConsumerStartingEvent`, `ConsumerStartedEvent`, `ConsumerFailedToStartEvent`, `ConsumerStoppedEvent` and `ContainerStoppedEvent` events have the following properties: + +* `source`: The listener container instance that published the event. + +* `container`: The listener container or the parent listener container, if the source container is a child. + +All containers (whether a child or a parent) publish `ContainerStoppedEvent`. +For a parent container, the source and container properties are identical. + +In addition, the `ConsumerStoppedEvent` has the following additional property: + +* `reason` + + * `NORMAL` - the consumer stopped normally (container was stopped). + + * `ERROR` - a `java.lang.Error` was thrown. + + * `FENCED` - the transactional producer was fenced and the `stopContainerWhenFenced` container property is `true`. + + * `AUTH` - an `AuthenticationException` or `AuthorizationException` was thrown and the `authExceptionRetryInterval` is not configured. + + * `NO_OFFSET` - there is no offset for a partition and the `auto.offset.reset` policy is `none`. + +You can use this event to restart the container after such a condition: + +``` +if (event.getReason().equals(Reason.FENCED)) { + event.getSource(MessageListenerContainer.class).start(); +} +``` + +##### Detecting Idle and Non-Responsive Consumers + +While efficient, one problem with asynchronous consumers is detecting when they are idle. +You might want to take some action if no messages arrive for some period of time. + +You can configure the listener container to publish a `ListenerContainerIdleEvent` when some time passes with no message delivery. +While the container is idle, an event is published every `idleEventInterval` milliseconds. + +To configure this feature, set the `idleEventInterval` on the container. 
+The following example shows how to do so: + +``` +@Bean +public KafkaMessageListenerContainer(ConsumerFactory consumerFactory) { + ContainerProperties containerProps = new ContainerProperties("topic1", "topic2"); + ... + containerProps.setIdleEventInterval(60000L); + ... + KafkaMessageListenerContainer container = new KafKaMessageListenerContainer<>(...); + return container; +} +``` + +The following example shows how to set the `idleEventInterval` for a `@KafkaListener`: + +``` +@Bean +public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + ... + factory.getContainerProperties().setIdleEventInterval(60000L); + ... + return factory; +} +``` + +In each of these cases, an event is published once per minute while the container is idle. + +If, for some reason, the consumer `poll()` method does not exit, no messages are received and idle events cannot be generated (this was a problem with early versions of the `kafka-clients` when the broker wasn’t reachable). +In this case, the container publishes a `NonResponsiveConsumerEvent` if a poll does not return within `3x` the `pollTimeout` property. +By default, this check is performed once every 30 seconds in each container. +You can modify this behavior by setting the `monitorInterval` (default 30 seconds) and `noPollThreshold` (default 3.0) properties in the `ContainerProperties` when configuring the listener container. +The `noPollThreshold` should be greater than `1.0` to avoid getting spurious events due to a race condition. +Receiving such an event lets you stop the containers, thus waking the consumer so that it can stop. + +Starting with version 2.6.2, if a container has published a `ListenerContainerIdleEvent`, it will publish a `ListenerContainerNoLongerIdleEvent` when a record is subsequently received. 
+ +##### Event Consumption + +You can capture these events by implementing `ApplicationListener` — either a general listener or one narrowed to only receive this specific event. +You can also use `@EventListener`, introduced in Spring Framework 4.2. + +The next example combines `@KafkaListener` and `@EventListener` into a single class. +You should understand that the application listener gets events for all containers, so you may need to check the listener ID if you want to take specific action based on which container is idle. +You can also use the `@EventListener` `condition` for this purpose. + +See [Application Events](#events) for information about event properties. + +The event is normally published on the consumer thread, so it is safe to interact with the `Consumer` object. + +The following example uses both `@KafkaListener` and `@EventListener`: + +``` +public class Listener { + + @KafkaListener(id = "qux", topics = "annotated") + public void listen4(@Payload String foo, Acknowledgment ack) { + ... + } + + @EventListener(condition = "event.listenerId.startsWith('qux-')") + public void eventHandler(ListenerContainerIdleEvent event) { + ... + } + +} +``` + +| |Event listeners see events for all containers.
Consequently, in the preceding example, we narrow the events received based on the listener ID.
Since containers created for the `@KafkaListener` support concurrency, the actual containers are named `id-n` where the `n` is a unique value for each instance to support the concurrency.
That is why we use `startsWith` in the condition.|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+| |If you wish to use the idle event to stop the listener container, you should not call `container.stop()` on the thread that calls the listener.
Doing so causes delays and unnecessary log messages.
Instead, you should hand off the event to a different thread that can then stop the container.
Also, you should not `stop()` the container instance if it is a child container.
You should stop the concurrent container instead.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Current Positions when Idle + +Note that you can obtain the current positions when idle is detected by implementing `ConsumerSeekAware` in your listener. +See `onIdleContainer()` in [Seeking to a Specific Offset](#seek). + +#### 4.1.7. Topic/Partition Initial Offset + +There are several ways to set the initial offset for a partition. + +When manually assigning partitions, you can set the initial offset (if desired) in the configured `TopicPartitionOffset` arguments (see [Message Listener Containers](#message-listener-container)). +You can also seek to a specific offset at any time. + +When you use group management where the broker assigns partitions: + +* For a new `group.id`, the initial offset is determined by the `auto.offset.reset` consumer property (`earliest` or `latest`). + +* For an existing group ID, the initial offset is the current offset for that group ID. + You can, however, seek to a specific offset during initialization (or at any time thereafter). + +#### 4.1.8. 
Seeking to a Specific Offset + +In order to seek, your listener must implement `ConsumerSeekAware`, which has the following methods: + +``` +void registerSeekCallback(ConsumerSeekCallback callback); + +void onPartitionsAssigned(Map assignments, ConsumerSeekCallback callback); + +void onPartitionsRevoked(Collection partitions) + +void onIdleContainer(Map assignments, ConsumerSeekCallback callback); +``` + +The `registerSeekCallback` is called when the container is started and whenever partitions are assigned. +You should use this callback when seeking at some arbitrary time after initialization. +You should save a reference to the callback. +If you use the same listener in multiple containers (or in a `ConcurrentMessageListenerContainer`), you should store the callback in a `ThreadLocal` or some other structure keyed by the listener `Thread`. + +When using group management, `onPartitionsAssigned` is called when partitions are assigned. +You can use this method, for example, for setting initial offsets for the partitions, by calling the callback. +You can also use this method to associate this thread’s callback with the assigned partitions (see the example below). +You must use the callback argument, not the one passed into `registerSeekCallback`. +Starting with version 2.5.5, this method is called, even when using [manual partition assignment](#manual-assignment). + +`onPartitionsRevoked` is called when the container is stopped or Kafka revokes assignments. +You should discard this thread’s callback and remove any associations to the revoked partitions. 
+
+The callback has the following methods:
+
+```
+void seek(String topic, int partition, long offset);
+
+void seekToBeginning(String topic, int partition);
+
+void seekToBeginning(Collection partitions);
+
+void seekToEnd(String topic, int partition);
+
+void seekToEnd(Collection partitions);
+
+void seekRelative(String topic, int partition, long offset, boolean toCurrent);
+
+void seekToTimestamp(String topic, int partition, long timestamp);
+
+void seekToTimestamp(Collection topicPartitions, long timestamp);
+```
+
+`seekRelative` was added in version 2.3, to perform relative seeks.
+
+* `offset` negative and `toCurrent` `false` - seek relative to the end of the partition.
+
+* `offset` positive and `toCurrent` `false` - seek relative to the beginning of the partition.
+
+* `offset` negative and `toCurrent` `true` - seek relative to the current position (rewind).
+
+* `offset` positive and `toCurrent` `true` - seek relative to the current position (fast forward).
+
+The `seekToTimestamp` methods were also added in version 2.3.
+
+| |When seeking to the same timestamp for multiple partitions in the `onIdleContainer` or `onPartitionsAssigned` methods, the second method is preferred because it is more efficient to find the offsets for the timestamps in a single call to the consumer’s `offsetsForTimes` method.
When called from other locations, the container will gather all timestamp seek requests and make one call to `offsetsForTimes`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also perform seek operations from `onIdleContainer()` when an idle container is detected. +See [Detecting Idle and Non-Responsive Consumers](#idle-containers) for how to enable idle container detection. + +| |The `seekToBeginning` method that accepts a collection is useful, for example, when processing a compacted topic and you wish to seek to the beginning every time the application is started:| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +public class MyListener implements ConsumerSeekAware { + +... + + @Override + public void onPartitionsAssigned(Map assignments, ConsumerSeekCallback callback) { + callback.seekToBeginning(assignments.keySet()); + } + +} +``` + +To arbitrarily seek at runtime, use the callback reference from the `registerSeekCallback` for the appropriate thread. + +Here is a trivial Spring Boot application that demonstrates how to use the callback; it sends 10 records to the topic; hitting `` in the console causes all partitions to seek to the beginning. 
+ +``` +@SpringBootApplication +public class SeekExampleApplication { + + public static void main(String[] args) { + SpringApplication.run(SeekExampleApplication.class, args); + } + + @Bean + public ApplicationRunner runner(Listener listener, KafkaTemplate template) { + return args -> { + IntStream.range(0, 10).forEach(i -> template.send( + new ProducerRecord<>("seekExample", i % 3, "foo", "bar"))); + while (true) { + System.in.read(); + listener.seekToStart(); + } + }; + } + + @Bean + public NewTopic topic() { + return new NewTopic("seekExample", 3, (short) 1); + } + +} + +@Component +class Listener implements ConsumerSeekAware { + + private static final Logger logger = LoggerFactory.getLogger(Listener.class); + + private final ThreadLocal callbackForThread = new ThreadLocal<>(); + + private final Map callbacks = new ConcurrentHashMap<>(); + + @Override + public void registerSeekCallback(ConsumerSeekCallback callback) { + this.callbackForThread.set(callback); + } + + @Override + public void onPartitionsAssigned(Map assignments, ConsumerSeekCallback callback) { + assignments.keySet().forEach(tp -> this.callbacks.put(tp, this.callbackForThread.get())); + } + + @Override + public void onPartitionsRevoked(Collection partitions) { + partitions.forEach(tp -> this.callbacks.remove(tp)); + this.callbackForThread.remove(); + } + + @Override + public void onIdleContainer(Map assignments, ConsumerSeekCallback callback) { + } + + @KafkaListener(id = "seekExample", topics = "seekExample", concurrency = "3") + public void listen(ConsumerRecord in) { + logger.info(in.toString()); + } + + public void seekToStart() { + this.callbacks.forEach((tp, callback) -> callback.seekToBeginning(tp.topic(), tp.partition())); + } + +} +``` + +To make things simpler, version 2.3 added the `AbstractConsumerSeekAware` class, which keeps track of which callback is to be used for a topic/partition. 
+The following example shows how to seek to the last record processed, in each partition, each time the container goes idle.
+It also has methods that allow arbitrary external calls to rewind partitions by one record.
+
+```
+public class SeekToLastOnIdleListener extends AbstractConsumerSeekAware {
+
+    @KafkaListener(id = "seekOnIdle", topics = "seekOnIdle")
+    public void listen(String in) {
+        ...
+    }
+
+    @Override
+    public void onIdleContainer(Map assignments,
+            ConsumerSeekCallback callback) {
+
+        assignments.keySet().forEach(tp -> callback.seekRelative(tp.topic(), tp.partition(), -1, true));
+    }
+
+    /**
+     * Rewind all partitions one record.
+     */
+    public void rewindAllOneRecord() {
+        getSeekCallbacks()
+            .forEach((tp, callback) ->
+                callback.seekRelative(tp.topic(), tp.partition(), -1, true));
+    }
+
+    /**
+     * Rewind one partition one record.
+     */
+    public void rewindOnePartitionOneRecord(String topic, int partition) {
+        getSeekCallbackFor(new org.apache.kafka.common.TopicPartition(topic, partition))
+            .seekRelative(topic, partition, -1, true);
+    }
+
+}
+```
+
+Version 2.6 added convenience methods to the abstract class:
+
+* `seekToBeginning()` - seeks all assigned partitions to the beginning
+
+* `seekToEnd()` - seeks all assigned partitions to the end
+
+* `seekToTimestamp(long time)` - seeks all assigned partitions to the offset represented by that timestamp.
+
+Example:
+
+```
+public class MyListener extends AbstractConsumerSeekAware {
+
+    @KafkaListener(...)
+    void listen(...) {
+        ...
+    }
+}
+
+public class SomeOtherBean {
+
+    MyListener listener;
+
+    ...
+
+    void someMethod() {
+        this.listener.seekToTimestamp(System.currentTimeMillis() - 60_000);
+    }
+
+}
+```
+
+#### 4.1.9. Container factory
+
+As discussed in [`@KafkaListener` Annotation](#kafka-listener-annotation), a `ConcurrentKafkaListenerContainerFactory` is used to create containers for annotated methods.
+ +Starting with version 2.2, you can use the same factory to create any `ConcurrentMessageListenerContainer`. +This might be useful if you want to create several containers with similar properties or you wish to use some externally configured factory, such as the one provided by Spring Boot auto-configuration. +Once the container is created, you can further modify its properties, many of which are set by using `container.getContainerProperties()`. +The following example configures a `ConcurrentMessageListenerContainer`: + +``` +@Bean +public ConcurrentMessageListenerContainer( + ConcurrentKafkaListenerContainerFactory factory) { + + ConcurrentMessageListenerContainer container = + factory.createContainer("topic1", "topic2"); + container.setMessageListener(m -> { ... } ); + return container; +} +``` + +| |Containers created this way are not added to the endpoint registry.
They should be created as `@Bean` definitions so that they are registered with the application context.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.3.4, you can add a `ContainerCustomizer` to the factory to further configure each container after it has been created and configured. + +``` +@Bean +public KafkaListenerContainerFactory kafkaListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + ... + factory.setContainerCustomizer(container -> { /* customize the container */ }); + return factory; +} +``` + +#### 4.1.10. Thread Safety + +When using a concurrent message listener container, a single listener instance is invoked on all consumer threads. +Listeners, therefore, need to be thread-safe, and it is preferable to use stateless listeners. +If it is not possible to make your listener thread-safe or adding synchronization would significantly reduce the benefit of adding concurrency, you can use one of a few techniques: + +* Use `n` containers with `concurrency=1` with a prototype scoped `MessageListener` bean so that each container gets its own instance (this is not possible when using `@KafkaListener`). + +* Keep the state in `ThreadLocal` instances. + +* Have the singleton listener delegate to a bean that is declared in `SimpleThreadScope` (or a similar scope). + +To facilitate cleaning up thread state (for the second and third items in the preceding list), starting with version 2.2, the listener container publishes a `ConsumerStoppedEvent` when each thread exits. +You can consume these events with an `ApplicationListener` or `@EventListener` method to remove `ThreadLocal` instances or `remove()` thread-scoped beans from the scope. 
+Note that `SimpleThreadScope` does not destroy beans that have a destruction interface (such as `DisposableBean`), so you should `destroy()` the instance yourself. + +| |By default, the application context’s event multicaster invokes event listeners on the calling thread.
If you change the multicaster to use an async executor, thread cleanup is not effective.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.11. Monitoring + +##### Monitoring Listener Performance + +Starting with version 2.3, the listener container will automatically create and update Micrometer `Timer` s for the listener, if `Micrometer` is detected on the class path, and a single `MeterRegistry` is present in the application context. +The timers can be disabled by setting the `ContainerProperty` `micrometerEnabled` to `false`. + +Two timers are maintained - one for successful calls to the listener and one for failures. + +The timers are named `spring.kafka.listener` and have the following tags: + +* `name` : (container bean name) + +* `result` : `success` or `failure` + +* `exception` : `none` or `ListenerExecutionFailedException` + +You can add additional tags using the `ContainerProperties` `micrometerTags` property. + +| |With the concurrent container, timers are created for each thread and the `name` tag is suffixed with `-n` where n is `0` to `concurrency-1`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------| + +##### Monitoring KafkaTemplate Performance + +Starting with version 2.5, the template will automatically create and update Micrometer `Timer` s for send operations, if `Micrometer` is detected on the class path, and a single `MeterRegistry` is present in the application context. +The timers can be disabled by setting the template’s `micrometerEnabled` property to `false`. + +Two timers are maintained - one for successful calls to the listener and one for failures. 
+ +The timers are named `spring.kafka.template` and have the following tags: + +* `name` : (template bean name) + +* `result` : `success` or `failure` + +* `exception` : `none` or the exception class name for failures + +You can add additional tags using the template’s `micrometerTags` property. + +##### Micrometer Native Metrics + +Starting with version 2.5, the framework provides [Factory Listeners](#factory-listeners) to manage a Micrometer `KafkaClientMetrics` instance whenever producers and consumers are created and closed. + +To enable this feature, simply add the listeners to your producer and consumer factories: + +``` +@Bean +public ConsumerFactory myConsumerFactory() { + Map configs = consumerConfigs(); + ... + DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(configs); + ... + cf.addListener(new MicrometerConsumerListener(meterRegistry(), + Collections.singletonList(new ImmutableTag("customTag", "customTagValue")))); + ... + return cf; +} + +@Bean +public ProducerFactory myProducerFactory() { + Map configs = producerConfigs(); + configs.put(ProducerConfig.CLIENT_ID_CONFIG, "myClientId"); + ... + DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(configs); + ... + pf.addListener(new MicrometerProducerListener(meterRegistry(), + Collections.singletonList(new ImmutableTag("customTag", "customTagValue")))); + ... + return pf; +} +``` + +The consumer/producer `id` passed to the listener is added to the meter’s tags with tag name `spring.id`. + +An example of obtaining one of the Kafka metrics + +``` +double count = this.meterRegistry.get("kafka.producer.node.incoming.byte.total") + .tag("customTag", "customTagValue") + .tag("spring.id", "myProducerFactory.myClientId-1") + .functionCounter() + .count() +``` + +A similar listener is provided for the `StreamsBuilderFactoryBean` - see [KafkaStreams Micrometer Support](#streams-micrometer). + +#### 4.1.12. 
Transactions
+
+This section describes how Spring for Apache Kafka supports transactions.
+
+##### Overview
+
+The 0.11.0.0 client library added support for transactions.
+Spring for Apache Kafka adds support in the following ways:
+
+* `KafkaTransactionManager`: Used with normal Spring transaction support (`@Transactional`, `TransactionTemplate` etc).
+
+* Transactional `KafkaMessageListenerContainer`
+
+* Local transactions with `KafkaTemplate`
+
+* Transaction synchronization with other transaction managers
+
+Transactions are enabled by providing the `DefaultKafkaProducerFactory` with a `transactionIdPrefix`.
+In that case, instead of managing a single shared `Producer`, the factory maintains a cache of transactional producers.
+When the user calls `close()` on a producer, it is returned to the cache for reuse instead of actually being closed.
+The `transactional.id` property of each producer is `transactionIdPrefix` + `n`, where `n` starts with `0` and is incremented for each new producer, unless the transaction is started by a listener container with a record-based listener.
+In that case, the `transactional.id` is `<transactionIdPrefix>.<group.id>.<topic>.<partition>`.
+This is to properly support fencing zombies, [as described here](https://www.confluent.io/blog/transactions-apache-kafka/).
+This new behavior was added in versions 1.3.7, 2.0.6, 2.1.10, and 2.2.0.
+If you wish to revert to the previous behavior, you can set the `producerPerConsumerPartition` property on the `DefaultKafkaProducerFactory` to `false`.
+
+| |While transactions are supported with batch listeners, by default, zombie fencing is not supported because a batch may contain records from multiple topics or partitions.
However, starting with version 2.3.2, zombie fencing is supported if you set the container property `subBatchPerPartition` to true.
In that case, the batch listener is invoked once per partition received from the last poll, as if each poll only returned records for a single partition.
This is `true` by default since version 2.5 when transactions are enabled with `EOSMode.ALPHA`; set it to `false` if you are using transactions but are not concerned about zombie fencing.
Also see [Exactly Once Semantics](#exactly-once).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Also see [`transactionIdPrefix`](#transaction-id-prefix). + +With Spring Boot, it is only necessary to set the `spring.kafka.producer.transaction-id-prefix` property - Boot will automatically configure a `KafkaTransactionManager` bean and wire it into the listener container. + +| |Starting with version 2.5.8, you can now configure the `maxAge` property on the producer factory.
This is useful when using transactional producers that might lie idle for the broker’s `transactional.id.expiration.ms`.
With current `kafka-clients`, this can cause a `ProducerFencedException` without a rebalance.
By setting the `maxAge` to less than `transactional.id.expiration.ms`, the factory will refresh the producer if it is past its max age.|
+|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+##### Using `KafkaTransactionManager`
+
+The `KafkaTransactionManager` is an implementation of Spring Framework’s `PlatformTransactionManager`.
+It is provided with a reference to the producer factory in its constructor.
+If you provide a custom producer factory, it must support transactions.
+See `ProducerFactory.transactionCapable()`.
+
+You can use the `KafkaTransactionManager` with normal Spring transaction support (`@Transactional`, `TransactionTemplate`, and others).
+If a transaction is active, any `KafkaTemplate` operations performed within the scope of the transaction use the transaction’s `Producer`.
+The manager commits or rolls back the transaction, depending on success or failure.
+You must configure the `KafkaTemplate` to use the same `ProducerFactory` as the transaction manager.
+
+##### Transaction Synchronization
+
+This section refers to producer-only transactions (transactions not started by a listener container); see [Using Consumer-Initiated Transactions](#container-transaction-manager) for information about chaining transactions when the container starts the transaction.
+
+If you want to send records to Kafka and perform some database updates, you can use normal Spring transaction management with, say, a `DataSourceTransactionManager`.
+ +``` +@Transactional +public void process(List things) { + things.forEach(thing -> this.kafkaTemplate.send("topic", thing)); + updateDb(things); +} +``` + +The interceptor for the `@Transactional` annotation starts the transaction and the `KafkaTemplate` will synchronize a transaction with that transaction manager; each send will participate in that transaction. +When the method exits, the database transaction will commit followed by the Kafka transaction. +If you wish the commits to be performed in the reverse order (Kafka first), use nested `@Transactional` methods, with the outer method configured to use the `DataSourceTransactionManager`, and the inner method configured to use the `KafkaTransactionManager`. + +See [[ex-jdbc-sync]](#ex-jdbc-sync) for examples of an application that synchronizes JDBC and Kafka transactions in Kafka-first or DB-first configurations. + +| |Starting with versions 2.5.17, 2.6.12, 2.7.9 and 2.8.0, if the commit fails on the synchronized transaction (after the primary transaction has committed), the exception will be thrown to the caller.
Previously, this was silently ignored (logged at debug).
Applications should take remedial action, if necessary, to compensate for the committed primary transaction.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Using Consumer-Initiated Transactions + +The `ChainedKafkaTransactionManager` is now deprecated, since version 2.7; see the javadocs for its super class `ChainedTransactionManager` for more information. +Instead, use a `KafkaTransactionManager` in the container to start the Kafka transaction and annotate the listener method with `@Transactional` to start the other transaction. + +See [[ex-jdbc-sync]](#ex-jdbc-sync) for an example application that chains JDBC and Kafka transactions. + +##### `KafkaTemplate` Local Transactions + +You can use the `KafkaTemplate` to execute a series of operations within a local transaction. +The following example shows how to do so: + +``` +boolean result = template.executeInTransaction(t -> { + t.sendDefault("thing1", "thing2"); + t.sendDefault("cat", "hat"); + return true; +}); +``` + +The argument in the callback is the template itself (`this`). +If the callback exits normally, the transaction is committed. +If an exception is thrown, the transaction is rolled back. + +| |If there is a `KafkaTransactionManager` (or synchronized) transaction in process, it is not used.
Instead, a new "nested" transaction is used.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `transactionIdPrefix` + +As mentioned in [the overview](#transactions), the producer factory is configured with this property to build the producer `transactional.id` property. +There is a dichotomy when specifying this property in that, when running multiple instances of the application with `EOSMode.ALPHA`, it must be the same on all instances to satisfy fencing zombies (also mentioned in the overview) when producing records on a listener container thread. +However, when producing records using transactions that are **not** started by a listener container, the prefix has to be different on each instance. +Version 2.3, makes this simpler to configure, especially in a Spring Boot application. +In previous versions, you had to create two producer factories and `KafkaTemplate` s - one for producing records on a listener container thread and one for stand-alone transactions started by `kafkaTemplate.executeInTransaction()` or by a transaction interceptor on a `@Transactional` method. + +Now, you can override the factory’s `transactionalIdPrefix` on the `KafkaTemplate` and the `KafkaTransactionManager`. + +When using a transaction manager and template for a listener container, you would normally leave this to default to the producer factory’s property. +This value should be the same for all application instances when using `EOSMode.ALPHA`. +With `EOSMode.BETA` it is no longer necessary to use the same `transactional.id`, even for consumer-initiated transactions; in fact, it must be unique on each instance the same as producer-initiated transactions. +For transactions started by the template (or the transaction manager for `@Transaction`) you should set the property on the template and transaction manager respectively. 
+This property must have a different value on each application instance. + +This problem (different rules for `transactional.id`) has been eliminated when `EOSMode.BETA` is being used (with broker versions \>= 2.5); see [Exactly Once Semantics](#exactly-once). + +##### `KafkaTemplate` Transactional and non-Transactional Publishing + +Normally, when a `KafkaTemplate` is transactional (configured with a transaction-capable producer factory), transactions are required. +The transaction can be started by a `TransactionTemplate`, a `@Transactional` method, calling `executeInTransaction`, or by a listener container, when configured with a `KafkaTransactionManager`. +Any attempt to use the template outside the scope of a transaction results in the template throwing an `IllegalStateException`. +Starting with version 2.4.3, you can set the template’s `allowNonTransactional` property to `true`. +In that case, the template will allow the operation to run without a transaction, by calling the `ProducerFactory` 's `createNonTransactionalProducer()` method; the producer will be cached, or thread-bound, as normal for reuse. +See [Using `DefaultKafkaProducerFactory`](#producer-factory). + +##### Transactions with Batch Listeners + +When a listener fails while transactions are being used, the `AfterRollbackProcessor` is invoked to take some action after the rollback occurs. +When using the default `AfterRollbackProcessor` with a record listener, seeks are performed so that the failed record will be redelivered. +With a batch listener, however, the whole batch will be redelivered because the framework doesn’t know which record in the batch failed. +See [After-rollback Processor](#after-rollback) for more information. + +When using a batch listener, version 2.4.2 introduced an alternative mechanism to deal with failures while processing a batch; the `BatchToRecordAdapter`. 
+When a container factory with `batchListener` set to true is configured with a `BatchToRecordAdapter`, the listener is invoked with one record at a time. +This enables error handling within the batch, while still making it possible to stop processing the entire batch, depending on the exception type. +A default `BatchToRecordAdapter` is provided, that can be configured with a standard `ConsumerRecordRecoverer` such as the `DeadLetterPublishingRecoverer`. +The following test case configuration snippet illustrates how to use this feature: + +``` +public static class TestListener { + + final List values = new ArrayList<>(); + + @KafkaListener(id = "batchRecordAdapter", topics = "test") + public void listen(String data) { + values.add(data); + if ("bar".equals(data)) { + throw new RuntimeException("reject partial"); + } + } + +} + +@Configuration +@EnableKafka +public static class Config { + + ConsumerRecord failed; + + @Bean + public TestListener test() { + return new TestListener(); + } + + @Bean + public ConsumerFactory consumerFactory() { + return mock(ConsumerFactory.class); + } + + @Bean + public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory(); + factory.setConsumerFactory(consumerFactory()); + factory.setBatchListener(true); + factory.setBatchToRecordAdapter(new DefaultBatchToRecordAdapter<>((record, ex) -> { + this.failed = record; + })); + return factory; + } + +} +``` + +#### 4.1.13. Exactly Once Semantics + +You can provide a listener container with a `KafkaAwareTransactionManager` instance. +When so configured, the container starts a transaction before invoking the listener. +Any `KafkaTemplate` operations performed by the listener participate in the transaction. 
+If the listener successfully processes the record (or multiple records, when using a `BatchMessageListener`), the container sends the offset(s) to the transaction by using `producer.sendOffsetsToTransaction()`, before the transaction manager commits the transaction. +If the listener throws an exception, the transaction is rolled back and the consumer is repositioned so that the rolled-back record(s) can be retrieved on the next poll. +See [After-rollback Processor](#after-rollback) for more information and for handling records that repeatedly fail. + +Using transactions enables Exactly Once Semantics (EOS). + +This means that, for a `read→process-write` sequence, it is guaranteed that the **sequence** is completed exactly once. +(The read and process have at least once semantics). + +Spring for Apache Kafka version 2.5 and later supports two EOS modes: + +* `ALPHA` - alias for `V1` (deprecated) + +* `BETA` - alias for `V2` (deprecated) + +* `V1` - aka `transactional.id` fencing (since version 0.11.0.0) + +* `V2` - aka fetch-offset-request fencing (since version 2.5) + +With mode `V1`, the producer is "fenced" if another instance with the same `transactional.id` is started. +Spring manages this by using a `Producer` for each `group.id/topic/partition`; when a rebalance occurs a new instance will use the same `transactional.id` and the old producer is fenced. + +With mode `V2`, it is not necessary to have a producer for each `group.id/topic/partition` because consumer metadata is sent along with the offsets to the transaction and the broker can determine if the producer is fenced using that information instead. + +Starting with version 2.6, the default `EOSMode` is `V2`. + +To configure the container to use mode `ALPHA`, set the container property `EOSMode` to `ALPHA`, to revert to the previous behavior. 
+ +| |With `V2` (default), your brokers must be version 2.5 or later; starting with `kafka-clients` version 3.0, the producer will no longer fall back to `V1`; if the broker does not support `V2`, an exception is thrown.
If your brokers are earlier than 2.5, you must set the `EOSMode` to `V1`, leave the `DefaultKafkaProducerFactory` `producerPerConsumerPartition` set to `true` and, if you are using a batch listener, you should set `subBatchPerPartition` to `true`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When your brokers are upgraded to 2.5 or later, you should switch the mode to `V2`, but the number of producers will remain as before. +You can then do a rolling upgrade of your application with `producerPerConsumerPartition` set to `false` to reduce the number of producers; you should also no longer set the `subBatchPerPartition` container property. + +If your brokers are already 2.5 or newer, you should set the `DefaultKafkaProducerFactory` `producerPerConsumerPartition` property to `false`, to reduce the number of producers needed. + +| |When using `EOSMode.V2` with `producerPerConsumerPartition=false` the `transactional.id` must be unique across all application instances.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------| + +When using `V2` mode, it is no longer necessary to set the `subBatchPerPartition` to `true`; it will default to `false` when the `EOSMode` is `V2`. + +Refer to [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics) for more information. 
+ +`V1` and `V2` were previously `ALPHA` and `BETA`; they have been changed to align the framework with [KIP-732](https://cwiki.apache.org/confluence/display/KAFKA/KIP-732%3A+Deprecate+eos-alpha+and+replace+eos-beta+with+eos-v2). + +#### 4.1.14. Wiring Spring Beans into Producer/Consumer Interceptors + +Apache Kafka provides a mechanism to add interceptors to producers and consumers. +These objects are managed by Kafka, not Spring, and so normal Spring dependency injection won’t work for wiring in dependent Spring Beans. +However, you can manually wire in those dependencies using the interceptor `config()` method. +The following Spring Boot application shows how to do this by overriding boot’s default factories to add some dependent bean into the configuration properties. + +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + public ConsumerFactory kafkaConsumerFactory(SomeBean someBean) { + Map consumerProperties = new HashMap<>(); + // consumerProperties.put(..., ...) + // ... + consumerProperties.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MyConsumerInterceptor.class.getName()); + consumerProperties.put("some.bean", someBean); + return new DefaultKafkaConsumerFactory<>(consumerProperties); + } + + @Bean + public ProducerFactory kafkaProducerFactory(SomeBean someBean) { + Map producerProperties = new HashMap<>(); + // producerProperties.put(..., ...) + // ... 
+ Map producerProperties = properties.buildProducerProperties(); + producerProperties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MyProducerInterceptor.class.getName()); + producerProperties.put("some.bean", someBean); + DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerProperties); + return factory; + } + + @Bean + public SomeBean someBean() { + return new SomeBean(); + } + + @KafkaListener(id = "kgk897", topics = "kgh897") + public void listen(String in) { + System.out.println("Received " + in); + } + + @Bean + public ApplicationRunner runner(KafkaTemplate template) { + return args -> template.send("kgh897", "test"); + } + + @Bean + public NewTopic kRequests() { + return TopicBuilder.name("kgh897") + .partitions(1) + .replicas(1) + .build(); + } + +} +``` + +``` +public class SomeBean { + + public void someMethod(String what) { + System.out.println(what + " in my foo bean"); + } + +} +``` + +``` +public class MyProducerInterceptor implements ProducerInterceptor { + + private SomeBean bean; + + @Override + public void configure(Map configs) { + this.bean = (SomeBean) configs.get("some.bean"); + } + + @Override + public ProducerRecord onSend(ProducerRecord record) { + this.bean.someMethod("producer interceptor"); + return record; + } + + @Override + public void onAcknowledgement(RecordMetadata metadata, Exception exception) { + } + + @Override + public void close() { + } + +} +``` + +``` +public class MyConsumerInterceptor implements ConsumerInterceptor { + + private SomeBean bean; + + @Override + public void configure(Map configs) { + this.bean = (SomeBean) configs.get("some.bean"); + } + + @Override + public ConsumerRecords onConsume(ConsumerRecords records) { + this.bean.someMethod("consumer interceptor"); + return records; + } + + @Override + public void onCommit(Map offsets) { + } + + @Override + public void close() { + } + +} +``` + +Result: + +``` +producer interceptor in my foo bean +consumer interceptor in my foo bean 
+Received test +``` + +#### 4.1.15. Pausing and Resuming Listener Containers + +Version 2.1.3 added `pause()` and `resume()` methods to listener containers. +Previously, you could pause a consumer within a `ConsumerAwareMessageListener` and resume it by listening for a `ListenerContainerIdleEvent`, which provides access to the `Consumer` object. +While you could pause a consumer in an idle container by using an event listener, in some cases, this was not thread-safe, since there is no guarantee that the event listener is invoked on the consumer thread. +To safely pause and resume consumers, you should use the `pause` and `resume` methods on the listener containers. +A `pause()` takes effect just before the next `poll()`; a `resume()` takes effect just after the current `poll()` returns. +When a container is paused, it continues to `poll()` the consumer, avoiding a rebalance if group management is being used, but it does not retrieve any records. +See the Kafka documentation for more information. + +Starting with version 2.1.5, you can call `isPauseRequested()` to see if `pause()` has been called. +However, the consumers might not have actually paused yet. `isConsumerPaused()` returns true if all `Consumer` instances have actually paused. + +In addition (also since 2.1.5), `ConsumerPausedEvent` and `ConsumerResumedEvent` instances are published with the container as the `source` property and the `TopicPartition` instances involved in the `partitions` property. 
+ +The following simple Spring Boot application demonstrates by using the container registry to get a reference to a `@KafkaListener` method’s container and pausing or resuming its consumers as well as receiving the corresponding events: + +``` +@SpringBootApplication +public class Application implements ApplicationListener { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args).close(); + } + + @Override + public void onApplicationEvent(KafkaEvent event) { + System.out.println(event); + } + + @Bean + public ApplicationRunner runner(KafkaListenerEndpointRegistry registry, + KafkaTemplate template) { + return args -> { + template.send("pause.resume.topic", "thing1"); + Thread.sleep(10_000); + System.out.println("pausing"); + registry.getListenerContainer("pause.resume").pause(); + Thread.sleep(10_000); + template.send("pause.resume.topic", "thing2"); + Thread.sleep(10_000); + System.out.println("resuming"); + registry.getListenerContainer("pause.resume").resume(); + Thread.sleep(10_000); + }; + } + + @KafkaListener(id = "pause.resume", topics = "pause.resume.topic") + public void listen(String in) { + System.out.println(in); + } + + @Bean + public NewTopic topic() { + return TopicBuilder.name("pause.resume.topic") + .partitions(2) + .replicas(1) + .build(); + } + +} +``` + +The following listing shows the results of the preceding example: + +``` +partitions assigned: [pause.resume.topic-1, pause.resume.topic-0] +thing1 +pausing +ConsumerPausedEvent [partitions=[pause.resume.topic-1, pause.resume.topic-0]] +resuming +ConsumerResumedEvent [partitions=[pause.resume.topic-1, pause.resume.topic-0]] +thing2 +``` + +#### 4.1.16. 
Pausing and Resuming Partitions on Listener Containers + +Since version 2.7 you can pause and resume the consumption of specific partitions assigned to that consumer by using the `pausePartition(TopicPartition topicPartition)` and `resumePartition(TopicPartition topicPartition)` methods in the listener containers. +The pausing and resuming takes place respectively before and after the `poll()` similar to the `pause()` and `resume()` methods. +The `isPartitionPauseRequested()` method returns true if pause for that partition has been requested. +The `isPartitionPaused()` method returns true if that partition has effectively been paused. + +Also since version 2.7 `ConsumerPartitionPausedEvent` and `ConsumerPartitionResumedEvent` instances are published with the container as the `source` property and the `TopicPartition` instance. + +#### 4.1.17. Serialization, Deserialization, and Message Conversion + +##### Overview + +Apache Kafka provides a high-level API for serializing and deserializing record values as well as their keys. +It is present with the `org.apache.kafka.common.serialization.Serializer` and`org.apache.kafka.common.serialization.Deserializer` abstractions with some built-in implementations. +Meanwhile, we can specify serializer and deserializer classes by using `Producer` or `Consumer` configuration properties. +The following example shows how to do so: + +``` +props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class); +props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); +... +props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class); +props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); +``` + +For more complex or particular cases, the `KafkaConsumer` (and, therefore, `KafkaProducer`) provides overloaded +constructors to accept `Serializer` and `Deserializer` instances for `keys` and `values`, respectively. 
+ +When you use this API, the `DefaultKafkaProducerFactory` and `DefaultKafkaConsumerFactory` also provide properties (through constructors or setter methods) to inject custom `Serializer` and `Deserializer` instances into the target `Producer` or `Consumer`. +Also, you can pass in `Supplier` or `Supplier` instances through constructors - these `Supplier` s are called on creation of each `Producer` or `Consumer`. + +##### String serialization + +Since version 2.5, Spring for Apache Kafka provides `ToStringSerializer` and `ParseStringDeserializer` classes that use String representation of entities. +They rely on methods `toString` and some `Function` or `BiFunction` to parse the String and populate properties of an instance. +Usually, this would invoke some static method on the class, such as `parse`: + +``` +ToStringSerializer thingSerializer = new ToStringSerializer<>(); +//... +ParseStringDeserializer deserializer = new ParseStringDeserializer<>(Thing::parse); +``` + +By default, the `ToStringSerializer` is configured to convey type information about the serialized entity in the record `Headers`. +You can disable this by setting the `addTypeInfo` property to false. +This information can be used by `ParseStringDeserializer` on the receiving side. + +* `ToStringSerializer.ADD_TYPE_INFO_HEADERS` (default `true`): You can set it to `false` to disable this feature on the `ToStringSerializer` (sets the `addTypeInfo` property). + +``` +ParseStringDeserializer deserializer = new ParseStringDeserializer<>((str, headers) -> { + byte[] header = headers.lastHeader(ToStringSerializer.VALUE_TYPE).value(); + String entityType = new String(header); + + if (entityType.contains("Thing")) { + return Thing.parse(str); + } + else { + // ...parsing logic + } +}); +``` + +You can configure the `Charset` used to convert `String` to/from `byte[]` with the default being `UTF-8`. 
+ +You can configure the deserializer with the name of the parser method using `ConsumerConfig` properties: + +* `ParseStringDeserializer.KEY_PARSER` + +* `ParseStringDeserializer.VALUE_PARSER` + +The properties must contain the fully qualified name of the class followed by the method name, separated by a period `.`. +The method must be static and have a signature of either `(String, Headers)` or `(String)`. + +A `ToFromStringSerde` is also provided, for use with Kafka Streams. + +##### JSON + +Spring for Apache Kafka also provides `JsonSerializer` and `JsonDeserializer` implementations that are based on the +Jackson JSON object mapper. +The `JsonSerializer` allows writing any Java object as a JSON `byte[]`. +The `JsonDeserializer` requires an additional `Class targetType` argument to allow the deserialization of a consumed `byte[]` to the proper target object. +The following example shows how to create a `JsonDeserializer`: + +``` +JsonDeserializer thingDeserializer = new JsonDeserializer<>(Thing.class); +``` + +You can customize both `JsonSerializer` and `JsonDeserializer` with an `ObjectMapper`. +You can also extend them to implement some particular configuration logic in the `configure(Map configs, boolean isKey)` method. + +Starting with version 2.3, all the JSON-aware components are configured by default with a `JacksonUtils.enhancedObjectMapper()` instance, which comes with the `MapperFeature.DEFAULT_VIEW_INCLUSION` and `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` features disabled. +Also such an instance is supplied with well-known modules for custom data types, such as Java time and Kotlin support. +See `JacksonUtils.enhancedObjectMapper()` JavaDocs for more information. +This method also registers an `org.springframework.kafka.support.JacksonMimeTypeModule` for `org.springframework.util.MimeType` objects serialization into the plain string for inter-platform compatibility over the network. 
+A `JacksonMimeTypeModule` can be registered as a bean in the application context and it will be auto-configured into [Spring Boot `ObjectMapper` instance](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-spring-mvc.html#howto-customize-the-jackson-objectmapper). + +Also starting with version 2.3, the `JsonDeserializer` provides `TypeReference`-based constructors for better handling of target generic container types. + +Starting with version 2.1, you can convey type information in record `Headers`, allowing the handling of multiple types. +In addition, you can configure the serializer and deserializer by using the following Kafka properties. +They have no effect if you have provided `Serializer` and `Deserializer` instances for `KafkaConsumer` and `KafkaProducer`, respectively. + +###### Configuration Properties + +* `JsonSerializer.ADD_TYPE_INFO_HEADERS` (default `true`): You can set it to `false` to disable this feature on the `JsonSerializer` (sets the `addTypeInfo` property). + +* `JsonSerializer.TYPE_MAPPINGS` (default `empty`): See [Mapping Types](#serdes-mapping-types). + +* `JsonDeserializer.USE_TYPE_INFO_HEADERS` (default `true`): You can set it to `false` to ignore headers set by the serializer. + +* `JsonDeserializer.REMOVE_TYPE_INFO_HEADERS` (default `true`): You can set it to `false` to retain headers set by the serializer. + +* `JsonDeserializer.KEY_DEFAULT_TYPE`: Fallback type for deserialization of keys if no header information is present. + +* `JsonDeserializer.VALUE_DEFAULT_TYPE`: Fallback type for deserialization of values if no header information is present. + +* `JsonDeserializer.TRUSTED_PACKAGES` (default `java.util`, `java.lang`): Comma-delimited list of package patterns allowed for deserialization. `*` means deserialize all. + +* `JsonDeserializer.TYPE_MAPPINGS` (default `empty`): See [Mapping Types](#serdes-mapping-types). 
+ +* `JsonDeserializer.KEY_TYPE_METHOD` (default `empty`): See [Using Methods to Determine Types](#serdes-type-methods). + +* `JsonDeserializer.VALUE_TYPE_METHOD` (default `empty`): See [Using Methods to Determine Types](#serdes-type-methods). + +Starting with version 2.2, the type information headers (if added by the serializer) are removed by the deserializer. +You can revert to the previous behavior by setting the `removeTypeHeaders` property to `false`, either directly on the deserializer or with the configuration property described earlier. + +See also [[tip-json]](#tip-json). + +| |Starting with version 2.8, if you construct the serializer or deserializer programmatically as shown in [Programmatic Construction](#prog-json), the above properties will be applied by the factories, as long as you have not set any properties explicitly (using `set*()` methods or using the fluent API).
Previously, when creating programmatically, the configuration properties were never applied; this is still the case if you explicitly set properties on the object directly.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Mapping Types + +Starting with version 2.2, when using JSON, you can now provide type mappings by using the properties in the preceding list. +Previously, you had to customize the type mapper within the serializer and deserializer. +Mappings consist of a comma-delimited list of `token:className` pairs. +On outbound, the payload’s class name is mapped to the corresponding token. +On inbound, the token in the type header is mapped to the corresponding class name. + +The following example creates a set of mappings: + +``` +senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class); +senderProps.put(JsonSerializer.TYPE_MAPPINGS, "cat:com.mycat.Cat, hat:com.myhat.hat"); +... +consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class); +consumerProps.put(JsonDeserializer.TYPE_MAPPINGS, "cat:com.yourcat.Cat, hat:com.yourhat.hat"); +``` + +| |The corresponding objects must be compatible.| +|---|---------------------------------------------| + +If you use [Spring Boot](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-messaging.html#boot-features-kafka), you can provide these properties in the `application.properties` (or yaml) file. 
+The following example shows how to do so: + +``` +spring.kafka.producer.value-serializer=org.springframework.kafka.support.serializer.JsonSerializer +spring.kafka.producer.properties.spring.json.type.mapping=cat:com.mycat.Cat,hat:com.myhat.Hat +``` + +| |You can perform only simple configuration with properties.
For more advanced configuration (such as using a custom `ObjectMapper` in the serializer and deserializer), you should use the producer and consumer factory constructors that accept a pre-built serializer and deserializer.
The following Spring Boot example overrides the default factories:

```
@Bean
public ConsumerFactory kafkaConsumerFactory(JsonDeserializer customValueDeserializer) {
Map properties = new HashMap<>();
// properties.put(..., ...)
// ...
return new DefaultKafkaConsumerFactory<>(properties,
new StringDeserializer(), customValueDeserializer);
}

@Bean
public ProducerFactory kafkaProducerFactory(JsonSerializer customValueSerializer) {

return new DefaultKafkaProducerFactory<>(properties.buildProducerProperties(),
new StringSerializer(), customValueSerializer);
}
```

Setters are also provided, as an alternative to using these constructors.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.2, you can explicitly configure the deserializer to use the supplied target type and ignore type information in headers by using one of the overloaded constructors that have a boolean `useHeadersIfPresent` (which is `true` by default). +The following example shows how to do so: + +``` +DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(props, + new IntegerDeserializer(), new JsonDeserializer<>(Cat1.class, false)); +``` + +###### Using Methods to Determine Types + +Starting with version 2.5, you can now configure the deserializer, via properties, to invoke a method to determine the target type. +If present, this will override any of the other techniques discussed above. 
+This can be useful if the data is published by an application that does not use the Spring serializer and you need to deserialize to different types depending on the data, or other headers. +Set these properties to the method name - a fully qualified class name followed by the method name, separated by a period `.`. +The method must be declared as `public static`, have one of three signatures `(String topic, byte[] data, Headers headers)`, `(byte[] data, Headers headers)` or `(byte[] data)` and return a Jackson `JavaType`. + +* `JsonDeserializer.KEY_TYPE_METHOD` : `spring.json.key.type.method` + +* `JsonDeserializer.VALUE_TYPE_METHOD` : `spring.json.value.type.method` + +You can use arbitrary headers or inspect the data to determine the type. + +Example + +``` +JavaType thing1Type = TypeFactory.defaultInstance().constructType(Thing1.class); + +JavaType thing2Type = TypeFactory.defaultInstance().constructType(Thing2.class); + +public static JavaType thingOneOrThingTwo(byte[] data, Headers headers) { + // {"thisIsAFieldInThing1":"value", ... + if (data[21] == '1') { + return thing1Type; + } + else { + return thing2Type; + } +} +``` + +For more sophisticated data inspection consider using `JsonPath` or similar but, the simpler the test to determine the type, the more efficient the process will be. + +The following is an example of creating the deserializer programmatically (when providing the consumer factory with the deserializer in the constructor): + +``` +JsonDeserializer deser = new JsonDeserializer<>() + .trustedPackages("*") + .typeResolver(SomeClass::thing1Thing2JavaTypeForTopic); + +... + +public static JavaType thing1Thing2JavaTypeForTopic(String topic, byte[] data, Headers headers) { + ... +} +``` + +###### Programmatic Construction + +When constructing the serializer/deserializer programmatically for use in the producer/consumer factory, since version 2.3, you can use the fluent API, which simplifies configuration. 
+ +``` +@Bean +public ProducerFactory pf() { + Map props = new HashMap<>(); + // props.put(..., ...) + // ... + DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(props, + new JsonSerializer() + .forKeys() + .noTypeInfo(), + new JsonSerializer() + .noTypeInfo()); + return pf; +} + +@Bean +public ConsumerFactory cf() { + Map props = new HashMap<>(); + // props.put(..., ...) + // ... + DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(props, + new JsonDeserializer<>(MyKeyType.class) + .forKeys() + .ignoreTypeHeaders(), + new JsonDeserializer<>(MyValueType.class) + .ignoreTypeHeaders()); + return cf; +} +``` + +To provide type mapping programmatically, similar to [Using Methods to Determine Types](#serdes-type-methods), use the `typeFunction` property. + +Example + +``` +JsonDeserializer deser = new JsonDeserializer<>() + .trustedPackages("*") + .typeFunction(MyUtils::thingOneOrThingTwo); +``` + +Alternatively, as long as you don’t use the fluent API to configure properties, or set them using `set*()` methods, the factories will configure the serializer/deserializer using the configuration properties; see [Configuration Properties](#serdes-json-config). + +##### Delegating Serializer and Deserializer + +###### Using Headers + +Version 2.3 introduced the `DelegatingSerializer` and `DelegatingDeserializer`, which allow producing and consuming records with different key and/or value types. +Producers must set a header `DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR` to a selector value that is used to select which serializer to use for the value and `DelegatingSerializer.KEY_SERIALIZATION_SELECTOR` for the key; if a match is not found, an `IllegalStateException` is thrown. + +For incoming records, the deserializer uses the same headers to select the deserializer to use; if a match is not found or the header is not present, the raw `byte[]` is returned. 
+ +You can configure the map of selector to `Serializer` / `Deserializer` via a constructor, or you can configure it via Kafka producer/consumer properties with the keys `DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR_CONFIG` and `DelegatingSerializer.KEY_SERIALIZATION_SELECTOR_CONFIG`. +For the serializer, the producer property can be a `Map` where the key is the selector and the value is a `Serializer` instance, a serializer `Class` or the class name. +The property can also be a String of comma-delimited map entries, as shown below. + +For the deserializer, the consumer property can be a `Map` where the key is the selector and the value is a `Deserializer` instance, a deserializer `Class` or the class name. +The property can also be a String of comma-delimited map entries, as shown below. + +To configure using properties, use the following syntax: + +``` +producerProps.put(DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR_CONFIG, + "thing1:com.example.MyThing1Serializer, thing2:com.example.MyThing2Serializer") + +consumerProps.put(DelegatingDeserializer.VALUE_SERIALIZATION_SELECTOR_CONFIG, + "thing1:com.example.MyThing1Deserializer, thing2:com.example.MyThing2Deserializer") +``` + +Producers would then set the `DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR` header to `thing1` or `thing2`. + +This technique supports sending different types to the same topic (or different topics). + +| |Starting with version 2.5.1, it is not necessary to set the selector header, if the type (key or value) is one of the standard types supported by `Serdes` (`Long`, `Integer`, etc).
Instead, the serializer will set the header to the class name of the type.
It is not necessary to configure serializers or deserializers for these types, they will be created (once) dynamically.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For another technique to send different types to different topics, see [Using `RoutingKafkaTemplate`](#routing-template). + +###### By Type + +Version 2.8 introduced the `DelegatingByTypeSerializer`. + +``` +@Bean +public ProducerFactory producerFactory(Map config) { + return new DefaultKafkaProducerFactory<>(config, + null, new DelegatingByTypeSerializer(Map.of( + byte[].class, new ByteArraySerializer(), + Bytes.class, new BytesSerializer(), + String.class, new StringSerializer()))); +} +``` + +Starting with version 2.8.3, you can configure the serializer to check if the map key is assignable from the target object, useful when a delegate serializer can serialize sub classes. +In this case, if there are amiguous matches, an ordered `Map`, such as a `LinkedHashMap` should be provided. + +###### By Topic + +Starting with version 2.8, the `DelegatingByTopicSerializer` and `DelegatingByTopicDeserializer` allow selection of a serializer/deserializer based on the topic name. +Regex `Pattern` s are used to lookup the instance to use. +The map can be configured using a constructor, or via properties (a comma delimited list of `pattern:serializer`). + +``` +producerConfigs.put(DelegatingByTopicSerializer.VALUE_SERIALIZATION_TOPIC_CONFIG, + "topic[0-4]:" + ByteArraySerializer.class.getName() + + ", topic[5-9]:" + StringSerializer.class.getName()); +... 
+consumerConfigs.put(DelegatingByTopicDeserializer.VALUE_SERIALIZATION_TOPIC_CONFIG, + "topic[0-4]:" + ByteArrayDeserializer.class.getName() + + ", topic[5-9]:" + StringDeserializer.class.getName()); +``` + +Use `KEY_SERIALIZATION_TOPIC_CONFIG` when using this for keys. + +``` +@Bean +public ProducerFactory producerFactory(Map config) { + return new DefaultKafkaProducerFactory<>(config, + null, + new DelegatingByTopicSerializer(Map.of( + Pattern.compile("topic[0-4]"), new ByteArraySerializer(), + Pattern.compile("topic[5-9]"), new StringSerializer())), + new JsonSerializer()); // default +} +``` + +You can specify a default serializer/deserializer to use when there is no pattern match using `DelegatingByTopicSerialization.KEY_SERIALIZATION_TOPIC_DEFAULT` and `DelegatingByTopicSerialization.VALUE_SERIALIZATION_TOPIC_DEFAULT`. + +An additional property `DelegatingByTopicSerialization.CASE_SENSITIVE` (default `true`), when set to `false` makes the topic lookup case insensitive. + +##### Retrying Deserializer + +The `RetryingDeserializer` uses a delegate `Deserializer` and `RetryTemplate` to retry deserialization when the delegate might have transient errors, such as network issues, during deserialization. + +``` +ConsumerFactory cf = new DefaultKafkaConsumerFactory(myConsumerConfigs, + new RetryingDeserializer(myUnreliableKeyDeserializer, retryTemplate), + new RetryingDeserializer(myUnreliableValueDeserializer, retryTemplate)); +``` + +Refer to the [spring-retry](https://github.com/spring-projects/spring-retry) project for configuration of the `RetryTemplate` with a retry policy, back off policy, etc. 
+ +##### Spring Messaging Message Conversion + +Although the `Serializer` and `Deserializer` API is quite simple and flexible from the low-level Kafka `Consumer` and `Producer` perspective, you might need more flexibility at the Spring Messaging level, when using either `@KafkaListener` or [Spring Integration’s Apache Kafka Support](https://docs.spring.io/spring-integration/docs/current/reference/html/kafka.html#kafka). +To let you easily convert to and from `org.springframework.messaging.Message`, Spring for Apache Kafka provides a `MessageConverter` abstraction with the `MessagingMessageConverter` implementation and its `JsonMessageConverter` (and subclasses) customization. +You can inject the `MessageConverter` into a `KafkaTemplate` instance directly and by using `AbstractKafkaListenerContainerFactory` bean definition for the `@KafkaListener.containerFactory()` property. +The following example shows how to do so: + +``` +@Bean +public KafkaListenerContainerFactory kafkaJsonListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory()); + factory.setMessageConverter(new JsonMessageConverter()); + return factory; +} +... +@KafkaListener(topics = "jsonData", + containerFactory = "kafkaJsonListenerContainerFactory") +public void jsonListener(Cat cat) { +... +} +``` + +When using Spring Boot, simply define the converter as a `@Bean` and Spring Boot auto configuration will wire it into the auto-configured template and container factory. + +When you use a `@KafkaListener`, the parameter type is provided to the message converter to assist with the conversion. + +| |This type inference can be achieved only when the `@KafkaListener` annotation is declared at the method level.
With a class-level `@KafkaListener`, the payload type is used to select which `@KafkaHandler` method to invoke, so it must already have been converted before the method can be chosen.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |On the consumer side, you can configure a `JsonMessageConverter`; it can handle `ConsumerRecord` values of type `byte[]`, `Bytes` and `String` so should be used in conjunction with a `ByteArrayDeserializer`, `BytesDeserializer` or `StringDeserializer`.
(`byte[]` and `Bytes` are more efficient because they avoid an unnecessary `byte[]` to `String` conversion).
You can also configure the specific subclass of `JsonMessageConverter` corresponding to the deserializer, if you so wish.

On the producer side, when you use Spring Integration or the `KafkaTemplate.send(Message message)` method (see [Using `KafkaTemplate`](#kafka-template)), you must configure a message converter that is compatible with the configured Kafka `Serializer`.

* `StringJsonMessageConverter` with `StringSerializer`

* `BytesJsonMessageConverter` with `BytesSerializer`

* `ByteArrayJsonMessageConverter` with `ByteArraySerializer`

Again, using `byte[]` or `Bytes` is more efficient because they avoid a `String` to `byte[]` conversion.

For convenience, starting with version 2.3, the framework also provides a `StringOrBytesSerializer` which can serialize all three value types so it can be used with any of the message converters.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.7.1, message payload conversion can be delegated to a `spring-messaging` `SmartMessageConverter`; this enables conversion, for example, to be based on the `MessageHeaders.CONTENT_TYPE` header. + +| |The `KafkaMessageConverter.fromMessage()` method is called for outbound conversion to a `ProducerRecord` with the message payload in the `ProducerRecord.value()` property.
The `KafkaMessageConverter.toMessage()` method is called for inbound conversion from `ConsumerRecord` with the payload being the `ConsumerRecord.value()` property.
The `SmartMessageConverter.toMessage()` method is called to create a new outbound `Message` from the `Message` passed to `fromMessage()` (usually by `KafkaTemplate.send(Message msg)`).
Similarly, in the `KafkaMessageConverter.toMessage()` method, after the converter has created a new `Message` from the `ConsumerRecord`, the `SmartMessageConverter.fromMessage()` method is called and then the final inbound message is created with the newly converted payload.
In either case, if the `SmartMessageConverter` returns `null`, the original message is used.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When the default converter is used in the `KafkaTemplate` and listener container factory, you configure the `SmartMessageConverter` by calling `setMessagingConverter()` on the template and via the `contentMessageConverter` property on `@KafkaListener` methods. + +Examples: + +``` +template.setMessagingConverter(mySmartConverter); +``` + +``` +@KafkaListener(id = "withSmartConverter", topics = "someTopic", + contentTypeConverter = "mySmartConverter") +public void smart(Thing thing) { + ... +} +``` + +###### Using Spring Data Projection Interfaces + +Starting with version 2.1.1, you can convert JSON to a Spring Data Projection interface instead of a concrete type. +This allows very selective, and low-coupled bindings to data, including the lookup of values from multiple places inside the JSON document. 
+For example the following interface can be defined as message payload type: + +``` +interface SomeSample { + + @JsonPath({ "$.username", "$.user.name" }) + String getUsername(); + +} +``` + +``` +@KafkaListener(id="projection.listener", topics = "projection") +public void projection(SomeSample in) { + String username = in.getUsername(); + ... +} +``` + +Accessor methods will be used to lookup the property name as field in the received JSON document by default. +The `@JsonPath` expression allows customization of the value lookup, and even to define multiple JSON Path expressions, to lookup values from multiple places until an expression returns an actual value. + +To enable this feature, use a `ProjectingMessageConverter` configured with an appropriate delegate converter (used for outbound conversion and converting non-projection interfaces). +You must also add `spring-data:spring-data-commons` and `com.jayway.jsonpath:json-path` to the class path. + +When used as the parameter to a `@KafkaListener` method, the interface type is automatically passed to the converter as normal. + +##### Using `ErrorHandlingDeserializer` + +When a deserializer fails to deserialize a message, Spring has no way to handle the problem, because it occurs before the `poll()` returns. +To solve this problem, the `ErrorHandlingDeserializer` has been introduced. +This deserializer delegates to a real deserializer (key or value). +If the delegate fails to deserialize the record content, the `ErrorHandlingDeserializer` returns a `null` value and a `DeserializationException` in a header that contains the cause and the raw bytes. +When you use a record-level `MessageListener`, if the `ConsumerRecord` contains a `DeserializationException` header for either the key or value, the container’s `ErrorHandler` is called with the failed `ConsumerRecord`. +The record is not passed to the listener. 
+ +Alternatively, you can configure the `ErrorHandlingDeserializer` to create a custom value by providing a `failedDeserializationFunction`, which is a `Function`. +This function is invoked to create an instance of `T`, which is passed to the listener in the usual fashion. +An object of type `FailedDeserializationInfo`, which contains all the contextual information is provided to the function. +You can find the `DeserializationException` (as a serialized Java object) in headers. +See the [Javadoc](https://docs.spring.io/spring-kafka/api/org/springframework/kafka/support/serializer/ErrorHandlingDeserializer.html) for the `ErrorHandlingDeserializer` for more information. + +You can use the `DefaultKafkaConsumerFactory` constructor that takes key and value `Deserializer` objects and wire in appropriate `ErrorHandlingDeserializer` instances that you have configured with the proper delegates. +Alternatively, you can use consumer configuration properties (which are used by the `ErrorHandlingDeserializer`) to instantiate the delegates. +The property names are `ErrorHandlingDeserializer.KEY_DESERIALIZER_CLASS` and `ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS`. +The property value can be a class or class name. +The following example shows how to set these properties: + +``` +... // other props +props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class); +props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class); +props.put(ErrorHandlingDeserializer.KEY_DESERIALIZER_CLASS, JsonDeserializer.class); +props.put(JsonDeserializer.KEY_DEFAULT_TYPE, "com.example.MyKey") +props.put(ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS, JsonDeserializer.class.getName()); +props.put(JsonDeserializer.VALUE_DEFAULT_TYPE, "com.example.MyValue") +props.put(JsonDeserializer.TRUSTED_PACKAGES, "com.example") +return new DefaultKafkaConsumerFactory<>(props); +``` + +The following example uses a `failedDeserializationFunction`. 
+ +``` +public class BadFoo extends Foo { + + private final FailedDeserializationInfo failedDeserializationInfo; + + public BadFoo(FailedDeserializationInfo failedDeserializationInfo) { + this.failedDeserializationInfo = failedDeserializationInfo; + } + + public FailedDeserializationInfo getFailedDeserializationInfo() { + return this.failedDeserializationInfo; + } + +} + +public class FailedFooProvider implements Function { + + @Override + public Foo apply(FailedDeserializationInfo info) { + return new BadFoo(info); + } + +} +``` + +The preceding example uses the following configuration: + +``` +... +consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class); +consumerProps.put(ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS, JsonDeserializer.class); +consumerProps.put(ErrorHandlingDeserializer.VALUE_FUNCTION, FailedFooProvider.class); +... +``` + +| |If the consumer is configured with an `ErrorHandlingDeserializer` it is important to configure the `KafkaTemplate` and its producer with a serializer that can handle normal objects as well as raw `byte[]` values, which result from deserialization exceptions.
The generic value type of the template should be `Object`.
One technique is to use the `DelegatingByTypeSerializer`; an example follows:| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +@Bean +public ProducerFactory producerFactory() { + return new DefaultKafkaProducerFactory<>(producerConfiguration(), new StringSerializer(), + new DelegatingByTypeSerializer(Map.of(byte[].class, new ByteArraySerializer(), + MyNormalObject.class, new JsonSerializer()))); +} + +@Bean +public KafkaTemplate kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); +} +``` + +When using an `ErrorHandlingDeserializer` with a batch listener, you must check for the deserialization exceptions in message headers. +When used with a `DefaultBatchErrorHandler`, you can use that header to determine which record the exception failed on and communicate to the error handler via a `BatchListenerFailedException`. 
+ +``` +@KafkaListener(id = "test", topics = "test") +void listen(List in, @Header(KafkaHeaders.BATCH_CONVERTED_HEADERS) List> headers) { + for (int i = 0; i < in.size(); i++) { + Thing thing = in.get(i); + if (thing == null + && headers.get(i).get(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER) != null) { + DeserializationException deserEx = ListenerUtils.byteArrayToDeserializationException(this.logger, + (byte[]) headers.get(i).get(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER)); + if (deserEx != null) { + logger.error(deserEx, "Record at index " + i + " could not be deserialized"); + } + throw new BatchListenerFailedException("Deserialization", deserEx, i); + } + process(thing); + } +} +``` + +`ListenerUtils.byteArrayToDeserializationException()` can be used to convert the header to a `DeserializationException`. + +When consuming `List`, `ListenerUtils.getExceptionFromHeader()` is used instead: + +``` +@KafkaListener(id = "kgh2036", topics = "kgh2036") +void listen(List> in) { + for (int i = 0; i < in.size(); i++) { + ConsumerRecord rec = in.get(i); + if (rec.value() == null) { + DeserializationException deserEx = ListenerUtils.getExceptionFromHeader(rec, + SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER, this.logger); + if (deserEx != null) { + logger.error(deserEx, "Record at offset " + rec.offset() + " could not be deserialized"); + throw new BatchListenerFailedException("Deserialization", deserEx, i); + } + } + process(rec.value()); + } +} +``` + +##### Payload Conversion with Batch Listeners + +You can also use a `JsonMessageConverter` within a `BatchMessagingMessageConverter` to convert batch messages when you use a batch listener container factory. +See [Serialization, Deserialization, and Message Conversion](#serdes) and [Spring Messaging Message Conversion](#messaging-message-conversion) for more information. + +By default, the type for the conversion is inferred from the listener argument. 
+If you configure the `JsonMessageConverter` with a `DefaultJackson2TypeMapper` that has its `TypePrecedence` set to `TYPE_ID` (instead of the default `INFERRED`), the converter uses the type information in headers (if present) instead. +This allows, for example, listener methods to be declared with interfaces instead of concrete classes. +Also, the type converter supports mapping, so the deserialization can be to a different type than the source (as long as the data is compatible). +This is also useful when you use [class-level `@KafkaListener` instances](#class-level-kafkalistener) where the payload must have already been converted to determine which method to invoke. +The following example creates beans that use this method: + +``` +@Bean +public KafkaListenerContainerFactory kafkaListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory()); + factory.setBatchListener(true); + factory.setMessageConverter(new BatchMessagingMessageConverter(converter())); + return factory; +} + +@Bean +public JsonMessageConverter converter() { + return new JsonMessageConverter(); +} +``` + +Note that, for this to work, the method signature for the conversion target must be a container object with a single generic parameter type, such as the following: + +``` +@KafkaListener(topics = "blc1") +public void listen(List foos, @Header(KafkaHeaders.OFFSET) List offsets) { + ... +} +``` + +Note that you can still access the batch headers. + +If the batch converter has a record converter that supports it, you can also receive a list of messages where the payloads are converted according to the generic type. +The following example shows how to do so: + +``` +@KafkaListener(topics = "blc3", groupId = "blc3") +public void listen1(List> fooMessages) { + ... 
+} +``` + +##### `ConversionService` Customization + +Starting with version 2.1.1, the `org.springframework.core.convert.ConversionService` used by the default `o.s.messaging.handler.annotation.support.MessageHandlerMethodFactory` to resolve parameters for the invocation of a listener method is supplied with all beans that implement any of the following interfaces: + +* `org.springframework.core.convert.converter.Converter` + +* `org.springframework.core.convert.converter.GenericConverter` + +* `org.springframework.format.Formatter` + +This lets you further customize listener deserialization without changing the default configuration for `ConsumerFactory` and `KafkaListenerContainerFactory`. + +| |Setting a custom `MessageHandlerMethodFactory` on the `KafkaListenerEndpointRegistrar` through a `KafkaListenerConfigurer` bean disables this feature.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Adding custom `HandlerMethodArgumentResolver` to `@KafkaListener` + +Starting with version 2.4.2 you are able to add your own `HandlerMethodArgumentResolver` and resolve custom method parameters. +All you need is to implement `KafkaListenerConfigurer` and use method `setCustomMethodArgumentResolvers()` from class `KafkaListenerEndpointRegistrar`. 
+ +``` +@Configuration +class CustomKafkaConfig implements KafkaListenerConfigurer { + + @Override + public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) { + registrar.setCustomMethodArgumentResolvers( + new HandlerMethodArgumentResolver() { + + @Override + public boolean supportsParameter(MethodParameter parameter) { + return CustomMethodArgument.class.isAssignableFrom(parameter.getParameterType()); + } + + @Override + public Object resolveArgument(MethodParameter parameter, Message message) { + return new CustomMethodArgument( + message.getHeaders().get(KafkaHeaders.RECEIVED_TOPIC, String.class) + ); + } + } + ); + } + +} +``` + +You can also completely replace the framework’s argument resolution by adding a custom `MessageHandlerMethodFactory` to the `KafkaListenerEndpointRegistrar` bean. +If you do this, and your application needs to handle tombstone records, with a `null` `value()` (e.g. from a compacted topic), you should add a `KafkaNullAwarePayloadArgumentResolver` to the factory; it must be the last resolver because it supports all types and can match arguments without a `@Payload` annotation. +If you are using a `DefaultMessageHandlerMethodFactory`, set this resolver as the last custom resolver; the factory will ensure that this resolver will be used before the standard `PayloadMethodArgumentResolver`, which has no knowledge of `KafkaNull` payloads. + +See also [Null Payloads and Log Compaction of 'Tombstone' Records](#tombstones). + +#### 4.1.18. Message Headers + +The 0.11.0.0 client introduced support for headers in messages. +As of version 2.0, Spring for Apache Kafka now supports mapping these headers to and from `spring-messaging` `MessageHeaders`. + +| |Previous versions mapped `ConsumerRecord` and `ProducerRecord` to spring-messaging `Message`, where the value property is mapped to and from the `payload` and other properties (`topic`, `partition`, and so on) were mapped to headers.
This is still the case, but additional (arbitrary) headers can now be mapped.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Apache Kafka headers have a simple API, shown in the following interface definition: + +``` +public interface Header { + + String key(); + + byte[] value(); + +} +``` + +The `KafkaHeaderMapper` strategy is provided to map header entries between Kafka `Headers` and `MessageHeaders`. +Its interface definition is as follows: + +``` +public interface KafkaHeaderMapper { + + void fromHeaders(MessageHeaders headers, Headers target); + + void toHeaders(Headers source, Map target); + +} +``` + +The `DefaultKafkaHeaderMapper` maps the key to the `MessageHeaders` header name and, in order to support rich header types for outbound messages, JSON conversion is performed. +A “special” header (with a key of `spring_json_header_types`) contains a JSON map of `:`. +This header is used on the inbound side to provide appropriate conversion of each header value to the original type. + +On the inbound side, all Kafka `Header` instances are mapped to `MessageHeaders`. +On the outbound side, by default, all `MessageHeaders` are mapped, except `id`, `timestamp`, and the headers that map to `ConsumerRecord` properties. + +You can specify which headers are to be mapped for outbound messages, by providing patterns to the mapper. +The following listing shows a number of example mappings: + +``` +public DefaultKafkaHeaderMapper() { (1) + ... +} + +public DefaultKafkaHeaderMapper(ObjectMapper objectMapper) { (2) + ... +} + +public DefaultKafkaHeaderMapper(String... patterns) { (3) + ... +} + +public DefaultKafkaHeaderMapper(ObjectMapper objectMapper, String... patterns) { (4) + ... 
+}
+```
+
+|**1**| Uses a default Jackson `ObjectMapper` and maps most headers, as discussed before the example. |
+|-----|------------------------------------------------------------------------------------------------|
+|**2**|Uses the provided Jackson `ObjectMapper` and maps most headers, as discussed before the example.|
+|**3**| Uses a default Jackson `ObjectMapper` and maps headers according to the provided patterns. |
+|**4**| Uses the provided Jackson `ObjectMapper` and maps headers according to the provided patterns. |
+
+Patterns are rather simple and can contain a leading wildcard (`*`), a trailing wildcard, or both (for example, `*.cat.*`).
+You can negate patterns with a leading `!`.
+The first pattern that matches a header name (whether positive or negative) wins.
+
+When you provide your own patterns, we recommend including `!id` and `!timestamp`, since these headers are read-only on the inbound side.
+
+| |By default, the mapper deserializes only classes in `java.lang` and `java.util`.
You can trust other (or all) packages by adding trusted packages with the `addTrustedPackages` method.
If you receive messages from untrusted sources, you may wish to add only those packages you trust.
To trust all packages, you can use `mapper.addTrustedPackages("*")`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Mapping `String` header values in a raw form is useful when communicating with systems that are not aware of the mapper’s JSON format.| +|---|--------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.2.5, you can specify that certain string-valued headers should not be mapped using JSON, but to/from a raw `byte[]`. +The `AbstractKafkaHeaderMapper` has new properties; `mapAllStringsOut` when set to true, all string-valued headers will be converted to `byte[]` using the `charset` property (default `UTF-8`). +In addition, there is a property `rawMappedHeaders`, which is a map of `header name : boolean`; if the map contains a header name, and the header contains a `String` value, it will be mapped as a raw `byte[]` using the charset. +This map is also used to map raw incoming `byte[]` headers to `String` using the charset if, and only if, the boolean in the map value is `true`. +If the boolean is `false`, or the header name is not in the map with a `true` value, the incoming header is simply mapped as the raw unmapped header. + +The following test case illustrates this mechanism. 
+ +``` +@Test +public void testSpecificStringConvert() { + DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper(); + Map rawMappedHeaders = new HashMap<>(); + rawMappedHeaders.put("thisOnesAString", true); + rawMappedHeaders.put("thisOnesBytes", false); + mapper.setRawMappedHeaders(rawMappedHeaders); + Map headersMap = new HashMap<>(); + headersMap.put("thisOnesAString", "thing1"); + headersMap.put("thisOnesBytes", "thing2"); + headersMap.put("alwaysRaw", "thing3".getBytes()); + MessageHeaders headers = new MessageHeaders(headersMap); + Headers target = new RecordHeaders(); + mapper.fromHeaders(headers, target); + assertThat(target).containsExactlyInAnyOrder( + new RecordHeader("thisOnesAString", "thing1".getBytes()), + new RecordHeader("thisOnesBytes", "thing2".getBytes()), + new RecordHeader("alwaysRaw", "thing3".getBytes())); + headersMap.clear(); + mapper.toHeaders(target, headersMap); + assertThat(headersMap).contains( + entry("thisOnesAString", "thing1"), + entry("thisOnesBytes", "thing2".getBytes()), + entry("alwaysRaw", "thing3".getBytes())); +} +``` + +By default, the `DefaultKafkaHeaderMapper` is used in the `MessagingMessageConverter` and `BatchMessagingMessageConverter`, as long as Jackson is on the class path. + +With the batch converter, the converted headers are available in the `KafkaHeaders.BATCH_CONVERTED_HEADERS` as a `List>` where the map in a position of the list corresponds to the data position in the payload. + +If there is no converter (either because Jackson is not present or it is explicitly set to `null`), the headers from the consumer record are provided unconverted in the `KafkaHeaders.NATIVE_HEADERS` header. +This header is a `Headers` object (or a `List` in the case of the batch converter), where the position in the list corresponds to the data position in the payload). + +| |Certain types are not suitable for JSON serialization, and a simple `toString()` serialization might be preferred for these types.
The `DefaultKafkaHeaderMapper` has a method called `addToStringClasses()` that lets you supply the names of classes that should be treated this way for outbound mapping.
During inbound mapping, they are mapped as `String`.
By default, only `org.springframework.util.MimeType` and `org.springframework.http.MediaType` are mapped this way.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 2.3, handling of String-valued headers is simplified.
Such headers are no longer JSON encoded, by default (i.e. they do not have enclosing `"..."` added).
The type is still added to the JSON\_TYPES header so the receiving system can convert back to a String (from `byte[]`).
The mapper can handle (decode) headers produced by older versions (it checks for a leading `"`); in this way an application using 2.3 can consume records from older versions.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |To be compatible with earlier versions, set `encodeStrings` to `true`, if records produced by a version using 2.3 might be consumed by applications using earlier versions.
When all applications are using 2.3 or higher, you can leave the property at its default value of `false`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +@Bean +MessagingMessageConverter converter() { + MessagingMessageConverter converter = new MessagingMessageConverter(); + DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper(); + mapper.setEncodeStrings(true); + converter.setHeaderMapper(mapper); + return converter; +} +``` + +If using Spring Boot, it will auto configure this converter bean into the auto-configured `KafkaTemplate`; otherwise you should add this converter to the template. + +#### 4.1.19. Null Payloads and Log Compaction of 'Tombstone' Records + +When you use [Log Compaction](https://kafka.apache.org/documentation/#compaction), you can send and receive messages with `null` payloads to identify the deletion of a key. + +You can also receive `null` values for other reasons, such as a `Deserializer` that might return `null` when it cannot deserialize a value. + +To send a `null` payload by using the `KafkaTemplate`, you can pass null into the value argument of the `send()` methods. +One exception to this is the `send(Message message)` variant. +Since `spring-messaging` `Message` cannot have a `null` payload, you can use a special payload type called `KafkaNull`, and the framework sends `null`. +For convenience, the static `KafkaNull.INSTANCE` is provided. + +When you use a message listener container, the received `ConsumerRecord` has a `null` `value()`. + +To configure the `@KafkaListener` to handle `null` payloads, you must use the `@Payload` annotation with `required = false`. 
+If it is a tombstone message for a compacted log, you usually also need the key so that your application can determine which key was “deleted”. +The following example shows such a configuration: + +``` +@KafkaListener(id = "deletableListener", topics = "myTopic") +public void listen(@Payload(required = false) String value, @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String key) { + // value == null represents key deletion +} +``` + +When you use a class-level `@KafkaListener` with multiple `@KafkaHandler` methods, some additional configuration is needed. +Specifically, you need a `@KafkaHandler` method with a `KafkaNull` payload. +The following example shows how to configure one: + +``` +@KafkaListener(id = "multi", topics = "myTopic") +static class MultiListenerBean { + + @KafkaHandler + public void listen(String cat) { + ... + } + + @KafkaHandler + public void listen(Integer hat) { + ... + } + + @KafkaHandler + public void delete(@Payload(required = false) KafkaNull nul, @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) int key) { + ... + } + +} +``` + +Note that the argument is `null`, not `KafkaNull`. + +| |See [[tip-assign-all-parts]](#tip-assign-all-parts).| +|---|----------------------------------------------------| + +| |This feature requires the use of a `KafkaNullAwarePayloadArgumentResolver` which the framework will configure when using the default `MessageHandlerMethodFactory`.
When using a custom `MessageHandlerMethodFactory`, see [Adding custom `HandlerMethodArgumentResolver` to `@KafkaListener`](#custom-arg-resolve).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.20. Handling Exceptions + +This section describes how to handle various exceptions that may arise when you use Spring for Apache Kafka. + +##### Listener Error Handlers + +Starting with version 2.0, the `@KafkaListener` annotation has a new attribute: `errorHandler`. + +You can use the `errorHandler` to provide the bean name of a `KafkaListenerErrorHandler` implementation. +This functional interface has one method, as the following listing shows: + +``` +@FunctionalInterface +public interface KafkaListenerErrorHandler { + + Object handleError(Message message, ListenerExecutionFailedException exception) throws Exception; + +} +``` + +You have access to the spring-messaging `Message` object produced by the message converter and the exception that was thrown by the listener, which is wrapped in a `ListenerExecutionFailedException`. +The error handler can throw the original or a new exception, which is thrown to the container. +Anything returned by the error handler is ignored. + +Starting with version 2.7, you can set the `rawRecordHeader` property on the `MessagingMessageConverter` and `BatchMessagingMessageConverter` which causes the raw `ConsumerRecord` to be added to the converted `Message` in the `KafkaHeaders.RAW_DATA` header. +This is useful, for example, if you wish to use a `DeadLetterPublishingRecoverer` in a listener error handler. 
+It might be used in a request/reply scenario where you wish to send a failure result to the sender, after some number of retries, after capturing the failed record in a dead letter topic. + +``` +@Bean +KafkaListenerErrorHandler eh(DeadLetterPublishingRecoverer recoverer) { + return (msg, ex) -> { + if (msg.getHeaders().get(KafkaHeaders.DELIVERY_ATTEMPT, Integer.class) > 9) { + recoverer.accept(msg.getHeaders().get(KafkaHeaders.RAW_DATA, ConsumerRecord.class), ex); + return "FAILED"; + } + throw ex; + }; +} +``` + +It has a sub-interface (`ConsumerAwareListenerErrorHandler`) that has access to the consumer object, through the following method: + +``` +Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer); +``` + +If your error handler implements this interface, you can, for example, adjust the offsets accordingly. +For example, to reset the offset to replay the failed message, you could do something like the following: + +``` +@Bean +public ConsumerAwareListenerErrorHandler listen3ErrorHandler() { + return (m, e, c) -> { + this.listen3Exception = e; + MessageHeaders headers = m.getHeaders(); + c.seek(new org.apache.kafka.common.TopicPartition( + headers.get(KafkaHeaders.RECEIVED_TOPIC, String.class), + headers.get(KafkaHeaders.RECEIVED_PARTITION_ID, Integer.class)), + headers.get(KafkaHeaders.OFFSET, Long.class)); + return null; + }; +} +``` + +Similarly, you could do something like the following for a batch listener: + +``` +@Bean +public ConsumerAwareListenerErrorHandler listen10ErrorHandler() { + return (m, e, c) -> { + this.listen10Exception = e; + MessageHeaders headers = m.getHeaders(); + List topics = headers.get(KafkaHeaders.RECEIVED_TOPIC, List.class); + List partitions = headers.get(KafkaHeaders.RECEIVED_PARTITION_ID, List.class); + List offsets = headers.get(KafkaHeaders.OFFSET, List.class); + Map offsetsToReset = new HashMap<>(); + for (int i = 0; i < topics.size(); i++) { + int index = i; + 
offsetsToReset.compute(new TopicPartition(topics.get(i), partitions.get(i)), + (k, v) -> v == null ? offsets.get(index) : Math.min(v, offsets.get(index))); + } + offsetsToReset.forEach((k, v) -> c.seek(k, v)); + return null; + }; +} +``` + +This resets each topic/partition in the batch to the lowest offset in the batch. + +| |The preceding two examples are simplistic implementations, and you would probably want more checking in the error handler.| +|---|--------------------------------------------------------------------------------------------------------------------------| + +##### Container Error Handlers + +Starting with version 2.8, the legacy `ErrorHandler` and `BatchErrorHandler` interfaces have been superseded by a new `CommonErrorHandler`. +These error handlers can handle errors for both record and batch listeners, allowing a single listener container factory to create containers for both types of listener. `CommonErrorHandler` implementations to replace most legacy framework error handler implementations are provided and the legacy error handlers deprecated. +The legacy interfaces are still supported by listener containers and listener container factories; they will be deprecated in a future release. + +When transactions are being used, no error handlers are configured, by default, so that the exception will roll back the transaction. +Error handling for transactional containers is handled by the [`AfterRollbackProcessor`](#after-rollback). +If you provide a custom error handler when using transactions, it must throw an exception if you want the transaction rolled back. + +This interface has a default method `isAckAfterHandle()` which is called by the container to determine whether the offset(s) should be committed if the error handler returns without throwing an exception; it returns true by default. + +Typically, the error handlers provided by the framework will throw an exception when the error is not "handled" (e.g. after performing a seek operation). 
+By default, such exceptions are logged by the container at `ERROR` level. +All of the framework error handlers extend `KafkaExceptionLogLevelAware` which allows you to control the level at which these exceptions are logged. + +``` +/** + * Set the level at which the exception thrown by this handler is logged. + * @param logLevel the level (default ERROR). + */ +public void setLogLevel(KafkaException.Level logLevel) { + ... +} +``` + +You can specify a global error handler to be used for all listeners in the container factory. +The following example shows how to do so: + +``` +@Bean +public KafkaListenerContainerFactory> + kafkaListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + ... + factory.setCommonErrorHandler(myErrorHandler); + ... + return factory; +} +``` + +By default, if an annotated listener method throws an exception, it is thrown to the container, and the message is handled according to the container configuration. + +The container commits any pending offset commits before calling the error handler. + +If you are using Spring Boot, you simply need to add the error handler as a `@Bean` and Boot will add it to the auto-configured factory. + +##### DefaultErrorHandler + +This new error handler replaces the `SeekToCurrentErrorHandler` and `RecoveringBatchErrorHandler`, which have been the default error handlers for several releases now. +One difference is that the fallback behavior for batch listeners (when an exception other than a `BatchListenerFailedException` is thrown) is the equivalent of the [Retrying Complete Batches](#retrying-batch-eh). + +The error handler can recover (skip) a record that keeps failing. +By default, after ten failures, the failed record is logged (at the `ERROR` level). +You can configure the handler with a custom recoverer (`BiConsumer`) and a `BackOff` that controls the delivery attempts and delays between each. 
+Using a `FixedBackOff` with `FixedBackOff.UNLIMITED_ATTEMPTS` causes (effectively) infinite retries. +The following example configures recovery after three tries: + +``` +DefaultErrorHandler errorHandler = + new DefaultErrorHandler((record, exception) -> { + // recover after 3 failures, with no back off - e.g. send to a dead-letter topic + }, new FixedBackOff(0L, 2L)); +``` + +To configure the listener container with a customized instance of this handler, add it to the container factory. + +For example, with the `@KafkaListener` container factory, you can add `DefaultErrorHandler` as follows: + +``` +@Bean +public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory(); + factory.setConsumerFactory(consumerFactory()); + factory.getContainerProperties().setAckOnError(false); + factory.getContainerProperties().setAckMode(AckMode.RECORD); + factory.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(1000L, 2L))); + return factory; +} +``` + +For a record listener, this will retry a delivery up to 2 times (3 delivery attempts) with a back off of 1 second, instead of the default configuration (`FixedBackOff(0L, 9)`). +Failures are simply logged after retries are exhausted. + +As an example; if the `poll` returns six records (two from each partition 0, 1, 2) and the listener throws an exception on the fourth record, the container acknowledges the first three messages by committing their offsets. +The `DefaultErrorHandler` seeks to offset 1 for partition 1 and offset 0 for partition 2. +The next `poll()` returns the three unprocessed records. + +If the `AckMode` was `BATCH`, the container commits the offsets for the first two partitions before calling the error handler. + +For a batch listener, the listener must throw a `BatchListenerFailedException` indicating which records in the batch failed. 
+ +The sequence of events is: + +* Commit the offsets of the records before the index. + +* If retries are not exhausted, perform seeks so that all the remaining records (including the failed record) will be redelivered. + +* If retries are exhausted, attempt recovery of the failed record (default log only) and perform seeks so that the remaining records (excluding the failed record) will be redelivered. + The recovered record’s offset is committed. + +* If retries are exhausted and recovery fails, seeks are performed as if retries are not exhausted. + +The default recoverer logs the failed record after retries are exhausted. +You can use a custom recoverer, or one provided by the framework such as the [`DeadLetterPublishingRecoverer`](#dead-letters). + +When using a POJO batch listener (e.g. `List`), and you don’t have the full consumer record to add to the exception, you can just add the index of the record that failed: + +``` +@KafkaListener(id = "recovering", topics = "someTopic") +public void listen(List things) { + for (int i = 0; i < things.size(); i++) { + try { + process(things.get(i)); + } + catch (Exception e) { + throw new BatchListenerFailedException("Failed to process", i); + } + } +} +``` + +When the container is configured with `AckMode.MANUAL_IMMEDIATE`, the error handler can be configured to commit the offset of recovered records; set the `commitRecovered` property to `true`. + +See also [Publishing Dead-letter Records](#dead-letters). + +When using transactions, similar functionality is provided by the `DefaultAfterRollbackProcessor`. +See [After-rollback Processor](#after-rollback). + +The `DefaultErrorHandler` considers certain exceptions to be fatal, and retries are skipped for such exceptions; the recoverer is invoked on the first failure. 
+The exceptions that are considered fatal, by default, are: + +* `DeserializationException` + +* `MessageConversionException` + +* `ConversionException` + +* `MethodArgumentResolutionException` + +* `NoSuchMethodException` + +* `ClassCastException` + +since these exceptions are unlikely to be resolved on a retried delivery. + +You can add more exception types to the not-retryable category, or completely replace the map of classified exceptions. +See the Javadocs for `DefaultErrorHandler.addNotRetryableException()` and `DefaultErrorHandler.setClassifications()` for more information, as well as those for the `spring-retry` `BinaryExceptionClassifier`. + +Here is an example that adds `IllegalArgumentException` to the not-retryable exceptions: + +``` +@Bean +public DefaultErrorHandler errorHandler(ConsumerRecordRecoverer recoverer) { + DefaultErrorHandler handler = new DefaultErrorHandler(recoverer); + handler.addNotRetryableExceptions(IllegalArgumentException.class); + return handler; +} +``` + +The error handler can be configured with one or more `RetryListener` s, receiving notifications of retry and recovery progress. + +``` +@FunctionalInterface +public interface RetryListener { + + void failedDelivery(ConsumerRecord record, Exception ex, int deliveryAttempt); + + default void recovered(ConsumerRecord record, Exception ex) { + } + + default void recoveryFailed(ConsumerRecord record, Exception original, Exception failure) { + } + +} +``` + +See the javadocs for more information. + +| |If the recoverer fails (throws an exception), the failed record will be included in the seeks.
If the recoverer fails, the `BackOff` will be reset by default and redeliveries will again go through the back offs before recovery is attempted again.
To skip retries after a recovery failure, set the error handler’s `resetStateOnRecoveryFailure` to `false`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can provide the error handler with a `BiFunction, Exception, BackOff>` to determine the `BackOff` to use, based on the failed record and/or the exception: + +``` +handler.setBackOffFunction((record, ex) -> { ... }); +``` + +If the function returns `null`, the handler’s default `BackOff` will be used. + +Set `resetStateOnExceptionChange` to `true` and the retry sequence will be restarted (including the selection of a new `BackOff`, if so configured) if the exception type changes between failures. +By default, the exception type is not considered. + +Also see [Delivery Attempts Header](#delivery-header). + +#### 4.1.21. Conversion Errors with Batch Error Handlers + +Starting with version 2.8, batch listeners can now properly handle conversion errors, when using a `MessageConverter` with a `ByteArrayDeserializer`, a `BytesDeserializer` or a `StringDeserializer`, as well as a `DefaultErrorHandler`. +When a conversion error occurs, the payload is set to null and a deserialization exception is added to the record headers, similar to the `ErrorHandlingDeserializer`. +A list of `ConversionException` s is available in the listener so the listener can throw a `BatchListenerFailedException` indicating the first index at which a conversion exception occurred. 
+ +Example: + +``` +@KafkaListener(id = "test", topics = "topic") +void listen(List in, @Header(KafkaHeaders.CONVERSION_FAILURES) List exceptions) { + for (int i = 0; i < in.size(); i++) { + Foo foo = in.get(i); + if (foo == null && exceptions.get(i) != null) { + throw new BatchListenerFailedException("Conversion error", exceptions.get(i), i); + } + process(foo); + } +} +``` + +##### Retrying Complete Batches + +This is now the fallback behavior of the `DefaultErrorHandler` for a batch listener where the listener throws an exception other than a `BatchListenerFailedException`. + +There is no guarantee that, when a batch is redelivered, the batch has the same number of records and/or the redelivered records are in the same order. +It is impossible, therefore, to easily maintain retry state for a batch. +The `FallbackBatchErrorHandler` takes the following approach. +If a batch listener throws an exception that is not a `BatchListenerFailedException`, the retries are performed from the in-memory batch of records. +In order to avoid a rebalance during an extended retry sequence, the error handler pauses the consumer, polls it before sleeping for the back off, for each retry, and calls the listener again. +If/when retries are exhausted, the `ConsumerRecordRecoverer` is called for each record in the batch. +If the recoverer throws an exception, or the thread is interrupted during its sleep, the batch of records will be redelivered on the next poll. +Before exiting, regardless of the outcome, the consumer is resumed. + +| |This mechanism cannot be used with transactions.| +|---|------------------------------------------------| + +While waiting for a `BackOff` interval, the error handler will loop with a short sleep until the desired delay is reached, while checking to see if the container has been stopped, allowing the sleep to exit soon after the `stop()` rather than causing a delay. 
+ +##### Container Stopping Error Handlers + +The `CommonContainerStoppingErrorHandler` stops the container if the listener throws an exception. +For record listeners, when the `AckMode` is `RECORD`, offsets for already processed records are committed. +For record listeners, when the `AckMode` is any manual value, offsets for already acknowledged records are committed. +For record listeners, when the `AckMode` is `BATCH`, or for batch listeners, the entire batch is replayed when the container is restarted. + +After the container stops, an exception that wraps the `ListenerExecutionFailedException` is thrown. +This is to cause the transaction to roll back (if transactions are enabled). + +##### Delegating Error Handler + +The `CommonDelegatingErrorHandler` can delegate to different error handlers, depending on the exception type. +For example, you may wish to invoke a `DefaultErrorHandler` for most exceptions, or a `CommonContainerStoppingErrorHandler` for others. + +##### Logging Error Handler + +The `CommonLoggingErrorHandler` simply logs the exception; with a record listener, the remaining records from the previous poll are passed to the listener. +For a batch listener, all the records in the batch are logged. + +##### Using Different Common Error Handlers for Record and Batch Listeners + +If you wish to use a different error handling strategy for record and batch listeners, the `CommonMixedErrorHandler` is provided allowing the configuration of a specific error handler for each listener type. 
+ +##### Common Error Handler Summary + +* `DefaultErrorHandler` + +* `CommonContainerStoppingErrorHandler` + +* `CommonDelegatingErrorHandler` + +* `CommonLoggingErrorHandler` + +* `CommonMixedErrorHandler` + +##### Legacy Error Handlers and Their Replacements + +| Legacy Error Handler | Replacement | +|----------------------------------------|-------------------------------------------------------------------------------------------------------------| +| `LoggingErrorHandler` | `CommonLoggingErrorHandler` | +| `BatchLoggingErrorHandler` | `CommonLoggingErrorHandler` | +| `ConditionalDelegatingErrorHandler` | `CommonDelegatingErrorHandler` | +|`ConditionalDelegatingBatchErrorHandler`| `CommonDelegatingErrorHandler` | +| `ContainerStoppingErrorHandler` | `CommonContainerStoppingErrorHandler` | +| `ContainerStoppingBatchErrorHandler` | `CommonContainerStoppingErrorHandler` | +| `SeekToCurrentErrorHandler` | `DefaultErrorHandler` | +| `SeekToCurrentBatchErrorHandler` | No replacement, use `DefaultErrorHandler` with an infinite `BackOff`. | +| `RecoveringBatchErrorHandler` | `DefaultErrorHandler` | +| `RetryingBatchErrorHandler` |No replacements - use `DefaultErrorHandler` and throw an exception other than `BatchListenerFailedException`.| + +##### After-rollback Processor + +When using transactions, if the listener throws an exception (and an error handler, if present, throws an exception), the transaction is rolled back. +By default, any unprocessed records (including the failed record) are re-fetched on the next poll. +This is achieved by performing `seek` operations in the `DefaultAfterRollbackProcessor`. +With a batch listener, the entire batch of records is reprocessed (the container has no knowledge of which record in the batch failed). +To modify this behavior, you can configure the listener container with a custom `AfterRollbackProcessor`. 
+For example, with a record-based listener, you might want to keep track of the failed record and give up after some number of attempts, perhaps by publishing it to a dead-letter topic. + +Starting with version 2.2, the `DefaultAfterRollbackProcessor` can now recover (skip) a record that keeps failing. +By default, after ten failures, the failed record is logged (at the `ERROR` level). +You can configure the processor with a custom recoverer (`BiConsumer`) and maximum failures. +Setting the `maxFailures` property to a negative number causes infinite retries. +The following example configures recovery after three tries: + +``` +AfterRollbackProcessor processor = + new DefaultAfterRollbackProcessor((record, exception) -> { + // recover after 3 failures, with no back off - e.g. send to a dead-letter topic + }, new FixedBackOff(0L, 2L)); +``` + +When you do not use transactions, you can achieve similar functionality by configuring a `DefaultErrorHandler`. +See [Container Error Handlers](#error-handlers). + +| |Recovery is not possible with a batch listener, since the framework has no knowledge about which record in the batch keeps failing.
In such cases, the application listener must handle a record that keeps failing.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See also [Publishing Dead-letter Records](#dead-letters). + +Starting with version 2.2.5, the `DefaultAfterRollbackProcessor` can be invoked in a new transaction (started after the failed transaction rolls back). +Then, if you are using the `DeadLetterPublishingRecoverer` to publish a failed record, the processor will send the recovered record’s offset in the original topic/partition to the transaction. +To enable this feature, set the `commitRecovered` and `kafkaTemplate` properties on the `DefaultAfterRollbackProcessor`. + +| |If the recoverer fails (throws an exception), the failed record will be included in the seeks.
Starting with version 2.5.5, if the recoverer fails, the `BackOff` will be reset by default and redeliveries will again go through the back offs before recovery is attempted again.
With earlier versions, the `BackOff` was not reset and recovery was re-attempted on the next failure.
To revert to the previous behavior, set the processor’s `resetStateOnRecoveryFailure` property to `false`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.6, you can now provide the processor with a `BiFunction, Exception, BackOff>` to determine the `BackOff` to use, based on the failed record and/or the exception: + +``` +handler.setBackOffFunction((record, ex) -> { ... }); +``` + +If the function returns `null`, the processor’s default `BackOff` will be used. + +Starting with version 2.6.3, set `resetStateOnExceptionChange` to `true` and the retry sequence will be restarted (including the selection of a new `BackOff`, if so configured) if the exception type changes between failures. +By default, the exception type is not considered. + +Starting with version 2.3.1, similar to the `DefaultErrorHandler`, the `DefaultAfterRollbackProcessor` considers certain exceptions to be fatal, and retries are skipped for such exceptions; the recoverer is invoked on the first failure. +The exceptions that are considered fatal, by default, are: + +* `DeserializationException` + +* `MessageConversionException` + +* `ConversionException` + +* `MethodArgumentResolutionException` + +* `NoSuchMethodException` + +* `ClassCastException` + +since these exceptions are unlikely to be resolved on a retried delivery. + +You can add more exception types to the not-retryable category, or completely replace the map of classified exceptions. 
+See the Javadocs for `DefaultAfterRollbackProcessor.setClassifications()` for more information, as well as those for the `spring-retry` `BinaryExceptionClassifier`. + +Here is an example that adds `IllegalArgumentException` to the not-retryable exceptions: + +``` +@Bean +public DefaultAfterRollbackProcessor errorHandler(BiConsumer, Exception> recoverer) { + DefaultAfterRollbackProcessor processor = new DefaultAfterRollbackProcessor(recoverer); + processor.addNotRetryableException(IllegalArgumentException.class); + return processor; +} +``` + +Also see [Delivery Attempts Header](#delivery-header). + +| |With current `kafka-clients`, the container cannot detect whether a `ProducerFencedException` is caused by a rebalance or if the producer’s `transactional.id` has been revoked due to a timeout or expiry.
Because, in most cases, it is caused by a rebalance, the container does not call the `AfterRollbackProcessor` (because it’s not appropriate to seek the partitions because we no longer are assigned them).
If you ensure the timeout is large enough to process each transaction and periodically perform an "empty" transaction (e.g. via a `ListenerContainerIdleEvent`) you can avoid fencing due to timeout and expiry.
Or, you can set the `stopContainerWhenFenced` container property to `true` and the container will stop, avoiding the loss of records.
You can consume a `ConsumerStoppedEvent` and check the `Reason` property for `FENCED` to detect this condition.
Since the event also has a reference to the container, you can restart the container using this event.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.7, while waiting for a `BackOff` interval, the error handler will loop with a short sleep until the desired delay is reached, while checking to see if the container has been stopped, allowing the sleep to exit soon after the `stop()` rather than causing a delay. + +Starting with version 2.7, the processor can be configured with one or more `RetryListener` s, receiving notifications of retry and recovery progress. + +``` +@FunctionalInterface +public interface RetryListener { + + void failedDelivery(ConsumerRecord record, Exception ex, int deliveryAttempt); + + default void recovered(ConsumerRecord record, Exception ex) { + } + + default void recoveryFailed(ConsumerRecord record, Exception original, Exception failure) { + } + +} +``` + +See the javadocs for more information. 
+ +##### Delivery Attempts Header + +The following applies to record listeners only, not batch listeners. + +Starting with version 2.5, when using an `ErrorHandler` or `AfterRollbackProcessor` that implements `DeliveryAttemptAware`, it is possible to enable the addition of the `KafkaHeaders.DELIVERY_ATTEMPT` header (`kafka_deliveryAttempt`) to the record. +The value of this header is an incrementing integer starting at 1. +When receiving a raw `ConsumerRecord` the integer is in a `byte[4]`. + +``` +int delivery = ByteBuffer.wrap(record.headers() + .lastHeader(KafkaHeaders.DELIVERY_ATTEMPT).value()) + .getInt() +``` + +When using `@KafkaListener` with the `DefaultKafkaHeaderMapper` or `SimpleKafkaHeaderMapper`, it can be obtained by adding `@Header(KafkaHeaders.DELIVERY_ATTEMPT) int delivery` as a parameter to the listener method. + +To enable population of this header, set the container property `deliveryAttemptHeader` to `true`. +It is disabled by default to avoid the (small) overhead of looking up the state for each record and adding the header. + +The `DefaultErrorHandler` and `DefaultAfterRollbackProcessor` support this feature. + +##### Publishing Dead-letter Records + +You can configure the `DefaultErrorHandler` and `DefaultAfterRollbackProcessor` with a record recoverer when the maximum number of failures is reached for a record. +The framework provides the `DeadLetterPublishingRecoverer`, which publishes the failed message to another topic. +The recoverer requires a `KafkaTemplate`, which is used to send the record. +You can also, optionally, configure it with a `BiFunction, Exception, TopicPartition>`, which is called to resolve the destination topic and partition. + +| |By default, the dead-letter record is sent to a topic named `.DLT` (the original topic name suffixed with `.DLT`) and to the same partition as the original record.
Therefore, when you use the default resolver, the dead-letter topic **must have at least as many partitions as the original topic.**| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If the returned `TopicPartition` has a negative partition, the partition is not set in the `ProducerRecord`, so the partition is selected by Kafka. +Starting with version 2.2.4, any `ListenerExecutionFailedException` (thrown, for example, when an exception is detected in a `@KafkaListener` method) is enhanced with the `groupId` property. +This allows the destination resolver to use this, in addition to the information in the `ConsumerRecord` to select the dead letter topic. + +The following example shows how to wire a custom destination resolver: + +``` +DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template, + (r, e) -> { + if (e instanceof FooException) { + return new TopicPartition(r.topic() + ".Foo.failures", r.partition()); + } + else { + return new TopicPartition(r.topic() + ".other.failures", r.partition()); + } + }); +ErrorHandler errorHandler = new DefaultErrorHandler(recoverer, new FixedBackOff(0L, 2L)); +``` + +The record sent to the dead-letter topic is enhanced with the following headers: + +* `KafkaHeaders.DLT_EXCEPTION_FQCN`: The Exception class name (generally a `ListenerExecutionFailedException`, but can be others). + +* `KafkaHeaders.DLT_EXCEPTION_CAUSE_FQCN`: The Exception cause class name, if present (since version 2.8). + +* `KafkaHeaders.DLT_EXCEPTION_STACKTRACE`: The Exception stack trace. + +* `KafkaHeaders.DLT_EXCEPTION_MESSAGE`: The Exception message. + +* `KafkaHeaders.DLT_KEY_EXCEPTION_FQCN`: The Exception class name (key deserialization errors only). 
+ +* `KafkaHeaders.DLT_KEY_EXCEPTION_STACKTRACE`: The Exception stack trace (key deserialization errors only). + +* `KafkaHeaders.DLT_KEY_EXCEPTION_MESSAGE`: The Exception message (key deserialization errors only). + +* `KafkaHeaders.DLT_ORIGINAL_TOPIC`: The original topic. + +* `KafkaHeaders.DLT_ORIGINAL_PARTITION`: The original partition. + +* `KafkaHeaders.DLT_ORIGINAL_OFFSET`: The original offset. + +* `KafkaHeaders.DLT_ORIGINAL_TIMESTAMP`: The original timestamp. + +* `KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE`: The original timestamp type. + +* `KafkaHeaders.DLT_ORIGINAL_CONSUMER_GROUP`: The original consumer group that failed to process the record (since version 2.8). + +Key exceptions are only caused by `DeserializationException` s so there is no `DLT_KEY_EXCEPTION_CAUSE_FQCN`. + +There are two mechanisms to add more headers. + +1. Subclass the recoverer and override `createProducerRecord()` - call `super.createProducerRecord()` and add more headers. + +2. Provide a `BiFunction` to receive the consumer record and exception, returning a `Headers` object; headers from there will be copied to the final producer record. + Use `setHeadersFunction()` to set the `BiFunction`. + +The second is simpler to implement but the first has more information available, including the already assembled standard headers. + +Starting with version 2.3, when used in conjunction with an `ErrorHandlingDeserializer`, the publisher will restore the record `value()`, in the dead-letter producer record, to the original value that failed to be deserialized. +Previously, the `value()` was null and user code had to decode the `DeserializationException` from the message headers. +In addition, you can provide multiple `KafkaTemplate` s to the publisher; this might be needed, for example, if you want to publish the `byte[]` from a `DeserializationException`, as well as values using a different serializer from records that were deserialized successfully. 
+Here is an example of configuring the publisher with `KafkaTemplate` s that use a `String` and `byte[]` serializer: + +``` +@Bean +public DeadLetterPublishingRecoverer publisher(KafkaTemplate stringTemplate, + KafkaTemplate bytesTemplate) { + + Map, KafkaTemplate> templates = new LinkedHashMap<>(); + templates.put(String.class, stringTemplate); + templates.put(byte[].class, bytesTemplate); + return new DeadLetterPublishingRecoverer(templates); +} +``` + +The publisher uses the map keys to locate a template that is suitable for the `value()` about to be published. +A `LinkedHashMap` is recommended so that the keys are examined in order. + +When publishing `null` values, when there are multiple templates, the recoverer will look for a template for the `Void` class; if none is present, the first template from the `values().iterator()` will be used. + +Since 2.7 you can use the `setFailIfSendResultIsError` method so that an exception is thrown when message publishing fails. +You can also set a timeout for the verification of the sender success with `setWaitForSendResultTimeout`. + +| |If the recoverer fails (throws an exception), the failed record will be included in the seeks.
Starting with version 2.5.5, if the recoverer fails, the `BackOff` will be reset by default and redeliveries will again go through the back offs before recovery is attempted again.
With earlier versions, the `BackOff` was not reset and recovery was re-attempted on the next failure.
To revert to the previous behavior, set the error handler’s `resetStateOnRecoveryFailure` property to `false`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.6.3, set `resetStateOnExceptionChange` to `true` and the retry sequence will be restarted (including the selection of a new `BackOff`, if so configured) if the exception type changes between failures. +By default, the exception type is not considered. + +Starting with version 2.3, the recoverer can also be used with Kafka Streams - see [Recovery from Deserialization Exceptions](#streams-deser-recovery) for more information. + +The `ErrorHandlingDeserializer` adds the deserialization exception(s) in headers `ErrorHandlingDeserializer.VALUE_DESERIALIZER_EXCEPTION_HEADER` and `ErrorHandlingDeserializer.KEY_DESERIALIZER_EXCEPTION_HEADER` (using java serialization). +By default, these headers are not retained in the message published to the dead letter topic. +Starting with version 2.7, if both the key and value fail deserialization, the original values of both are populated in the record sent to the DLT. + +If incoming records are dependent on each other, but may arrive out of order, it may be useful to republish a failed record to the tail of the original topic (for some number of times), instead of sending it directly to the dead letter topic. +See [this Stack Overflow Question](https://stackoverflow.com/questions/64646996) for an example. 
+ +The following error handler configuration will do exactly that: + +``` +@Bean +public ErrorHandler eh(KafkaOperations template) { + return new DefaultErrorHandler(new DeadLetterPublishingRecoverer(template, + (rec, ex) -> { + org.apache.kafka.common.header.Header retries = rec.headers().lastHeader("retries"); + if (retries == null) { + retries = new RecordHeader("retries", new byte[] { 1 }); + rec.headers().add(retries); + } + else { + retries.value()[0]++; + } + return retries.value()[0] > 5 + ? new TopicPartition("topic.DLT", rec.partition()) + : new TopicPartition("topic", rec.partition()); + }), new FixedBackOff(0L, 0L)); +} +``` + +Starting with version 2.7, the recoverer checks that the partition selected by the destination resolver actually exists. +If the partition is not present, the partition in the `ProducerRecord` is set to `null`, allowing the `KafkaProducer` to select the partition. +You can disable this check by setting the `verifyPartition` property to `false`. + +##### Managing Dead Letter Record Headers + +Referring to [Publishing Dead-letter Records](#dead-letters) above, the `DeadLetterPublishingRecoverer` has two properties used to manage headers when those headers already exist (such as when reprocessing a dead letter record that failed, including when using [Non-Blocking Retries](#retry-topic)). + +* `appendOriginalHeaders` (default `true`) + +* `stripPreviousExceptionHeaders` (default `true` since version 2.8) + +Apache Kafka supports multiple headers with the same name; to obtain the "latest" value, you can use `headers.lastHeader(headerName)`; to get an iterator over multiple headers, use `headers.headers(headerName).iterator()`. + +When repeatedly republishing a failed record, these headers can grow (and eventually cause publication to fail due to a `RecordTooLargeException`); this is especially true for the exception headers and particularly for the stack trace headers. 
+ +The reason for the two properties is because, while you might want to retain only the last exception information, you might want to retain the history of which topic(s) the record passed through for each failure. + +`appendOriginalHeaders` is applied to all headers named `**ORIGINAL**` while `stripPreviousExceptionHeaders` is applied to all headers named `**EXCEPTION**`. + +Also see [Failure Header Management](#retry-headers) with [Non-Blocking Retries](#retry-topic). + +##### `ExponentialBackOffWithMaxRetries` Implementation + +Spring Framework provides a number of `BackOff` implementations. +By default, the `ExponentialBackOff` will retry indefinitely; to give up after some number of retry attempts requires calculating the `maxElapsedTime`. +Since version 2.7.3, Spring for Apache Kafka provides the `ExponentialBackOffWithMaxRetries` which is a subclass that receives the `maxRetries` property and automatically calculates the `maxElapsedTime`, which is a little more convenient. + +``` +@Bean +DefaultErrorHandler handler() { + ExponentialBackOffWithMaxRetries bo = new ExponentialBackOffWithMaxRetries(6); + bo.setInitialInterval(1_000L); + bo.setMultiplier(2.0); + bo.setMaxInterval(10_000L); + return new DefaultErrorHandler(myRecoverer, bo); +} +``` + +This will retry after `1, 2, 4, 8, 10, 10` seconds, before calling the recoverer. + +#### 4.1.22. JAAS and Kerberos + +Starting with version 2.0, a `KafkaJaasLoginModuleInitializer` class has been added to assist with Kerberos configuration. +You can add this bean, with the desired configuration, to your application context. 
+The following example configures such a bean: + +``` +@Bean +public KafkaJaasLoginModuleInitializer jaasConfig() throws IOException { + KafkaJaasLoginModuleInitializer jaasConfig = new KafkaJaasLoginModuleInitializer(); + jaasConfig.setControlFlag("REQUIRED"); + Map options = new HashMap<>(); + options.put("useKeyTab", "true"); + options.put("storeKey", "true"); + options.put("keyTab", "/etc/security/keytabs/kafka_client.keytab"); + options.put("principal", "[email protected]"); + jaasConfig.setOptions(options); + return jaasConfig; +} +``` + +### 4.2. Apache Kafka Streams Support + +Starting with version 1.1.4, Spring for Apache Kafka provides first-class support for [Kafka Streams](https://kafka.apache.org/documentation/streams). +To use it from a Spring application, the `kafka-streams` jar must be present on classpath. +It is an optional dependency of the Spring for Apache Kafka project and is not downloaded transitively. + +#### 4.2.1. Basics + +The reference Apache Kafka Streams documentation suggests the following way of using the API: + +``` +// Use the builders to define the actual processing topology, e.g. to specify +// from which input topics to read, which stream operations (filter, map, etc.) +// should be called, and so on. + +StreamsBuilder builder = ...; // when using the Kafka Streams DSL + +// Use the configuration to tell your application where the Kafka cluster is, +// which serializers/deserializers to use by default, to specify security settings, +// and so on. +StreamsConfig config = ...; + +KafkaStreams streams = new KafkaStreams(builder, config); + +// Start the Kafka Streams instance +streams.start(); + +// Stop the Kafka Streams instance +streams.close(); +``` + +So, we have two main components: + +* `StreamsBuilder`: With an API to build `KStream` (or `KTable`) instances. + +* `KafkaStreams`: To manage the lifecycle of those instances. 
+ +| |All `KStream` instances exposed to a `KafkaStreams` instance by a single `StreamsBuilder` are started and stopped at the same time, even if they have different logic.
In other words, all streams defined by a `StreamsBuilder` are tied with a single lifecycle control.
Once a `KafkaStreams` instance has been closed by `streams.close()`, it cannot be restarted.
Instead, a new `KafkaStreams` instance to restart stream processing must be created.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.2.2. Spring Management + +To simplify using Kafka Streams from the Spring application context perspective and use the lifecycle management through a container, the Spring for Apache Kafka introduces `StreamsBuilderFactoryBean`. +This is an `AbstractFactoryBean` implementation to expose a `StreamsBuilder` singleton instance as a bean. +The following example creates such a bean: + +``` +@Bean +public FactoryBean myKStreamBuilder(KafkaStreamsConfiguration streamsConfig) { + return new StreamsBuilderFactoryBean(streamsConfig); +} +``` + +| |Starting with version 2.2, the stream configuration is now provided as a `KafkaStreamsConfiguration` object rather than a `StreamsConfig`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------| + +The `StreamsBuilderFactoryBean` also implements `SmartLifecycle` to manage the lifecycle of an internal `KafkaStreams` instance. +Similar to the Kafka Streams API, you must define the `KStream` instances before you start the `KafkaStreams`. +That also applies for the Spring API for Kafka Streams. +Therefore, when you use default `autoStartup = true` on the `StreamsBuilderFactoryBean`, you must declare `KStream` instances on the `StreamsBuilder` before the application context is refreshed. 
+For example, `KStream` can be a regular bean definition, while the Kafka Streams API is used without any impacts. +The following example shows how to do so: + +``` +@Bean +public KStream kStream(StreamsBuilder kStreamBuilder) { + KStream stream = kStreamBuilder.stream(STREAMING_TOPIC1); + // Fluent KStream API + return stream; +} +``` + +If you would like to control the lifecycle manually (for example, stopping and starting by some condition), you can reference the `StreamsBuilderFactoryBean` bean directly by using the factory bean (`&`) [prefix](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/beans.html#beans-factory-extension-factorybean). +Since `StreamsBuilderFactoryBean` use its internal `KafkaStreams` instance, it is safe to stop and restart it again. +A new `KafkaStreams` is created on each `start()`. +You might also consider using different `StreamsBuilderFactoryBean` instances, if you would like to control the lifecycles for `KStream` instances separately. + +You also can specify `KafkaStreams.StateListener`, `Thread.UncaughtExceptionHandler`, and `StateRestoreListener` options on the `StreamsBuilderFactoryBean`, which are delegated to the internal `KafkaStreams` instance. +Also, apart from setting those options indirectly on `StreamsBuilderFactoryBean`, starting with *version 2.1.5*, you can use a `KafkaStreamsCustomizer` callback interface to configure an inner `KafkaStreams` instance. +Note that `KafkaStreamsCustomizer` overrides the options provided by `StreamsBuilderFactoryBean`. +If you need to perform some `KafkaStreams` operations directly, you can access that internal `KafkaStreams` instance by using `StreamsBuilderFactoryBean.getKafkaStreams()`. 
+You can autowire `StreamsBuilderFactoryBean` bean by type, but you should be sure to use the full type in the bean definition, as the following example shows: + +``` +@Bean +public StreamsBuilderFactoryBean myKStreamBuilder(KafkaStreamsConfiguration streamsConfig) { + return new StreamsBuilderFactoryBean(streamsConfig); +} +... +@Autowired +private StreamsBuilderFactoryBean myKStreamBuilderFactoryBean; +``` + +Alternatively, you can add `@Qualifier` for injection by name if you use interface bean definition. +The following example shows how to do so: + +``` +@Bean +public FactoryBean myKStreamBuilder(KafkaStreamsConfiguration streamsConfig) { + return new StreamsBuilderFactoryBean(streamsConfig); +} +... +@Autowired +@Qualifier("&myKStreamBuilder") +private StreamsBuilderFactoryBean myKStreamBuilderFactoryBean; +``` + +Starting with version 2.4.1, the factory bean has a new property `infrastructureCustomizer` with type `KafkaStreamsInfrastructureCustomizer`; this allows customization of the `StreamsBuilder` (e.g. to add a state store) and/or the `Topology` before the stream is created. + +``` +public interface KafkaStreamsInfrastructureCustomizer { + + void configureBuilder(StreamsBuilder builder); + + void configureTopology(Topology topology); + +} +``` + +Default no-op implementations are provided to avoid having to implement both methods if one is not required. + +A `CompositeKafkaStreamsInfrastructureCustomizer` is provided, for when you need to apply multiple customizers. + +#### 4.2.3. KafkaStreams Micrometer Support + +Introduced in version 2.5.3, you can configure a `KafkaStreamsMicrometerListener` to automatically register micrometer meters for the `KafkaStreams` object managed by the factory bean: + +``` +streamsBuilderFactoryBean.addListener(new KafkaStreamsMicrometerListener(meterRegistry, + Collections.singletonList(new ImmutableTag("customTag", "customTagValue")))); +``` + +#### 4.2.4. 
Streams JSON Serialization and Deserialization + +For serializing and deserializing data when reading or writing to topics or state stores in JSON format, Spring for Apache Kafka provides a `JsonSerde` implementation that uses JSON, delegating to the `JsonSerializer` and `JsonDeserializer` described in [Serialization, Deserialization, and Message Conversion](#serdes). +The `JsonSerde` implementation provides the same configuration options through its constructor (target type or `ObjectMapper`). +In the following example, we use the `JsonSerde` to serialize and deserialize the `Cat` payload of a Kafka stream (the `JsonSerde` can be used in a similar fashion wherever an instance is required): + +``` +stream.through(Serdes.Integer(), new JsonSerde<>(Cat.class), "cats"); +``` + +When constructing the serializer/deserializer programmatically for use in the producer/consumer factory, since version 2.3, you can use the fluent API, which simplifies configuration. + +``` +stream.through(new JsonSerde<>(MyKeyType.class) + .forKeys() + .noTypeInfo(), + new JsonSerde<>(MyValueType.class) + .noTypeInfo(), + "myTypes"); +``` + +#### 4.2.5. Using `KafkaStreamBrancher` + +The `KafkaStreamBrancher` class introduces a more convenient way to build conditional branches on top of `KStream`. + +Consider the following example that does not use `KafkaStreamBrancher`: + +``` +KStream[] branches = builder.stream("source").branch( + (key, value) -> value.contains("A"), + (key, value) -> value.contains("B"), + (key, value) -> true + ); +branches[0].to("A"); +branches[1].to("B"); +branches[2].to("C"); +``` + +The following example uses `KafkaStreamBrancher`: + +``` +new KafkaStreamBrancher() + .branch((key, value) -> value.contains("A"), ks -> ks.to("A")) + .branch((key, value) -> value.contains("B"), ks -> ks.to("B")) + //default branch should not necessarily be defined in the end of the chain! 
+ .defaultBranch(ks -> ks.to("C")) + .onTopOf(builder.stream("source")); + //onTopOf method returns the provided stream so we can continue with method chaining +``` + +#### 4.2.6. Configuration + +To configure the Kafka Streams environment, the `StreamsBuilderFactoryBean` requires a `KafkaStreamsConfiguration` instance. +See the Apache Kafka [documentation](https://kafka.apache.org/0102/documentation/#streamsconfigs) for all possible options. + +| |Starting with version 2.2, the stream configuration is now provided as a `KafkaStreamsConfiguration` object, rather than as a `StreamsConfig`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------| + +To avoid boilerplate code for most cases, especially when you develop microservices, Spring for Apache Kafka provides the `@EnableKafkaStreams` annotation, which you should place on a `@Configuration` class. +All you need is to declare a `KafkaStreamsConfiguration` bean named `defaultKafkaStreamsConfig`. +A `StreamsBuilderFactoryBean` bean, named `defaultKafkaStreamsBuilder`, is automatically declared in the application context. +You can declare and use any additional `StreamsBuilderFactoryBean` beans as well. +You can perform additional customization of that bean, by providing a bean that implements `StreamsBuilderFactoryBeanConfigurer`. +If there are multiple such beans, they will be applied according to their `Ordered.order` property. + +By default, when the factory bean is stopped, the `KafkaStreams.cleanUp()` method is called. +Starting with version 2.1.2, the factory bean has additional constructors, taking a `CleanupConfig` object that has properties to let you control whether the `cleanUp()` method is called during `start()` or `stop()` or neither. +Starting with version 2.7, the default is to never clean up local state. + +#### 4.2.7. 
Header Enricher
+
+Version 2.3 added the `HeaderEnricher` implementation of `Transformer`.
+This can be used to add headers within the stream processing; the header values are SpEL expressions; the root object of the expression evaluation has 3 properties:
+
+* `context` - the `ProcessorContext`, allowing access to the current record metadata
+
+* `key` - the key of the current record
+
+* `value` - the value of the current record
+
+The expressions must return a `byte[]` or a `String` (which will be converted to `byte[]` using `UTF-8`).
+
+To use the enricher within a stream:
+
+```
+.transform(() -> enricher)
+```
+
+The transformer does not change the `key` or `value`; it simply adds headers.
+
+| |If your stream is multi-threaded, you need a new instance for each record.|
+|---|--------------------------------------------------------------------------|
+
+```
+.transform(() -> new HeaderEnricher<..., ...>(expressionMap))
+```
+
+Here is a simple example, adding one literal header and one variable:
+
+```
+Map headers = new HashMap<>();
+headers.put("header1", new LiteralExpression("value1"));
+SpelExpressionParser parser = new SpelExpressionParser();
+headers.put("header2", parser.parseExpression("context.timestamp() + ' @' + context.offset()"));
+HeaderEnricher enricher = new HeaderEnricher<>(headers);
+KStream stream = builder.stream(INPUT);
+stream
+        .transform(() -> enricher)
+        .to(OUTPUT);
+```
+
+#### 4.2.8. `MessagingTransformer`
+
+Version 2.3 added the `MessagingTransformer`, which allows a Kafka Streams topology to interact with a Spring Messaging component, such as a Spring Integration flow.
+The transformer requires an implementation of `MessagingFunction`.
+
+```
+@FunctionalInterface
+public interface MessagingFunction {
+
+    Message exchange(Message message);
+
+}
+```
+
+Spring Integration automatically provides an implementation using its `GatewayProxyFactoryBean`.
+It also requires a `MessagingMessageConverter` to convert the key, value and metadata (including headers) to/from a Spring Messaging `Message`.
+See [Calling a Spring Integration Flow from a `KStream`](https://docs.spring.io/spring-integration/docs/current/reference/html/kafka.html#streams-integration) for more information.
+
+#### 4.2.9. Recovery from Deserialization Exceptions
+
+Version 2.3 introduced the `RecoveringDeserializationExceptionHandler` which can take some action when a deserialization exception occurs.
+Refer to the Kafka documentation about `DeserializationExceptionHandler`, of which the `RecoveringDeserializationExceptionHandler` is an implementation.
+The `RecoveringDeserializationExceptionHandler` is configured with a `ConsumerRecordRecoverer` implementation.
+The framework provides the `DeadLetterPublishingRecoverer` which sends the failed record to a dead-letter topic.
+See [Publishing Dead-letter Records](#dead-letters) for more information about this recoverer.
+
+To configure the recoverer, add the following properties to your streams configuration:
+
+```
+@Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
+public KafkaStreamsConfiguration kStreamsConfigs() {
+    Map props = new HashMap<>();
+    ...
+    props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
+            RecoveringDeserializationExceptionHandler.class);
+    props.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER, recoverer());
+    ...
+    return new KafkaStreamsConfiguration(props);
+}
+
+@Bean
+public DeadLetterPublishingRecoverer recoverer() {
+    return new DeadLetterPublishingRecoverer(kafkaTemplate(),
+            (record, ex) -> new TopicPartition("recovererDLQ", -1));
+}
+```
+
+Of course, the `recoverer()` bean can be your own implementation of `ConsumerRecordRecoverer`.
+
+#### 4.2.10. 
Kafka Streams Example + +The following example combines all the topics we have covered in this chapter: + +``` +@Configuration +@EnableKafka +@EnableKafkaStreams +public static class KafkaStreamsConfig { + + @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) + public KafkaStreamsConfiguration kStreamsConfigs() { + Map props = new HashMap<>(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams"); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName()); + props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class.getName()); + return new KafkaStreamsConfiguration(props); + } + + @Bean + public StreamsBuilderFactoryBeanConfigurer configurer() { + return fb -> fb.setStateListener((newState, oldState) -> { + System.out.println("State transition from " + oldState + " to " + newState); + }); + } + + @Bean + public KStream kStream(StreamsBuilder kStreamBuilder) { + KStream stream = kStreamBuilder.stream("streamingTopic1"); + stream + .mapValues((ValueMapper) String::toUpperCase) + .groupByKey() + .windowedBy(TimeWindows.of(Duration.ofMillis(1000))) + .reduce((String value1, String value2) -> value1 + value2, + Named.as("windowStore")) + .toStream() + .map((windowedId, value) -> new KeyValue<>(windowedId.key(), value)) + .filter((i, s) -> s.length() > 40) + .to("streamingTopic2"); + + stream.print(Printed.toSysOut()); + + return stream; + } + +} +``` + +### 4.3. Testing Applications + +The `spring-kafka-test` jar contains some useful utilities to assist with testing your applications. + +#### 4.3.1. KafkaTestUtils + +`o.s.kafka.test.utils.KafkaTestUtils` provides a number of static helper methods to consume records, retrieve various record offsets, and others. 
+Refer to its [Javadocs](https://docs.spring.io/spring-kafka/docs/current/api/org/springframework/kafka/test/utils/KafkaTestUtils.html) for complete details. + +#### 4.3.2. JUnit + +`o.s.kafka.test.utils.KafkaTestUtils` also provides some static methods to set up producer and consumer properties. +The following listing shows those method signatures: + +``` +/** + * Set up test properties for an {@code } consumer. + * @param group the group id. + * @param autoCommit the auto commit. + * @param embeddedKafka a {@link EmbeddedKafkaBroker} instance. + * @return the properties. + */ +public static Map consumerProps(String group, String autoCommit, + EmbeddedKafkaBroker embeddedKafka) { ... } + +/** + * Set up test properties for an {@code } producer. + * @param embeddedKafka a {@link EmbeddedKafkaBroker} instance. + * @return the properties. + */ +public static Map producerProps(EmbeddedKafkaBroker embeddedKafka) { ... } +``` + +| |Starting with version 2.5, the `consumerProps` method sets the `ConsumerConfig.AUTO_OFFSET_RESET_CONFIG` to `earliest`.
This is because, in most cases, you want the consumer to consume any messages sent in a test case.
The `ConsumerConfig` default is `latest`, which means that records already sent by a test, before the consumer starts, will not be received by that consumer.
To revert to the previous behavior, set the property to `latest` after calling the method.

When using the embedded broker, it is generally best practice to use a different topic for each test, to prevent cross-talk.
If this is not possible for some reason, note that the `consumeFromEmbeddedTopics` method’s default behavior is to seek the assigned partitions to the beginning after assignment.
Since it does not have access to the consumer properties, you must use the overloaded method that takes a `seekToEnd` boolean parameter to seek to the end instead of the beginning.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +A JUnit 4 `@Rule` wrapper for the `EmbeddedKafkaBroker` is provided to create an embedded Kafka and an embedded Zookeeper server. +(See [@EmbeddedKafka Annotation](#embedded-kafka-annotation) for information about using `@EmbeddedKafka` with JUnit 5). +The following listing shows the signatures of those methods: + +``` +/** + * Create embedded Kafka brokers. + * @param count the number of brokers. + * @param controlledShutdown passed into TestUtils.createBrokerConfig. + * @param topics the topics to create (2 partitions per). + */ +public EmbeddedKafkaRule(int count, boolean controlledShutdown, String... topics) { ... } + +/** + * + * Create embedded Kafka brokers. + * @param count the number of brokers. + * @param controlledShutdown passed into TestUtils.createBrokerConfig. + * @param partitions partitions per topic. 
+ * @param topics the topics to create. + */ +public EmbeddedKafkaRule(int count, boolean controlledShutdown, int partitions, String... topics) { ... } +``` + +The `EmbeddedKafkaBroker` class has a utility method that lets you consume for all the topics it created. +The following example shows how to use it: + +``` +Map consumerProps = KafkaTestUtils.consumerProps("testT", "false", embeddedKafka); +DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory( + consumerProps); +Consumer consumer = cf.createConsumer(); +embeddedKafka.consumeFromAllEmbeddedTopics(consumer); +``` + +The `KafkaTestUtils` has some utility methods to fetch results from the consumer. +The following listing shows those method signatures: + +``` +/** + * Poll the consumer, expecting a single record for the specified topic. + * @param consumer the consumer. + * @param topic the topic. + * @return the record. + * @throws org.junit.ComparisonFailure if exactly one record is not received. + */ +public static ConsumerRecord getSingleRecord(Consumer consumer, String topic) { ... } + +/** + * Poll the consumer for records. + * @param consumer the consumer. + * @return the records. + */ +public static ConsumerRecords getRecords(Consumer consumer) { ... } +``` + +The following example shows how to use `KafkaTestUtils`: + +``` +... +template.sendDefault(0, 2, "bar"); +ConsumerRecord received = KafkaTestUtils.getSingleRecord(consumer, "topic"); +... +``` + +When the embedded Kafka and embedded Zookeeper server are started by the `EmbeddedKafkaBroker`, a system property named `spring.embedded.kafka.brokers` is set to the address of the Kafka brokers and a system property named `spring.embedded.zookeeper.connect` is set to the address of Zookeeper. +Convenient constants (`EmbeddedKafkaBroker.SPRING_EMBEDDED_KAFKA_BROKERS` and `EmbeddedKafkaBroker.SPRING_EMBEDDED_ZOOKEEPER_CONNECT`) are provided for this property. 
+
+With the `EmbeddedKafkaBroker.brokerProperties(Map)`, you can provide additional properties for the Kafka servers.
+See [Kafka Config](https://kafka.apache.org/documentation/#brokerconfigs) for more information about possible broker properties.
+
+#### 4.3.3. Configuring Topics
+
+The following example configuration creates topics called `cat` and `hat` with five partitions, a topic called `thing1` with 10 partitions, and a topic called `thing2` with 15 partitions:
+
+```
+public class MyTests {
+
+    @ClassRule
+    public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, false, 5, "cat", "hat");
+
+    @Test
+    public void test() {
+        embeddedKafka.getEmbeddedKafka()
+              .addTopics(new NewTopic("thing1", 10, (short) 1), new NewTopic("thing2", 15, (short) 1));
+        ...
+    }
+
+}
+```
+
+By default, `addTopics` will throw an exception when problems arise (such as adding a topic that already exists).
+Version 2.6 added a new version of that method that returns a `Map`; the key is the topic name and the value is `null` for success, or an `Exception` for a failure.
+
+#### 4.3.4. Using the Same Broker(s) for Multiple Test Classes
+
+There is no built-in support for doing so, but you can use the same broker for multiple test classes with something similar to the following:
+
+```
+public final class EmbeddedKafkaHolder {
+
+    private static EmbeddedKafkaBroker embeddedKafka = new EmbeddedKafkaBroker(1, false)
+            .brokerListProperty("spring.kafka.bootstrap-servers");
+
+    private static boolean started;
+
+    public static EmbeddedKafkaBroker getEmbeddedKafka() {
+        if (!started) {
+            try {
+                embeddedKafka.afterPropertiesSet();
+            }
+            catch (Exception e) {
+                throw new KafkaException("Embedded broker failed to start", e);
+            }
+            started = true;
+        }
+        return embeddedKafka;
+    }
+
+    private EmbeddedKafkaHolder() {
+        super();
+    }
+
+}
+```
+
+This assumes a Spring Boot environment and the embedded broker replaces the bootstrap servers property.
+ +Then, in each test class, you can use something similar to the following: + +``` +static { + EmbeddedKafkaHolder.getEmbeddedKafka().addTopics("topic1", "topic2"); +} + +private static final EmbeddedKafkaBroker broker = EmbeddedKafkaHolder.getEmbeddedKafka(); +``` + +If you are not using Spring Boot, you can obtain the bootstrap servers using `broker.getBrokersAsString()`. + +| |The preceding example provides no mechanism for shutting down the broker(s) when all tests are complete.
This could be a problem if, say, you run your tests in a Gradle daemon.
You should not use this technique in such a situation, or you should use something to call `destroy()` on the `EmbeddedKafkaBroker` when your tests are complete.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.3.5. @EmbeddedKafka Annotation + +We generally recommend that you use the rule as a `@ClassRule` to avoid starting and stopping the broker between tests (and use a different topic for each test). +Starting with version 2.0, if you use Spring’s test application context caching, you can also declare a `EmbeddedKafkaBroker` bean, so a single broker can be used across multiple test classes. +For convenience, we provide a test class-level annotation called `@EmbeddedKafka` to register the `EmbeddedKafkaBroker` bean. 
+The following example shows how to use it: + +``` +@RunWith(SpringRunner.class) +@DirtiesContext +@EmbeddedKafka(partitions = 1, + topics = { + KafkaStreamsTests.STREAMING_TOPIC1, + KafkaStreamsTests.STREAMING_TOPIC2 }) +public class KafkaStreamsTests { + + @Autowired + private EmbeddedKafkaBroker embeddedKafka; + + @Test + public void someTest() { + Map consumerProps = KafkaTestUtils.consumerProps("testGroup", "true", this.embeddedKafka); + consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + ConsumerFactory cf = new DefaultKafkaConsumerFactory<>(consumerProps); + Consumer consumer = cf.createConsumer(); + this.embeddedKafka.consumeFromAnEmbeddedTopic(consumer, KafkaStreamsTests.STREAMING_TOPIC2); + ConsumerRecords replies = KafkaTestUtils.getRecords(consumer); + assertThat(replies.count()).isGreaterThanOrEqualTo(1); + } + + @Configuration + @EnableKafkaStreams + public static class KafkaStreamsConfiguration { + + @Value("${" + EmbeddedKafkaBroker.SPRING_EMBEDDED_KAFKA_BROKERS + "}") + private String brokerAddresses; + + @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) + public KafkaStreamsConfiguration kStreamsConfigs() { + Map props = new HashMap<>(); + props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams"); + props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses); + return new KafkaStreamsConfiguration(props); + } + + } + +} +``` + +Starting with version 2.2.4, you can also use the `@EmbeddedKafka` annotation to specify the Kafka ports property. 
+ +The `topics`, `brokerProperties`, and `brokerPropertiesLocation` attributes of `@EmbeddedKafka` support property placeholder resolutions, as the following example shows: + +``` +@TestPropertySource(locations = "classpath:/test.properties") +@EmbeddedKafka(topics = { "any-topic", "${kafka.topics.another-topic}" }, + brokerProperties = { "log.dir=${kafka.broker.logs-dir}", + "listeners=PLAINTEXT://localhost:${kafka.broker.port}", + "auto.create.topics.enable=${kafka.broker.topics-enable:true}" }, + brokerPropertiesLocation = "classpath:/broker.properties") +``` + +In the preceding example, the property placeholders `${kafka.topics.another-topic}`, `${kafka.broker.logs-dir}`, and `${kafka.broker.port}` are resolved from the Spring `Environment`. +In addition, the broker properties are loaded from the `broker.properties` classpath resource specified by the `brokerPropertiesLocation`. +Property placeholders are resolved for the `brokerPropertiesLocation` URL and for any property placeholders found in the resource. +Properties defined by `brokerProperties` override properties found in `brokerPropertiesLocation`. + +You can use the `@EmbeddedKafka` annotation with JUnit 4 or JUnit 5. + +#### 4.3.6. @EmbeddedKafka Annotation with JUnit5 + +Starting with version 2.3, there are two ways to use the `@EmbeddedKafka` annotation with JUnit5. +When used with the `@SpringJunitConfig` annotation, the embedded broker is added to the test application context. +You can auto wire the broker into your test, at the class or method level, to get the broker address list. + +When **not** using the spring test context, the `EmbeddedKafkaCondition` creates a broker; the condition includes a parameter resolver so you can access the broker in your test method…​ + +``` +@EmbeddedKafka +public class EmbeddedKafkaConditionTests { + + @Test + public void test(EmbeddedKafkaBroker broker) { + String brokerList = broker.getBrokersAsString(); + ... 
+ } + +} +``` + +A stand-alone (not Spring test context) broker will be created if the class annotated with `@EmbeddedKafka` is not also annotated (or meta annotated) with `ExtendWith(SpringExtension.class)`. `@SpringJunitConfig` and `@SpringBootTest` are so meta annotated and the context-based broker will be used when either of those annotations are also present. + +| |When there is a Spring test application context available, the topics and broker properties can contain property placeholders, which will be resolved as long as the property is defined somewhere.
If there is no Spring context available, these placeholders won’t be resolved.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.3.7. Embedded Broker in `@SpringBootTest` Annotations + +[Spring Initializr](https://start.spring.io/) now automatically adds the `spring-kafka-test` dependency in test scope to the project configuration. + +| |If your application uses the Kafka binder in `spring-cloud-stream` and if you want to use an embedded broker for tests, you must remove the `spring-cloud-stream-test-support` dependency, because it replaces the real binder with a test binder for test cases.
If you wish some tests to use the test binder and some to use the embedded broker, tests that use the real binder need to disable the test binder by excluding the binder auto configuration in the test class.
The following example shows how to do so:

```
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.autoconfigure.exclude="
+ "org.springframework.cloud.stream.test.binder.TestSupportBinderAutoConfiguration")
public class MyApplicationTests {
...
}
```| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +There are several ways to use an embedded broker in a Spring Boot application test. + +They include: + +* [JUnit4 Class Rule](#kafka-testing-junit4-class-rule) + +* [`@EmbeddedKafka` Annotation or `EmbeddedKafkaBroker` Bean](#kafka-testing-embeddedkafka-annotation) + +##### JUnit4 Class Rule + +The following example shows how to use a JUnit4 class rule to create an embedded broker: + +``` +@RunWith(SpringRunner.class) +@SpringBootTest +public class MyApplicationTests { + + @ClassRule + public static EmbeddedKafkaRule broker = new EmbeddedKafkaRule(1, + false, "someTopic") + .brokerListProperty("spring.kafka.bootstrap-servers"); + } + + @Autowired + private KafkaTemplate template; + + @Test + public void test() { + ... + } + +} +``` + +Notice that, since this is a Spring Boot application, we override the broker list property to set Boot’s property. 
+ +##### `@EmbeddedKafka` Annotation or `EmbeddedKafkaBroker` Bean + +The following example shows how to use an `@EmbeddedKafka` Annotation to create an embedded broker: + +``` +@RunWith(SpringRunner.class) +@EmbeddedKafka(topics = "someTopic", + bootstrapServersProperty = "spring.kafka.bootstrap-servers") +public class MyApplicationTests { + + @Autowired + private KafkaTemplate template; + + @Test + public void test() { + ... + } + +} +``` + +#### 4.3.8. Hamcrest Matchers + +The `o.s.kafka.test.hamcrest.KafkaMatchers` provides the following matchers: + +``` +/** + * @param key the key + * @param the type. + * @return a Matcher that matches the key in a consumer record. + */ +public static Matcher> hasKey(K key) { ... } + +/** + * @param value the value. + * @param the type. + * @return a Matcher that matches the value in a consumer record. + */ +public static Matcher> hasValue(V value) { ... } + +/** + * @param partition the partition. + * @return a Matcher that matches the partition in a consumer record. + */ +public static Matcher> hasPartition(int partition) { ... } + +/** + * Matcher testing the timestamp of a {@link ConsumerRecord} assuming the topic has been set with + * {@link org.apache.kafka.common.record.TimestampType#CREATE_TIME CreateTime}. + * + * @param ts timestamp of the consumer record. + * @return a Matcher that matches the timestamp in a consumer record. + */ +public static Matcher> hasTimestamp(long ts) { + return hasTimestamp(TimestampType.CREATE_TIME, ts); +} + +/** + * Matcher testing the timestamp of a {@link ConsumerRecord} + * @param type timestamp type of the record + * @param ts timestamp of the consumer record. + * @return a Matcher that matches the timestamp in a consumer record. + */ +public static Matcher> hasTimestamp(TimestampType type, long ts) { + return new ConsumerRecordTimestampMatcher(type, ts); +} +``` + +#### 4.3.9. 
AssertJ Conditions + +You can use the following AssertJ conditions: + +``` +/** + * @param key the key + * @param the type. + * @return a Condition that matches the key in a consumer record. + */ +public static Condition> key(K key) { ... } + +/** + * @param value the value. + * @param the type. + * @return a Condition that matches the value in a consumer record. + */ +public static Condition> value(V value) { ... } + +/** + * @param key the key. + * @param value the value. + * @param the key type. + * @param the value type. + * @return a Condition that matches the key in a consumer record. + * @since 2.2.12 + */ +public static Condition> keyValue(K key, V value) { ... } + +/** + * @param partition the partition. + * @return a Condition that matches the partition in a consumer record. + */ +public static Condition> partition(int partition) { ... } + +/** + * @param value the timestamp. + * @return a Condition that matches the timestamp value in a consumer record. + */ +public static Condition> timestamp(long value) { + return new ConsumerRecordTimestampCondition(TimestampType.CREATE_TIME, value); +} + +/** + * @param type the type of timestamp + * @param value the timestamp. + * @return a Condition that matches the timestamp value in a consumer record. + */ +public static Condition> timestamp(TimestampType type, long value) { + return new ConsumerRecordTimestampCondition(type, value); +} +``` + +#### 4.3.10. 
Example + +The following example brings together most of the topics covered in this chapter: + +``` +public class KafkaTemplateTests { + + private static final String TEMPLATE_TOPIC = "templateTopic"; + + @ClassRule + public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, true, TEMPLATE_TOPIC); + + @Test + public void testTemplate() throws Exception { + Map consumerProps = KafkaTestUtils.consumerProps("testT", "false", + embeddedKafka.getEmbeddedKafka()); + DefaultKafkaConsumerFactory cf = + new DefaultKafkaConsumerFactory(consumerProps); + ContainerProperties containerProperties = new ContainerProperties(TEMPLATE_TOPIC); + KafkaMessageListenerContainer container = + new KafkaMessageListenerContainer<>(cf, containerProperties); + final BlockingQueue> records = new LinkedBlockingQueue<>(); + container.setupMessageListener(new MessageListener() { + + @Override + public void onMessage(ConsumerRecord record) { + System.out.println(record); + records.add(record); + } + + }); + container.setBeanName("templateTests"); + container.start(); + ContainerTestUtils.waitForAssignment(container, + embeddedKafka.getEmbeddedKafka().getPartitionsPerTopic()); + Map producerProps = + KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka()); + ProducerFactory pf = + new DefaultKafkaProducerFactory(producerProps); + KafkaTemplate template = new KafkaTemplate<>(pf); + template.setDefaultTopic(TEMPLATE_TOPIC); + template.sendDefault("foo"); + assertThat(records.poll(10, TimeUnit.SECONDS), hasValue("foo")); + template.sendDefault(0, 2, "bar"); + ConsumerRecord received = records.poll(10, TimeUnit.SECONDS); + assertThat(received, hasKey(2)); + assertThat(received, hasPartition(0)); + assertThat(received, hasValue("bar")); + template.send(TEMPLATE_TOPIC, 0, 2, "baz"); + received = records.poll(10, TimeUnit.SECONDS); + assertThat(received, hasKey(2)); + assertThat(received, hasPartition(0)); + assertThat(received, hasValue("baz")); + } + +} +``` + +The preceding 
example uses the Hamcrest matchers. +With `AssertJ`, the final part looks like the following code: + +``` +assertThat(records.poll(10, TimeUnit.SECONDS)).has(value("foo")); +template.sendDefault(0, 2, "bar"); +ConsumerRecord received = records.poll(10, TimeUnit.SECONDS); +// using individual assertions +assertThat(received).has(key(2)); +assertThat(received).has(value("bar")); +assertThat(received).has(partition(0)); +template.send(TEMPLATE_TOPIC, 0, 2, "baz"); +received = records.poll(10, TimeUnit.SECONDS); +// using allOf() +assertThat(received).has(allOf(keyValue(2, "baz"), partition(0))); +``` + +### 4.4. Non-Blocking Retries + +| |This is an experimental feature and the usual rule of no breaking API changes does not apply to this feature until the experimental designation is removed.
Users are encouraged to try out the feature and provide feedback via GitHub Issues or GitHub discussions.
This is regarding the API only; the feature is considered to be complete, and robust.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Achieving non-blocking retry / dlt functionality with Kafka usually requires setting up extra topics and creating and configuring the corresponding listeners. +Since 2.7 Spring for Apache Kafka offers support for that via the `@RetryableTopic` annotation and `RetryTopicConfiguration` class to simplify that bootstrapping. + +#### 4.4.1. How The Pattern Works + +If message processing fails, the message is forwarded to a retry topic with a back off timestamp. +The retry topic consumer then checks the timestamp and if it’s not due it pauses the consumption for that topic’s partition. +When it is due the partition consumption is resumed, and the message is consumed again. +If the message processing fails again the message will be forwarded to the next retry topic, and the pattern is repeated until a successful processing occurs, or the attempts are exhausted, and the message is sent to the Dead Letter Topic (if configured). + +To illustrate, if you have a "main-topic" topic, and want to setup non-blocking retry with an exponential backoff of 1000ms with a multiplier of 2 and 4 max attempts, it will create the main-topic-retry-1000, main-topic-retry-2000, main-topic-retry-4000 and main-topic-dlt topics and configure the respective consumers. +The framework also takes care of creating the topics and setting up and configuring the listeners. 
+ +| |By using this strategy you lose Kafka’s ordering guarantees for that topic.| +|---|---------------------------------------------------------------------------| + +| |You can set the `AckMode` mode you prefer, but `RECORD` is suggested.| +|---|---------------------------------------------------------------------| + +| |At this time this functionality doesn’t support class level `@KafkaListener` annotations| +|---|----------------------------------------------------------------------------------------| + +#### 4.4.2. Back Off Delay Precision + +##### Overview and Guarantees + +All message processing and backing off is handled by the consumer thread, and, as such, delay precision is guaranteed on a best-effort basis. +If one message’s processing takes longer than the next message’s back off period for that consumer, the next message’s delay will be higher than expected. +Also, for short delays (about 1s or less), the maintenance work the thread has to do, such as committing offsets, may delay the message processing execution. +The precision can also be affected if the retry topic’s consumer is handling more than one partition, because we rely on waking up the consumer from polling and having full pollTimeouts to make timing adjustments. + +That being said, for consumers handling a single partition the message’s processing should happen under 100ms after it’s exact due time for most situations. + +| |It is guaranteed that a message will never be processed before its due time.| +|---|----------------------------------------------------------------------------| + +##### Tuning the Delay Precision + +The message’s processing delay precision relies on two `ContainerProperties`: `ContainerProperties.pollTimeout` and `ContainerProperties.idlePartitionEventInterval`. 
+Both properties will be automatically set in the retry topic and dlt’s `ListenerContainerFactory` to one quarter of the smallest delay value for that topic, with a minimum value of 250ms and a maximum value of 5000ms. +These values will only be set if the property has its default values - if you change either value yourself your change will not be overridden. +This way you can tune the precision and performance for the retry topics if you need to. + +| |You can have separate `ListenerContainerFactory` instances for the main and retry topics - this way you can have different settings to better suit your needs, such as having a higher polling timeout setting for the main topics and a lower one for the retry topics.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.4.3. Configuration + +##### Using the `@RetryableTopic` annotation + +To configure the retry topic and dlt for a `@KafkaListener` annotated method, you just have to add the `@RetryableTopic` annotation to it and Spring for Apache Kafka will bootstrap all the necessary topics and consumers with the default configurations. + +``` +@RetryableTopic(kafkaTemplate = "myRetryableTopicKafkaTemplate") +@KafkaListener(topics = "my-annotated-topic", groupId = "myGroupId") +public void processMessage(MyPojo message) { + // ... message processing +} +``` + +You can specify a method in the same class to process the dlt messages by annotating it with the `@DltHandler` annotation. +If no DltHandler method is provided a default consumer is created which only logs the consumption. + +``` +@DltHandler +public void processMessage(MyPojo message) { +// ... 
message processing, persistence, etc +} +``` + +| |If you don’t specify a kafkaTemplate name a bean with name `retryTopicDefaultKafkaTemplate` will be looked up.
If no bean is found an exception is thrown.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Using `RetryTopicConfiguration` beans + +You can also configure the non-blocking retry support by creating `RetryTopicConfiguration` beans in a `@Configuration` annotated class. + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .create(template); +} +``` + +This will create retry topics and a dlt, as well as the corresponding consumers, for all topics in methods annotated with '@KafkaListener' using the default configurations. The `KafkaTemplate` instance is required for message forwarding. + +To achieve more fine-grained control over how to handle non-blocking retrials for each topic, more than one `RetryTopicConfiguration` bean can be provided. + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackoff(3000) + .maxAttempts(5) + .includeTopics("my-topic", "my-other-topic") + .create(template); +} + +@Bean +public RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .exponentialBackoff(1000, 2, 5000) + .maxAttempts(4) + .excludeTopics("my-topic", "my-other-topic") + .retryOn(MyException.class) + .create(template); +} +``` + +| |The retry topics' and dlt’s consumers will be assigned to a consumer group with a group id that is the combination of the one with you provide in the `groupId` parameter of the `@KafkaListener` annotation with the topic’s suffix. 
If you don’t provide any they’ll all belong to the same group, and rebalance on a retry topic will cause an unnecessary rebalance on the main topic.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If the consumer is configured with an [`ErrorHandlingDeserializer`](#error-handling-deserializer), to handle deserilialization exceptions, it is important to configure the `KafkaTemplate` and its producer with a serializer that can handle normal objects as well as raw `byte[]` values, which result from deserialization exceptions.
The generic value type of the template should be `Object`.
One technique is to use the `DelegatingByTypeSerializer`; an example follows:| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +@Bean +public ProducerFactory producerFactory() { + return new DefaultKafkaProducerFactory<>(producerConfiguration(), new StringSerializer(), + new DelegatingByTypeSerializer(Map.of(byte[].class, new ByteArraySerializer(), + MyNormalObject.class, new JsonSerializer()))); +} + +@Bean +public KafkaTemplate kafkaTemplate() { + return new KafkaTemplate<>(producerFactory()); +} +``` + +#### 4.4.4. Features + +Most of the features are available both for the `@RetryableTopic` annotation and the `RetryTopicConfiguration` beans. + +##### BackOff Configuration + +The BackOff configuration relies on the `BackOffPolicy` interface from the `Spring Retry` project. + +It includes: + +* Fixed Back Off + +* Exponential Back Off + +* Random Exponential Back Off + +* Uniform Random Back Off + +* No Back Off + +* Custom Back Off + +``` +@RetryableTopic(attempts = 5, + backoff = @Backoff(delay = 1000, multiplier = 2, maxDelay = 5000)) +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... 
message processing +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackoff(3000) + .maxAttempts(4) + .build(); +} +``` + +You can also provide a custom implementation of Spring Retry’s `SleepingBackOffPolicy`: + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .customBackOff(new MyCustomBackOffPolicy()) + .maxAttempts(5) + .build(); +} +``` + +| |The default backoff policy is FixedBackOffPolicy with a maximum of 3 attempts and 1000ms intervals.| +|---|---------------------------------------------------------------------------------------------------| + +| |The first attempt counts against the maxAttempts, so if you provide a maxAttempts value of 4 there’ll be the original attempt plus 3 retries.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------| + +##### Single Topic Fixed Delay Retries + +If you’re using fixed delay policies such as `FixedBackOffPolicy` or `NoBackOffPolicy` you can use a single topic to accomplish the non-blocking retries. +This topic will be suffixed with the provided or default suffix, and will not have either the index or the delay values appended. + +``` +@RetryableTopic(backoff = @Backoff(2000), fixedDelayTopicStrategy = FixedDelayStrategy.SINGLE_TOPIC) +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... 
message processing +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackoff(3000) + .maxAttempts(5) + .useSingleTopicForFixedDelays() + .build(); +} +``` + +| |The default behavior is creating separate retry topics for each attempt, appended with their index value: retry-0, retry-1, …​| +|---|------------------------------------------------------------------------------------------------------------------------------| + +##### Global timeout + +You can set the global timeout for the retrying process. +If that time is reached, the next time the consumer throws an exception the message goes straight to the DLT, or just ends the processing if no DLT is available. + +``` +@RetryableTopic(backoff = @Backoff(2000), timeout = 5000) +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... message processing +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .fixedBackoff(2000) + .timeoutAfter(5000) + .build(); +} +``` + +| |The default is having no timeout set, which can also be achieved by providing -1 as the timout value.| +|---|-----------------------------------------------------------------------------------------------------| + +##### Exception Classifier + +You can specify which exceptions you want to retry on and which not to. +You can also set it to traverse the causes to lookup nested exceptions. 
+ +``` +@RetryableTopic(include = {MyRetryException.class, MyOtherRetryException.class}, traversingCauses = true) +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + throw new RuntimeException(new MyRetryException()); // Will retry +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .notRetryOn(MyDontRetryException.class) + .create(template); +} +``` + +| |The default behavior is retrying on all exceptions and not traversing causes.| +|---|-----------------------------------------------------------------------------| + +Since 2.8.3 there’s a global list of fatal exceptions which will cause the record to be sent to the DLT without any retries. +See [DefaultErrorHandler](#default-eh) for the default list of fatal exceptions. +You can add or remove exceptions to and from this list with: + +``` +@Bean(name = RetryTopicInternalBeanNames.DESTINATION_TOPIC_CONTAINER_NAME) +public DefaultDestinationTopicResolver topicResolver(ApplicationContext applicationContext, + @Qualifier(RetryTopicInternalBeanNames + .INTERNAL_BACKOFF_CLOCK_BEAN_NAME) Clock clock) { + DefaultDestinationTopicResolver ddtr = new DefaultDestinationTopicResolver(clock, applicationContext); + ddtr.addNotRetryableExceptions(MyFatalException.class); + ddtr.removeNotRetryableException(ConversionException.class); + return ddtr; +} +``` + +| |To disable fatal exceptions' classification, clear the default list using the `setClassifications` method in `DefaultDestinationTopicResolver`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------| + +##### Include and Exclude Topics + +You can decide which topics will and will not be handled by a `RetryTopicConfiguration` bean via the .includeTopic(String topic), .includeTopics(Collection\ topics) .excludeTopic(String topic) and 
.excludeTopics(Collection\ topics) methods. + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .includeTopics(List.of("my-included-topic", "my-other-included-topic")) + .create(template); +} + +@Bean +public RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .excludeTopic("my-excluded-topic") + .create(template); +} +``` + +| |The default behavior is to include all topics.| +|---|----------------------------------------------| + +##### Topics AutoCreation + +Unless otherwise specified the framework will auto create the required topics using `NewTopic` beans that are consumed by the `KafkaAdmin` bean. +You can specify the number of partitions and the replication factor with which the topics will be created, and you can turn this feature off. + +| |Note that if you’re not using Spring Boot you’ll have to provide a KafkaAdmin bean in order to use this feature.| +|---|----------------------------------------------------------------------------------------------------------------| + +``` +@RetryableTopic(numPartitions = 2, replicationFactor = 3) +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... message processing +} + +@RetryableTopic(autoCreateTopics = false) +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... 
message processing +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .autoCreateTopicsWith(2, 3) + .create(template); +} + +@Bean +public RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .doNotAutoCreateRetryTopics() + .create(template); +} +``` + +| |By default the topics are autocreated with one partition and a replication factor of one.| +|---|-----------------------------------------------------------------------------------------| + +##### Failure Header Management + +When considering how to manage failure headers (original headers and exception headers), the framework delegates to the `DeadLetterPublishingRecover` to decide whether to append or replace the headers. + +By default, it explicitly sets `appendOriginalHeaders` to `false` and leaves `stripPreviousExceptionHeaders` to the default used by the `DeadLetterPublishingRecover`. + +This means that only the first "original" and last exception headers are retained with the default configuration. +This is to avoid creation of excessively large messages (due to the stack trace header, for example) when many retry steps are involved. + +See [Managing Dead Letter Record Headers](#dlpr-headers) for more information. 
+ +To reconfigure the framework to use different settings for these properties, replace the standard `DeadLetterPublishingRecovererFactory` bean by adding a `recovererCustomizer`: + +``` +@Bean(RetryTopicInternalBeanNames.DEAD_LETTER_PUBLISHING_RECOVERER_FACTORY_BEAN_NAME) +DeadLetterPublishingRecovererFactory factory(DestinationTopicResolver resolver) { + DeadLetterPublishingRecovererFactory factory = new DeadLetterPublishingRecovererFactory(resolver); + factory.setDeadLetterPublishingRecovererCustomizer(dlpr -> { + dlpr.appendOriginalHeaders(true); + dlpr.setStripPreviousExceptionHeaders(false); + }); + return factory; +} +``` + +#### 4.4.5. Topic Naming + +Retry topics and DLT are named by suffixing the main topic with a provided or default value, appended by either the delay or index for that topic. + +Examples: + +"my-topic" → "my-topic-retry-0", "my-topic-retry-1", …​, "my-topic-dlt" + +"my-other-topic" → "my-topic-myRetrySuffix-1000", "my-topic-myRetrySuffix-2000", …​, "my-topic-myDltSuffix". + +##### Retry Topics and Dlt Suffixes + +You can specify the suffixes that will be used by the retry and dlt topics. + +``` +@RetryableTopic(retryTopicSuffix = "-my-retry-suffix", dltTopicSuffix = "-my-dlt-suffix") +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... message processing +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .retryTopicSuffix("-my-retry-suffix") + .dltTopicSuffix("-my-dlt-suffix") + .create(template); +} +``` + +| |The default suffixes are "-retry" and "-dlt", for retry topics and dlt respectively.| +|---|------------------------------------------------------------------------------------| + +##### Appending the Topic’s Index or Delay + +You can either append the topic’s index or delay values after the suffix. 
+ +``` +@RetryableTopic(topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE) +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... message processing +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .suffixTopicsWithIndexValues() + .create(template); + } +``` + +| |The default behavior is to suffix with the delay values, except for fixed delay configurations with multiple topics, in which case the topics are suffixed with the topic’s index.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Custom naming strategies + +More complex naming strategies can be accomplished by registering a bean that implements `RetryTopicNamesProviderFactory`. The default implementation is `SuffixingRetryTopicNamesProviderFactory` and a different implementation can be registered in the following way: + +``` +@Bean +public RetryTopicNamesProviderFactory myRetryNamingProviderFactory() { + return new CustomRetryTopicNamesProviderFactory(); +} +``` + +As an example the following implementation, in addition to the standard suffix, adds a prefix to retry/dl topics names: + +``` +public class CustomRetryTopicNamesProviderFactory implements RetryTopicNamesProviderFactory { + + @Override + public RetryTopicNamesProvider createRetryTopicNamesProvider( + DestinationTopic.Properties properties) { + + if(properties.isMainEndpoint()) { + return new SuffixingRetryTopicNamesProvider(properties); + } + else { + return new SuffixingRetryTopicNamesProvider(properties) { + + @Override + public String getTopicName(String topic) { + return "my-prefix-" + super.getTopicName(topic); + } + + }; + } + } + +} +``` + +#### 4.4.6. 
Dlt Strategies + +The framework provides a few strategies for working with DLTs. You can provide a method for DLT processing, use the default logging method, or have no DLT at all. Also you can choose what happens if DLT processing fails. + +##### Dlt Processing Method + +You can specify the method used to process the Dlt for the topic, as well as the behavior if that processing fails. + +To do that you can use the `@DltHandler` annotation in a method of the class with the `@RetryableTopic` annotation(s). +Note that the same method will be used for all the `@RetryableTopic` annotated methods within that class. + +``` +@RetryableTopic +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... message processing +} + +@DltHandler +public void processMessage(MyPojo message) { +// ... message processing, persistence, etc +} +``` + +The DLT handler method can also be provided through the RetryTopicConfigurationBuilder.dltHandlerMethod(String, String) method, passing as arguments the bean name and method name that should process the DLT’s messages. + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .dltProcessor("myCustomDltProcessor", "processDltMessage") + .create(template); +} + +@Component +public class MyCustomDltProcessor { + + private final MyDependency myDependency; + + public MyCustomDltProcessor(MyDependency myDependency) { + this.myDependency = myDependency; + } + + public void processDltMessage(MyPojo message) { + // ... 
message processing, persistence, etc + } +} +``` + +| |If no DLT handler is provided, the default RetryTopicConfigurer.LoggingDltListenerHandlerMethod is used.| +|---|--------------------------------------------------------------------------------------------------------| + +Starting with version 2.8, if you don’t want to consume from the DLT in this application at all, including by the default handler (or you wish to defer consumption), you can control whether or not the DLT container starts, independent of the container factory’s `autoStartup` property. + +When using the `@RetryableTopic` annotation, set the `autoStartDltHandler` property to `false`; when using the configuration builder, use `.autoStartDltHandler(false)` . + +You can later start the DLT handler via the `KafkaListenerEndpointRegistry`. + +##### DLT Failure Behavior + +Should the DLT processing fail, there are two possible behaviors available: `ALWAYS_RETRY_ON_ERROR` and `FAIL_ON_ERROR`. + +In the former the record is forwarded back to the DLT topic so it doesn’t block other DLT records' processing. +In the latter the consumer ends the execution without forwarding the message. + +``` +@RetryableTopic(dltProcessingFailureStrategy = + DltStrategy.FAIL_ON_ERROR) +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... message processing +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .dltProcessor(MyCustomDltProcessor.class, "processDltMessage") + .doNotRetryOnDltFailure() + .create(template); +} +``` + +| |The default behavior is to `ALWAYS_RETRY_ON_ERROR`.| +|---|---------------------------------------------------| + +| |Starting with version 2.8.3, `ALWAYS_RETRY_ON_ERROR` will NOT route a record back to the DLT if the record causes a fatal exception to be thrown,
such as a `DeserializationException` because, generally, such exceptions will always be thrown.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Exceptions that are considered fatal are: + +* `DeserializationException` + +* `MessageConversionException` + +* `ConversionException` + +* `MethodArgumentResolutionException` + +* `NoSuchMethodException` + +* `ClassCastException` + +You can add exceptions to and remove exceptions from this list using methods on the `DestinationTopicResolver` bean. + +See [Exception Classifier](#retry-topic-ex-classifier) for more information. + +##### Configuring No DLT + +The framework also provides the possibility of not configuring a DLT for the topic. +In this case after retrials are exhausted the processing simply ends. + +``` +@RetryableTopic(dltProcessingFailureStrategy = + DltStrategy.NO_DLT) +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... message processing +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .doNotConfigureDlt() + .create(template); +} +``` + +#### 4.4.7. Specifying a ListenerContainerFactory + +By default the RetryTopic configuration will use the provided factory from the `@KafkaListener` annotation, but you can specify a different one to be used to create the retry topic and dlt listener containers. + +For the `@RetryableTopic` annotation you can provide the factory’s bean name, and using the `RetryTopicConfiguration` bean you can either provide the bean name or the instance itself. 
+ +``` +@RetryableTopic(listenerContainerFactory = "my-retry-topic-factory") +@KafkaListener(topics = "my-annotated-topic") +public void processMessage(MyPojo message) { + // ... message processing +} +``` + +``` +@Bean +public RetryTopicConfiguration myRetryTopic(KafkaTemplate template, + ConcurrentKafkaListenerContainerFactory factory) { + + return RetryTopicConfigurationBuilder + .newInstance() + .listenerFactory(factory) + .create(template); +} + +@Bean +public RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate template) { + return RetryTopicConfigurationBuilder + .newInstance() + .listenerFactory("my-retry-topic-factory") + .create(template); +} +``` + +| |Since 2.8.3 you can use the same factory for retryable and non-retryable topics.| +|---|--------------------------------------------------------------------------------| + +If you need to revert the factory configuration behavior to prior 2.8.3, you can replace the standard `RetryTopicConfigurer` bean and set `useLegacyFactoryConfigurer` to `true`, such as: + +``` +@Bean(name = RetryTopicInternalBeanNames.RETRY_TOPIC_CONFIGURER) +public RetryTopicConfigurer retryTopicConfigurer(DestinationTopicProcessor destinationTopicProcessor, + ListenerContainerFactoryResolver containerFactoryResolver, + ListenerContainerFactoryConfigurer listenerContainerFactoryConfigurer, + BeanFactory beanFactory, + RetryTopicNamesProviderFactory retryTopicNamesProviderFactory) { + RetryTopicConfigurer retryTopicConfigurer = new RetryTopicConfigurer(destinationTopicProcessor, containerFactoryResolver, listenerContainerFactoryConfigurer, beanFactory, retryTopicNamesProviderFactory); + retryTopicConfigurer.useLegacyFactoryConfigurer(true); + return retryTopicConfigurer; +} +``` + +\==== Changing KafkaBackOffException Logging Level + +When a message in the retry topic is not due for consumption, a `KafkaBackOffException` is thrown. 
Such exceptions are logged by default at `DEBUG` level, but you can change this behavior by setting an error handler customizer in the `ListenerContainerFactoryConfigurer` in a `@Configuration` class. + +For example, to change the logging level to WARN you might add: + +``` +@Bean(name = RetryTopicInternalBeanNames.LISTENER_CONTAINER_FACTORY_CONFIGURER_NAME) +public ListenerContainerFactoryConfigurer listenerContainer(KafkaConsumerBackoffManager kafkaConsumerBackoffManager, + DeadLetterPublishingRecovererFactory deadLetterPublishingRecovererFactory, + @Qualifier(RetryTopicInternalBeanNames + .INTERNAL_BACKOFF_CLOCK_BEAN_NAME) Clock clock) { + ListenerContainerFactoryConfigurer configurer = new ListenerContainerFactoryConfigurer(kafkaConsumerBackoffManager, deadLetterPublishingRecovererFactory, clock); + configurer.setErrorHandlerCustomizer(commonErrorHandler -> ((DefaultErrorHandler) commonErrorHandler).setLogLevel(KafkaException.Level.WARN)); + return configurer; +} +``` + +\== Tips, Tricks and Examples + +\=== Manually Assigning All Partitions + +Let’s say you want to always read all records from all partitions (such as when using a compacted topic to load a distributed cache), it can be useful to manually assign the partitions and not use Kafka’s group management. +Doing so can be unwieldy when there are many partitions, because you have to list the partitions. +It’s also an issue if the number of partitions changes over time, because you would have to recompile your application each time the partition count changes. 
+ +The following is an example of how to use the power of a SpEL expression to create the partition list dynamically when the application starts: + +``` +@KafkaListener(topicPartitions = @TopicPartition(topic = "compacted", + partitions = "#{@finder.partitions('compacted')}", + partitionOffsets = @PartitionOffset(partition = "*", initialOffset = "0"))) +public void listen(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String key, String payload) { + ... +} + +@Bean +public PartitionFinder finder(ConsumerFactory consumerFactory) { + return new PartitionFinder(consumerFactory); +} + +public static class PartitionFinder { + + private final ConsumerFactory consumerFactory; + + public PartitionFinder(ConsumerFactory consumerFactory) { + this.consumerFactory = consumerFactory; + } + + public String[] partitions(String topic) { + try (Consumer consumer = consumerFactory.createConsumer()) { + return consumer.partitionsFor(topic).stream() + .map(pi -> "" + pi.partition()) + .toArray(String[]::new); + } + } + +} +``` + +Using this in conjunction with `ConsumerConfig.AUTO_OFFSET_RESET_CONFIG=earliest` will load all records each time the application is started. +You should also set the container’s `AckMode` to `MANUAL` to prevent the container from committing offsets for a `null` consumer group. +However, starting with version 2.5.5, as shown above, you can apply an initial offset to all partitions; see [Explicit Partition Assignment](#manual-assignment) for more information. + +\=== Examples of Kafka Transactions with Other Transaction Managers + +The following Spring Boot application is an example of chaining database and Kafka transactions. +The listener container starts the Kafka transaction and the `@Transactional` annotation starts the DB transaction. +The DB transaction is committed first; if the Kafka transaction fails to commit, the record will be redelivered so the DB update should be idempotent.
+ +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + public ApplicationRunner runner(KafkaTemplate template) { + return args -> template.executeInTransaction(t -> t.send("topic1", "test")); + } + + @Bean + public DataSourceTransactionManager dstm(DataSource dataSource) { + return new DataSourceTransactionManager(dataSource); + } + + @Component + public static class Listener { + + private final JdbcTemplate jdbcTemplate; + + private final KafkaTemplate kafkaTemplate; + + public Listener(JdbcTemplate jdbcTemplate, KafkaTemplate kafkaTemplate) { + this.jdbcTemplate = jdbcTemplate; + this.kafkaTemplate = kafkaTemplate; + } + + @KafkaListener(id = "group1", topics = "topic1") + @Transactional("dstm") + public void listen1(String in) { + this.kafkaTemplate.send("topic2", in.toUpperCase()); + this.jdbcTemplate.execute("insert into mytable (data) values ('" + in + "')"); + } + + @KafkaListener(id = "group2", topics = "topic2") + public void listen2(String in) { + System.out.println(in); + } + + } + + @Bean + public NewTopic topic1() { + return TopicBuilder.name("topic1").build(); + } + + @Bean + public NewTopic topic2() { + return TopicBuilder.name("topic2").build(); + } + +} +``` + +``` +spring.datasource.url=jdbc:mysql://localhost/integration?serverTimezone=UTC +spring.datasource.username=root +spring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver + +spring.kafka.consumer.auto-offset-reset=earliest +spring.kafka.consumer.enable-auto-commit=false +spring.kafka.consumer.properties.isolation.level=read_committed + +spring.kafka.producer.transaction-id-prefix=tx- + +#logging.level.org.springframework.transaction=trace +#logging.level.org.springframework.kafka.transaction=debug +#logging.level.org.springframework.jdbc=debug +``` + +``` +create table mytable (data varchar(20)); +``` + +For producer-only transactions, transaction synchronization works: + 
+``` +@Transactional("dstm") +public void someMethod(String in) { + this.kafkaTemplate.send("topic2", in.toUpperCase()); + this.jdbcTemplate.execute("insert into mytable (data) values ('" + in + "')"); +} +``` + +The `KafkaTemplate` will synchronize its transaction with the DB transaction and the commit/rollback occurs after the database. + +If you wish to commit the Kafka transaction first, and only commit the DB transaction if the Kafka transaction is successful, use nested `@Transactional` methods: + +``` +@Transactional("dstm") +public void someMethod(String in) { + this.jdbcTemplate.execute("insert into mytable (data) values ('" + in + "')"); + sendToKafka(in); +} + +@Transactional("kafkaTransactionManager") +public void sendToKafka(String in) { + this.kafkaTemplate.send("topic2", in.toUpperCase()); +} +``` + +\=== Customizing the JsonSerializer and JsonDeserializer + +The serializer and deserializer support a number of customizations using properties, see [JSON](#json-serde) for more information. +The `kafka-clients` code, not Spring, instantiates these objects, unless you inject them directly into the consumer and producer factories. +If you wish to configure the (de)serializer using properties, but wish to use, say, a custom `ObjectMapper`, simply create a subclass and pass the custom mapper into the `super` constructor. For example: + +``` +public class CustomJsonSerializer extends JsonSerializer { + + public CustomJsonSerializer() { + super(customizedObjectMapper()); + } + + private static ObjectMapper customizedObjectMapper() { + ObjectMapper mapper = JacksonUtils.enhancedObjectMapper(); + mapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); + return mapper; + } + +} +``` + +\== Other Resources + +In addition to this reference documentation, we recommend a number of other resources that may help you learn about Spring and Apache Kafka.
+ +* [Apache Kafka Project Home Page](https://kafka.apache.org/) + +* [Spring for Apache Kafka Home Page](https://projects.spring.io/spring-kafka/) + +* [Spring for Apache Kafka GitHub Repository](https://github.com/spring-projects/spring-kafka) + +* [Spring Integration GitHub Repository (Apache Kafka Module)](https://github.com/spring-projects/spring-integration) + +\== Override Spring Boot Dependencies + +When using Spring for Apache Kafka in a Spring Boot application, the Apache Kafka dependency versions are determined by Spring Boot’s dependency management. +If you wish to use a different version of `kafka-clients` or `kafka-streams`, and use the embedded kafka broker for testing, you need to override their version used by Spring Boot dependency management and add two `test` artifacts for Apache Kafka. + +| |There is a bug in Apache Kafka 3.0.0 when running the embedded broker on Microsoft Windows [KAFKA-13391](https://issues.apache.org/jira/browse/KAFKA-13391).
To use the embedded broker on Windows, you need to downgrade the Apache Kafka version to 2.8.1 until 3.0.1 is available.
When using 2.8.1, you also need to exclude `zookeeper` dependency from `spring-kafka-test`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Maven + +``` + + 2.8.1 + + + + org.springframework.kafka + spring-kafka + + + + org.apache.kafka + kafka-streams + + + + org.springframework.kafka + spring-kafka-test + test + + + + org.apache.zookeeper + zookeeper + + + + + + org.apache.kafka + kafka-clients + test + test + ${kafka.version} + + + + org.apache.kafka + kafka_2.13 + test + test + ${kafka.version} + +``` + +Gradle + +``` +ext['kafka.version'] = '2.8.1' + +dependencies { + implementation 'org.springframework.kafka:spring-kafka' + implementation "org.apache.kafka:kafka-streams" // optional - only needed when using kafka-streams + testImplementation ('org.springframework.kafka:spring-kafka-test') { + // needed if downgrading to Apache Kafka 2.8.1 + exclude group: 'org.apache.zookeeper', module: 'zookeeper' + } + testImplementation "org.apache.kafka:kafka-clients:${kafka.version}:test" + testImplementation "org.apache.kafka:kafka_2.13:${kafka.version}:test" +} +``` + +The test scope dependencies are only needed if you are using the embedded Kafka broker in tests. + +\== Change History + +\=== Changes between 2.6 and 2.7 + +\==== Kafka Client Version + +This version requires the 2.7.0 `kafka-clients`. +It is also compatible with the 2.8.0 clients, since version 2.7.1; see [[update-deps]](#update-deps). + +\==== Non-Blocking Delayed Retries Using Topics + +This significant new feature is added in this release. +When strict ordering is not important, failed deliveries can be sent to another topic to be consumed later. 
+A series of such retry topics can be configured, with increasing delays. +See [Non-Blocking Retries](#retry-topic) for more information. + +\==== Listener Container Changes + +The `onlyLogRecordMetadata` container property is now `true` by default. + +A new container property `stopImmediate` is now available. + +See [Listener Container Properties](#container-props) for more information. + +Error handlers that use a `BackOff` between delivery attempts (e.g. `SeekToCurrentErrorHandler` and `DefaultAfterRollbackProcessor`) will now exit the back off interval soon after the container is stopped, rather than delaying the stop. +See [After-rollback Processor](#after-rollback) and [[seek-to-current]](#seek-to-current) for more information. + +Error handlers and after rollback processors that extend `FailedRecordProcessor` can now be configured with one or more `RetryListener` s to receive information about retry and recovery progress. + +See [After-rollback Processor](#after-rollback), [[seek-to-current]](#seek-to-current), and [[recovering-batch-eh]](#recovering-batch-eh) for more information. + +The `RecordInterceptor` now has additional methods called after the listener returns (normally, or by throwing an exception). +It also has a sub-interface `ConsumerAwareRecordInterceptor`. +In addition, there is now a `BatchInterceptor` for batch listeners. +See [Message Listener Containers](#message-listener-container) for more information. + +\==== `@KafkaListener` Changes + +You can now validate the payload parameter of `@KafkaHandler` methods (class-level listeners). +See [`@KafkaListener` `@Payload` Validation](#kafka-validation) for more information. + +You can now set the `rawRecordHeader` property on the `MessagingMessageConverter` and `BatchMessagingMessageConverter` which causes the raw `ConsumerRecord` to be added to the converted `Message`. +This is useful, for example, if you wish to use a `DeadLetterPublishingRecoverer` in a listener error handler.
+See [Listener Error Handlers](#listener-error-handlers) for more information. + +You can now modify `@KafkaListener` annotations during application initialization. +See [`@KafkaListener` Attribute Modification](#kafkalistener-attrs) for more information. + +\==== `DeadLetterPublishingRecoverer` Changes + +Now, if both the key and value fail deserialization, the original values are published to the DLT. +Previously, the value was populated but the key `DeserializationException` remained in the headers. +There is a breaking API change, if you subclassed the recoverer and overrode the `createProducerRecord` method. + +In addition, the recoverer verifies that the partition selected by the destination resolver actually exists before publishing to it. + +See [Publishing Dead-letter Records](#dead-letters) for more information. + +\==== `ChainedKafkaTransactionManager` is Deprecated + +See [Transactions](#transactions) for more information. + +\==== `ReplyingKafkaTemplate` Changes + +There is now a mechanism to examine a reply and fail the future exceptionally if some condition exists. + +Support for sending and receiving `spring-messaging` `Message` s has been added. + +See [Using `ReplyingKafkaTemplate`](#replying-template) for more information. + +\==== Kafka Streams Changes + +By default, the `StreamsBuilderFactoryBean` is now configured to not clean up local state. +See [Configuration](#streams-config) for more information. + +\==== `KafkaAdmin` Changes + +New methods `createOrModifyTopics` and `describeTopics` have been added. `KafkaAdmin.NewTopics` has been added to facilitate configuring multiple topics in a single bean. +See [Configuring Topics](#configuring-topics) for more information. + +\==== `MessageConverter` Changes + +It is now possible to add a `spring-messaging` `SmartMessageConverter` to the `MessagingMessageConverter`, allowing content negotiation based on the `contentType` header.
+See [Spring Messaging Message Conversion](#messaging-message-conversion) for more information. + +\==== Sequencing `@KafkaListener` s + +See [Starting `@KafkaListener` s in Sequence](#sequencing) for more information. + +\==== `ExponentialBackOffWithMaxRetries` + +A new `BackOff` implementation is provided, making it more convenient to configure the max retries. +See [`ExponentialBackOffWithMaxRetries` Implementation](#exp-backoff) for more information. + +\==== Conditional Delegating Error Handlers + +These new error handlers can be configured to delegate to different error handlers, depending on the exception type. +See [Delegating Error Handler](#cond-eh) for more information. + +\=== Changes between 2.5 and 2.6 + +\==== Kafka Client Version + +This version requires the 2.6.0 `kafka-clients`. + +\==== Listener Container Changes + +The default `EOSMode` is now `BETA`. +See [Exactly Once Semantics](#exactly-once) for more information. + +Various error handlers (that extend `FailedRecordProcessor`) and the `DefaultAfterRollbackProcessor` now reset the `BackOff` if recovery fails. +In addition, you can now select the `BackOff` to use based on the failed record and/or exception. +See [[seek-to-current]](#seek-to-current), [[recovering-batch-eh]](#recovering-batch-eh), [Publishing Dead-letter Records](#dead-letters) and [After-rollback Processor](#after-rollback) for more information. + +You can now configure an `adviceChain` in the container properties. +See [Listener Container Properties](#container-props) for more information. + +When the container is configured to publish `ListenerContainerIdleEvent` s, it now publishes a `ListenerContainerNoLongerIdleEvent` when a record is received after publishing an idle event. +See [Application Events](#events) and [Detecting Idle and Non-Responsive Consumers](#idle-containers) for more information. 
+ +\==== @KafkaListener Changes + +When using manual partition assignment, you can now specify a wildcard for determining which partitions should be reset to the initial offset. +In addition, if the listener implements `ConsumerSeekAware`, `onPartitionsAssigned()` is called after the manual assignment. +(Also added in version 2.5.5). +See [Explicit Partition Assignment](#manual-assignment) for more information. + +Convenience methods have been added to `AbstractConsumerSeekAware` to make seeking easier. +See [Seeking to a Specific Offset](#seek) for more information. + +\==== ErrorHandler Changes + +Subclasses of `FailedRecordProcessor` (e.g. `SeekToCurrentErrorHandler`, `DefaultAfterRollbackProcessor`, `RecoveringBatchErrorHandler`) can now be configured to reset the retry state if the exception is a different type to that which occurred previously with this record. +See [[seek-to-current]](#seek-to-current), [After-rollback Processor](#after-rollback), [[recovering-batch-eh]](#recovering-batch-eh) for more information. + +\==== Producer Factory Changes + +You can now set a maximum age for producers after which they will be closed and recreated. +See [Transactions](#transactions) for more information. + +You can now update the configuration map after the `DefaultKafkaProducerFactory` has been created. +This might be useful, for example, if you have to update SSL key/trust store locations after a credentials change. +See [Using `DefaultKafkaProducerFactory`](#producer-factory) for more information. + +\=== Changes between 2.4 and 2.5 + +This section covers the changes made from version 2.4 to version 2.5. +For changes in earlier version, see [[history]](#history). + +\==== Consumer/Producer Factory Changes + +The default consumer and producer factories can now invoke a callback whenever a consumer or producer is created or closed. +Implementations for native Micrometer metrics are provided. +See [Factory Listeners](#factory-listeners) for more information. 
+ +You can now change bootstrap server properties at runtime, enabling failover to another Kafka cluster. +See [Connecting to Kafka](#connecting) for more information. + +\==== `StreamsBuilderFactoryBean` Changes + +The factory bean can now invoke a callback whenever a `KafkaStreams` is created or destroyed. +An implementation for native Micrometer metrics is provided. +See [KafkaStreams Micrometer Support](#streams-micrometer) for more information. + +\==== Kafka Client Version + +This version requires the 2.5.0 `kafka-clients`. + +\==== Class/Package Changes + +`SeekUtils` has been moved from the `o.s.k.support` package to `o.s.k.listener`. + +\==== Delivery Attempts Header + +There is now an option to add a header which tracks delivery attempts when using certain error handlers and after rollback processors. +See [Delivery Attempts Header](#delivery-header) for more information. + +\==== @KafkaListener Changes + +Default reply headers will now be populated automatically if needed when a `@KafkaListener` return type is `Message`. +See [Reply Type Message\](#reply-message) for more information. + +The `KafkaHeaders.RECEIVED_MESSAGE_KEY` is no longer populated with a `null` value when the incoming record has a `null` key; the header is omitted altogether. + +`@KafkaListener` methods can now specify a `ConsumerRecordMetadata` parameter instead of using discrete headers for metadata such as topic, partition, etc. +See [Consumer Record Metadata](#consumer-record-metadata) for more information. + +\==== Listener Container Changes + +The `assignmentCommitOption` container property is now `LATEST_ONLY_NO_TX` by default. +See [Listener Container Properties](#container-props) for more information. + +The `subBatchPerPartition` container property is now `true` by default when using transactions. +See [Transactions](#transactions) for more information. + +A new `RecoveringBatchErrorHandler` is now provided.
+See [[recovering-batch-eh]](#recovering-batch-eh) for more information. + +Static group membership is now supported. +See [Message Listener Containers](#message-listener-container) for more information. + +When incremental/cooperative rebalancing is configured, if offsets fail to commit with a non-fatal `RebalanceInProgressException`, the container will attempt to re-commit the offsets for the partitions that remain assigned to this instance after the rebalance is completed. + +The default error handler is now the `SeekToCurrentErrorHandler` for record listeners and `RecoveringBatchErrorHandler` for batch listeners. +See [Container Error Handlers](#error-handlers) for more information. + +You can now control the level at which exceptions intentionally thrown by standard error handlers are logged. +See [Container Error Handlers](#error-handlers) for more information. + +The `getAssignmentsByClientId()` method has been added, making it easier to determine which consumers in a concurrent container are assigned which partition(s). +See [Listener Container Properties](#container-props) for more information. + +You can now suppress logging entire `ConsumerRecord` s in error, debug logs etc. +See `onlyLogRecordMetadata` in [Listener Container Properties](#container-props). + +\==== KafkaTemplate Changes + +The `KafkaTemplate` can now maintain micrometer timers. +See [Monitoring](#micrometer) for more information. + +The `KafkaTemplate` can now be configured with `ProducerConfig` properties to override those in the producer factory. +See [Using `KafkaTemplate`](#kafka-template) for more information. + +A `RoutingKafkaTemplate` has now been provided. +See [Using `RoutingKafkaTemplate`](#routing-template) for more information. + +You can now use `KafkaSendCallback` instead of `ListenerFutureCallback` to get a narrower exception, making it easier to extract the failed `ProducerRecord`. +See [Using `KafkaTemplate`](#kafka-template) for more information. 
+ +\==== Kafka String Serializer/Deserializer + +New `ToStringSerializer`/`StringDeserializer` s as well as an associated `SerDe` are now provided. +See [String serialization](#string-serde) for more information. + +\==== JsonDeserializer + +The `JsonDeserializer` now has more flexibility to determine the deserialization type. +See [Using Methods to Determine Types](#serdes-type-methods) for more information. + +\==== Delegating Serializer/Deserializer + +The `DelegatingSerializer` can now handle "standard" types, when the outbound record has no header. +See [Delegating Serializer and Deserializer](#delegating-serialization) for more information. + +\==== Testing Changes + +The `KafkaTestUtils.consumerProps()` helper record now sets `ConsumerConfig.AUTO_OFFSET_RESET_CONFIG` to `earliest` by default. +See [JUnit](#junit) for more information. + +\=== Changes between 2.3 and 2.4 + +\==== Kafka Client Version + +This version requires the 2.4.0 `kafka-clients` or higher and supports the new incremental rebalancing feature. + +\==== ConsumerAwareRebalanceListener + +Like `ConsumerRebalanceListener`, this interface now has an additional method `onPartitionsLost`. +Refer to the Apache Kafka documentation for more information. + +Unlike the `ConsumerRebalanceListener`, The default implementation does **not** call `onPartitionsRevoked`. +Instead, the listener container will call that method after it has called `onPartitionsLost`; you should not, therefore, do the same when implementing `ConsumerAwareRebalanceListener`. + +See the IMPORTANT note at the end of [Rebalancing Listeners](#rebalance-listeners) for more information. + +\==== GenericErrorHandler + +The `isAckAfterHandle()` default implementation now returns true by default. + +\==== KafkaTemplate + +The `KafkaTemplate` now supports non-transactional publishing alongside transactional. +See [`KafkaTemplate` Transactional and non-Transactional Publishing](#tx-template-mixed) for more information. 
+ +\==== AggregatingReplyingKafkaTemplate + +The `releaseStrategy` is now a `BiConsumer`. +It is now called after a timeout (as well as when records arrive); the second parameter is `true` in the case of a call after a timeout. + +See [Aggregating Multiple Replies](#aggregating-request-reply) for more information. + +\==== Listener Container + +The `ContainerProperties` provides an `authorizationExceptionRetryInterval` option to let the listener container to retry after any `AuthorizationException` is thrown by the `KafkaConsumer`. +See its JavaDocs and [Using `KafkaMessageListenerContainer`](#kafka-container) for more information. + +\==== @KafkaListener + +The `@KafkaListener` annotation has a new property `splitIterables`; default true. +When a replying listener returns an `Iterable` this property controls whether the return result is sent as a single record or a record for each element is sent. +See [Forwarding Listener Results using `@SendTo`](#annotation-send-to) for more information + +Batch listeners can now be configured with a `BatchToRecordAdapter`; this allows, for example, the batch to be processed in a transaction while the listener gets one record at a time. +With the default implementation, a `ConsumerRecordRecoverer` can be used to handle errors within the batch, without stopping the processing of the entire batch - this might be useful when using transactions. +See [Transactions with Batch Listeners](#transactions-batch) for more information. + +\==== Kafka Streams + +The `StreamsBuilderFactoryBean` accepts a new property `KafkaStreamsInfrastructureCustomizer`. +This allows configuration of the builder and/or topology before the stream is created. +See [Spring Management](#streams-spring) for more information. + +\=== Changes Between 2.2 and 2.3 + +This section covers the changes made from version 2.2 to version 2.3. + +\==== Tips, Tricks and Examples + +A new chapter [[tips-n-tricks]](#tips-n-tricks) has been added. 
+Please submit GitHub issues and/or pull requests for additional entries in that chapter. + +\==== Kafka Client Version + +This version requires the 2.3.0 `kafka-clients` or higher. + +\==== Class/Package Changes + +`TopicPartitionInitialOffset` is deprecated in favor of `TopicPartitionOffset`. + +\==== Configuration Changes + +Starting with version 2.3.4, the `missingTopicsFatal` container property is false by default. +When this is true, the application fails to start if the broker is down; many users were affected by this change; given that Kafka is a high-availability platform, we did not anticipate that starting an application with no active brokers would be a common use case. + +\==== Producer and Consumer Factory Changes + +The `DefaultKafkaProducerFactory` can now be configured to create a producer per thread. +You can also provide `Supplier` instances in the constructor as an alternative to either configured classes (which require no-arg constructors), or constructing with `Serializer` instances, which are then shared between all Producers. +See [Using `DefaultKafkaProducerFactory`](#producer-factory) for more information. + +The same option is available with `Supplier` instances in `DefaultKafkaConsumerFactory`. +See [Using `KafkaMessageListenerContainer`](#kafka-container) for more information. + +\==== Listener Container Changes + +Previously, error handlers received `ListenerExecutionFailedException` (with the actual listener exception as the `cause`) when the listener was invoked using a listener adapter (such as `@KafkaListener` s). +Exceptions thrown by native `GenericMessageListener` s were passed to the error handler unchanged. +Now a `ListenerExecutionFailedException` is always the argument (with the actual listener exception as the `cause`), which provides access to the container’s `group.id` property. 
Because the listener container has its own mechanism for committing offsets, it prefers the Kafka `ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG` to be `false`.
+See [Application Events](#events) for more information. + +Transactional batch listeners can now support zombie fencing. +See [Transactions](#transactions) for more information. + +The listener container factory can now be configured with a `ContainerCustomizer` to further configure each container after it has been created and configured. +See [Container factory](#container-factory) for more information. + +\==== ErrorHandler Changes + +The `SeekToCurrentErrorHandler` now treats certain exceptions as fatal and disables retry for those, invoking the recoverer on first failure. + +The `SeekToCurrentErrorHandler` and `SeekToCurrentBatchErrorHandler` can now be configured to apply a `BackOff` (thread sleep) between delivery attempts. + +Starting with version 2.3.2, recovered records' offsets will be committed when the error handler returns after recovering a failed record. + +See [[seek-to-current]](#seek-to-current) for more information. + +The `DeadLetterPublishingRecoverer`, when used in conjunction with an `ErrorHandlingDeserializer`, now sets the payload of the message sent to the dead-letter topic, to the original value that could not be deserialized. +Previously, it was `null` and user code needed to extract the `DeserializationException` from the message headers. +See [Publishing Dead-letter Records](#dead-letters) for more information. + +\==== TopicBuilder + +A new class `TopicBuilder` is provided for more convenient creation of `NewTopic` `@Bean` s for automatic topic provisioning. +See [Configuring Topics](#configuring-topics) for more information. + +\==== Kafka Streams Changes + +You can now perform additional configuration of the `StreamsBuilderFactoryBean` created by `@EnableKafkaStreams`. +See [Streams Configuration](#streams-config) for more information. + +A `RecoveringDeserializationExceptionHandler` is now provided which allows records with deserialization errors to be recovered. 
+It can be used in conjunction with a `DeadLetterPublishingRecoverer` to send these records to a dead-letter topic. +See [Recovery from Deserialization Exceptions](#streams-deser-recovery) for more information. + +The `HeaderEnricher` transformer has been provided, using SpEL to generate the header values. +See [Header Enricher](#streams-header-enricher) for more information. + +The `MessagingTransformer` has been provided. +This allows a Kafka streams topology to interact with a spring-messaging component, such as a Spring Integration flow. +See [`MessagingTransformer`](#streams-messaging) and See [[Calling a Spring Integration Flow from a `KStream`](https://docs.spring.io/spring-integration/docs/current/reference/html/kafka.html#streams-integration)] for more information. + +\==== JSON Component Changes + +Now all the JSON-aware components are configured by default with a Jackson `ObjectMapper` produced by the `JacksonUtils.enhancedObjectMapper()`. +The `JsonDeserializer` now provides `TypeReference`-based constructors for better handling of target generic container types. +Also a `JacksonMimeTypeModule` has been introduced for serialization of `org.springframework.util.MimeType` to plain string. +See its JavaDocs and [Serialization, Deserialization, and Message Conversion](#serdes) for more information. + +A `ByteArrayJsonMessageConverter` has been provided as well as a new super class for all Json converters, `JsonMessageConverter`. +Also, a `StringOrBytesSerializer` is now available; it can serialize `byte[]`, `Bytes` and `String` values in `ProducerRecord` s. +See [Spring Messaging Message Conversion](#messaging-message-conversion) for more information. + +The `JsonSerializer`, `JsonDeserializer` and `JsonSerde` now have fluent APIs to make programmatic configuration simpler. +See the javadocs, [Serialization, Deserialization, and Message Conversion](#serdes), and [Streams JSON Serialization and Deserialization](#serde) for more informaion. 
+ +\==== ReplyingKafkaTemplate + +When a reply times out, the future is completed exceptionally with a `KafkaReplyTimeoutException` instead of a `KafkaException`. + +Also, an overloaded `sendAndReceive` method is now provided that allows specifying the reply timeout on a per message basis. + +\==== AggregatingReplyingKafkaTemplate + +Extends the `ReplyingKafkaTemplate` by aggregating replies from multiple receivers. +See [Aggregating Multiple Replies](#aggregating-request-reply) for more information. + +\==== Transaction Changes + +You can now override the producer factory’s `transactionIdPrefix` on the `KafkaTemplate` and `KafkaTransactionManager`. +See [`transactionIdPrefix`](#transaction-id-prefix) for more information. + +\==== New Delegating Serializer/Deserializer + +The framework now provides a delegating `Serializer` and `Deserializer`, utilizing a header to enable producing and consuming records with multiple key/value types. +See [Delegating Serializer and Deserializer](#delegating-serialization) for more information. + +\==== New Retrying Deserializer + +The framework now provides a delegating `RetryingDeserializer`, to retry serialization when transient errors such as network problems might occur. +See [Retrying Deserializer](#retrying-deserialization) for more information. + +\=== Changes Between 2.1 and 2.2 + +\==== Kafka Client Version + +This version requires the 2.0.0 `kafka-clients` or higher. + +\==== Class and Package Changes + +The `ContainerProperties` class has been moved from `org.springframework.kafka.listener.config` to `org.springframework.kafka.listener`. + +The `AckMode` enum has been moved from `AbstractMessageListenerContainer` to `ContainerProperties`. + +The `setBatchErrorHandler()` and `setErrorHandler()` methods have been moved from `ContainerProperties` to both `AbstractMessageListenerContainer` and `AbstractKafkaListenerContainerFactory`. + +\==== After Rollback Processing + +A new `AfterRollbackProcessor` strategy is provided. 
+See [After-rollback Processor](#after-rollback) for more information. + +\==== `ConcurrentKafkaListenerContainerFactory` Changes + +You can now use the `ConcurrentKafkaListenerContainerFactory` to create and configure any `ConcurrentMessageListenerContainer`, not only those for `@KafkaListener` annotations. +See [Container factory](#container-factory) for more information. + +\==== Listener Container Changes + +A new container property (`missingTopicsFatal`) has been added. +See [Using `KafkaMessageListenerContainer`](#kafka-container) for more information. + +A `ConsumerStoppedEvent` is now emitted when a consumer stops. +See [Thread Safety](#thread-safety) for more information. + +Batch listeners can optionally receive the complete `ConsumerRecords` object instead of a `List`. +See [Batch Listeners](#batch-listeners) for more information. + +The `DefaultAfterRollbackProcessor` and `SeekToCurrentErrorHandler` can now recover (skip) records that keep failing, and, by default, does so after 10 failures. +They can be configured to publish failed records to a dead-letter topic. + +Starting with version 2.2.4, the consumer’s group ID can be used while selecting the dead letter topic name. + +See [After-rollback Processor](#after-rollback), [[seek-to-current]](#seek-to-current), and [Publishing Dead-letter Records](#dead-letters) for more information. + +The `ConsumerStoppingEvent` has been added. +See [Application Events](#events) for more information. + +The `SeekToCurrentErrorHandler` can now be configured to commit the offset of a recovered record when the container is configured with `AckMode.MANUAL_IMMEDIATE` (since 2.2.4). +See [[seek-to-current]](#seek-to-current) for more information. + +\==== @KafkaListener Changes + +You can now override the `concurrency` and `autoStartup` properties of the listener container factory by setting properties on the annotation. +You can now add configuration to determine which headers (if any) are copied to a reply message. 
+See [`@KafkaListener` Annotation](#kafka-listener-annotation) for more information. + +You can now use `@KafkaListener` as a meta-annotation on your own annotations. +See [`@KafkaListener` as a Meta Annotation](#kafka-listener-meta) for more information. + +It is now easier to configure a `Validator` for `@Payload` validation. +See [`@KafkaListener` `@Payload` Validation](#kafka-validation) for more information. + +You can now specify kafka consumer properties directly on the annotation; these will override any properties with the same name defined in the consumer factory (since version 2.2.4). +See [Annotation Properties](#annotation-properties) for more information. + +\==== Header Mapping Changes + +Headers of type `MimeType` and `MediaType` are now mapped as simple strings in the `RecordHeader` value. +Previously, they were mapped as JSON and only `MimeType` was decoded.`MediaType` could not be decoded. +They are now simple strings for interoperability. + +Also, the `DefaultKafkaHeaderMapper` has a new `addToStringClasses` method, allowing the specification of types that should be mapped by using `toString()` instead of JSON. +See [Message Headers](#headers) for more information. + +\==== Embedded Kafka Changes + +The `KafkaEmbedded` class and its `KafkaRule` interface have been deprecated in favor of the `EmbeddedKafkaBroker` and its JUnit 4 `EmbeddedKafkaRule` wrapper. +The `@EmbeddedKafka` annotation now populates an `EmbeddedKafkaBroker` bean instead of the deprecated `KafkaEmbedded`. +This change allows the use of `@EmbeddedKafka` in JUnit 5 tests. +The `@EmbeddedKafka` annotation now has the attribute `ports` to specify the port that populates the `EmbeddedKafkaBroker`. +See [Testing Applications](#testing) for more information. + +\==== JsonSerializer/Deserializer Enhancements + +You can now provide type mapping information by using producer and consumer properties. 
+ +New constructors are available on the deserializer to allow overriding the type header information with the supplied target type. + +The `JsonDeserializer` now removes any type information headers by default. + +You can now configure the `JsonDeserializer` to ignore type information headers by using a Kafka property (since 2.2.3). + +See [Serialization, Deserialization, and Message Conversion](#serdes) for more information. + +\==== Kafka Streams Changes + +The streams configuration bean must now be a `KafkaStreamsConfiguration` object instead of a `StreamsConfig` object. + +The `StreamsBuilderFactoryBean` has been moved from package `…​core` to `…​config`. + +The `KafkaStreamBrancher` has been introduced for better end-user experience when conditional branches are built on top of `KStream` instance. + +See [Apache Kafka Streams Support](#streams-kafka-streams) and [Configuration](#streams-config) for more information. + +\==== Transactional ID + +When a transaction is started by the listener container, the `transactional.id` is now the `transactionIdPrefix` appended with `..`. +This change allows proper fencing of zombies, [as described here](https://www.confluent.io/blog/transactions-apache-kafka/). + +\=== Changes Between 2.0 and 2.1 + +\==== Kafka Client Version + +This version requires the 1.0.0 `kafka-clients` or higher. + +The 1.1.x client is supported natively in version 2.2. + +\==== JSON Improvements + +The `StringJsonMessageConverter` and `JsonSerializer` now add type information in `Headers`, letting the converter and `JsonDeserializer` create specific types on reception, based on the message itself rather than a fixed configured type. +See [Serialization, Deserialization, and Message Conversion](#serdes) for more information. + +\==== Container Stopping Error Handlers + +Container error handlers are now provided for both record and batch listeners that treat any exceptions thrown by the listener as fatal/ +They stop the container. 
+See [Handling Exceptions](#annotation-error-handling) for more information. + +\==== Pausing and Resuming Containers + +The listener containers now have `pause()` and `resume()` methods (since version 2.1.3). +See [Pausing and Resuming Listener Containers](#pause-resume) for more information. + +\==== Stateful Retry + +Starting with version 2.1.3, you can configure stateful retry. +See [[stateful-retry]](#stateful-retry) for more information. + +\==== Client ID + +Starting with version 2.1.1, you can now set the `client.id` prefix on `@KafkaListener`. +Previously, to customize the client ID, you needed a separate consumer factory (and container factory) per listener. +The prefix is suffixed with `-n` to provide unique client IDs when you use concurrency. + +\==== Logging Offset Commits + +By default, logging of topic offset commits is performed with the `DEBUG` logging level. +Starting with version 2.1.2, a new property in `ContainerProperties` called `commitLogLevel` lets you specify the log level for these messages. +See [Using `KafkaMessageListenerContainer`](#kafka-container) for more information. + +\==== Default @KafkaHandler + +Starting with version 2.1.3, you can designate one of the `@KafkaHandler` annotations on a class-level `@KafkaListener` as the default. +See [`@KafkaListener` on a Class](#class-level-kafkalistener) for more information. + +\==== ReplyingKafkaTemplate + +Starting with version 2.1.3, a subclass of `KafkaTemplate` is provided to support request/reply semantics. +See [Using `ReplyingKafkaTemplate`](#replying-template) for more information. + +\==== ChainedKafkaTransactionManager + +Version 2.1.3 introduced the `ChainedKafkaTransactionManager`. +(It is now deprecated). + +\==== Migration Guide from 2.0 + +See the [2.0 to 2.1 Migration](https://github.com/spring-projects/spring-kafka/wiki/Spring-for-Apache-Kafka-2.0-to-2.1-Migration-Guide) guide. 
+ +\=== Changes Between 1.3 and 2.0 + +\==== Spring Framework and Java Versions + +The Spring for Apache Kafka project now requires Spring Framework 5.0 and Java 8. + +\==== `@KafkaListener` Changes + +You can now annotate `@KafkaListener` methods (and classes and `@KafkaHandler` methods) with `@SendTo`. +If the method returns a result, it is forwarded to the specified topic. +See [Forwarding Listener Results using `@SendTo`](#annotation-send-to) for more information. + +\==== Message Listeners + +Message listeners can now be aware of the `Consumer` object. +See [Message Listeners](#message-listeners) for more information. + +\==== Using `ConsumerAwareRebalanceListener` + +Rebalance listeners can now access the `Consumer` object during rebalance notifications. +See [Rebalancing Listeners](#rebalance-listeners) for more information. + +\=== Changes Between 1.2 and 1.3 + +\==== Support for Transactions + +The 0.11.0.0 client library added support for transactions. +The `KafkaTransactionManager` and other support for transactions have been added. +See [Transactions](#transactions) for more information. + +\==== Support for Headers + +The 0.11.0.0 client library added support for message headers. +These can now be mapped to and from `spring-messaging` `MessageHeaders`. +See [Message Headers](#headers) for more information. + +\==== Creating Topics + +The 0.11.0.0 client library provides an `AdminClient`, which you can use to create topics. +The `KafkaAdmin` uses this client to automatically add topics defined as `@Bean` instances. + +\==== Support for Kafka Timestamps + +`KafkaTemplate` now supports an API to add records with timestamps. +New `KafkaHeaders` have been introduced regarding `timestamp` support. +Also, new `KafkaConditions.timestamp()` and `KafkaMatchers.hasTimestamp()` testing utilities have been added. 
+See [Using `KafkaTemplate`](#kafka-template), [`@KafkaListener` Annotation](#kafka-listener-annotation), and [Testing Applications](#testing) for more details. + +\==== `@KafkaListener` Changes + +You can now configure a `KafkaListenerErrorHandler` to handle exceptions. +See [Handling Exceptions](#annotation-error-handling) for more information. + +By default, the `@KafkaListener` `id` property is now used as the `group.id` property, overriding the property configured in the consumer factory (if present). +Further, you can explicitly configure the `groupId` on the annotation. +Previously, you would have needed a separate container factory (and consumer factory) to use different `group.id` values for listeners. +To restore the previous behavior of using the factory configured `group.id`, set the `idIsGroup` property on the annotation to `false`. + +\==== `@EmbeddedKafka` Annotation + +For convenience, a test class-level `@EmbeddedKafka` annotation is provided, to register `KafkaEmbedded` as a bean. +See [Testing Applications](#testing) for more information. + +\==== Kerberos Configuration + +Support for configuring Kerberos is now provided. +See [JAAS and Kerberos](#kerberos) for more information. + +\=== Changes Between 1.1 and 1.2 + +This version uses the 0.10.2.x client. + +\=== Changes Between 1.0 and 1.1 + +\==== Kafka Client + +This version uses the Apache Kafka 0.10.x.x client. + +\==== Batch Listeners + +Listeners can be configured to receive the entire batch of messages returned by the `consumer.poll()` operation, rather than one at a time. + +\==== Null Payloads + +Null payloads are used to “delete” keys when you use log compaction. + +\==== Initial Offset + +When explicitly assigning partitions, you can now configure the initial offset relative to the current position for the consumer group, rather than absolute or relative to the current end. + +\==== Seek + +You can now seek the position of each topic or partition. 
+You can use this to set the initial position during initialization when group management is in use and Kafka assigns the partitions. +You can also seek when an idle container is detected or at any arbitrary point in your application’s execution. +See [Seeking to a Specific Offset](#seek) for more information. \ No newline at end of file diff --git a/docs/en/spring-for-graphql/README.md b/docs/en/spring-for-graphql/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-for-graphql/spring-graphql.md b/docs/en/spring-for-graphql/spring-graphql.md new file mode 100644 index 0000000000000000000000000000000000000000..e7ba8c3844d64d0ed0bc084cf99a908ccc15962c --- /dev/null +++ b/docs/en/spring-for-graphql/spring-graphql.md @@ -0,0 +1,1528 @@ +# Spring for GraphQL Documentation + +## 1. Overview + +Spring for GraphQL provides support for Spring applications built on[GraphQL Java](https://www.graphql-java.com/). It is a joint collaboration between both +teams. Our shared philosophy is to be less opinionated and more focused on +comprehensive and wide-ranging support. + +Spring for GraphQL is the successor of the[GraphQL Java Spring](https://github.com/graphql-java/graphql-java-spring) project from +the GraphQL Java team. It aims to be the foundation for all Spring, GraphQL applications. + +The project is in a milestone phase towards a 1.0 release, currently, and looking for +feedback. Please, use our[issue tracker](https://github.com/spring-projects/spring-graphql/issues) to report a +problem, discuss a design issue, or request a feature. + +To get started, check the Spring GraphQL starter on [start.spring.io](https://start.spring.io) and the[Samples](#samples) sections. + +## 2. Requirements + +Spring for GraphQL requires the following as a baseline: + +* JDK8 + +* Spring Framework 5.3 + +* GraphQL Java 17 + +* Spring Data 2021.1.0 or later for QueryDSL or Query by Example + +## 3. 
Web Transports + +Spring for GraphQL supports GraphQL requests over HTTP and over WebSocket. + +### 3.1. HTTP + +`GraphQlHttpHandler` handles GraphQL over HTTP requests and delegates to the[Web Interception](#web-interception) chain for request execution. There are two variants, one for +Spring MVC and one for Spring WebFlux. Both handle requests asynchronously and have +equivalent functionality, but rely on blocking vs non-blocking I/O respectively for +writing the HTTP response. + +Requests must use HTTP POST with GraphQL request details included as JSON in the +request body, as defined in the proposed[GraphQL over HTTP](https://github.com/graphql/graphql-over-http/blob/main/spec/GraphQLOverHTTP.md)specification. Once the JSON body has been successfully decoded, the HTTP response +status is always 200 (OK), and any errors from GraphQL request execution appear in the +"errors" section of the GraphQL response. + +`GraphQlHttpHandler` can be exposed as an HTTP endpoint by declaring a `RouterFunction`bean and using the `RouterFunctions` from Spring MVC or WebFlux to create the route. The +Boot starter does this, see the[Web Endpoints](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/web.html#web.graphql.web-endpoints) section for +details, or check `GraphQlWebMvcAutoConfiguration` or `GraphQlWebFluxAutoConfiguration`it contains, for the actual config. + +The Spring for GraphQL repository contains a Spring MVC[HTTP sample](https://github.com/spring-projects/spring-graphql/tree/main/samples/webmvc-http) application. + +### 3.2. WebSocket + +`GraphQlWebSocketHandler` handles GraphQL over WebSocket requests based on the[protocol](https://github.com/enisdenjo/graphql-ws/blob/master/PROTOCOL.md) defined in the[graphql-ws](https://github.com/enisdenjo/graphql-ws) library. The main reason to use +GraphQL over WebSocket is subscriptions which allow sending a stream of GraphQL +responses, but it can also be used for regular queries with a single response. 
+The handler delegates every request to the [Web Interception](#web-interception) chain for further +request execution. + +| |GraphQL Over WebSocket Protocols

There are two such protocols, one in the[subscriptions-transport-ws](https://github.com/apollographql/subscriptions-transport-ws)library and another in the[graphql-ws](https://github.com/enisdenjo/graphql-ws) library. The former is not active and
succeeded by the latter. Read this[blog post](https://the-guild.dev/blog/graphql-over-websockets) for the history.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +There are two variants of `GraphQlWebSocketHandler`, one for Spring MVC and one for +Spring WebFlux. Both handle requests asynchronously and have equivalent functionality. +The WebFlux handler also uses non-blocking I/O and back pressure to stream messages, +which works well since in GraphQL Java a subscription response is a Reactive Streams`Publisher`. + +The `graphql-ws` project lists a number of[recipes](https://github.com/enisdenjo/graphql-ws#recipes) for client use. + +`GraphQlWebSocketHandler` can be exposed as a WebSocket endpoint by declaring a`SimpleUrlHandlerMapping` bean and using it to map the handler to a URL path. The Boot +starter has options to enable this, see the[Web Endpoints](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/web.html#web.graphql.web-endpoints) section for +details, or check `GraphQlWebMvcAutoConfiguration` or `GraphQlWebFluxAutoConfiguration`it contains, for the actual config. + +The Spring for GraphQL repository contains a WebFlux[WebSocket sample](https://github.com/spring-projects/spring-graphql/tree/main/samples/webflux-websocket) application. + +### 3.3. Web Interception + +[HTTP](#web-http) and [WebSocket](#web-websocket) transport handlers delegate to a common Web +interception chain for request execution. The chain consists of a sequence of`WebInterceptor` components, followed by a `GraphQlService` that invokes the GraphQL +Java engine. 
`WebInterceptor` is a common contract to use in both Spring MVC and WebFlux +applications.
`GraphQLSource` + +`GraphQlSource` is a core Spring abstraction for access to the`graphql.GraphQL` instance to use for request execution. It provides a builder API to +initialize GraphQL Java and build a `GraphQlSource`. + +The default `GraphQlSource` builder, accessible via `GraphQlSource.builder()`, enables +support for [Reactive `DataFetcher`](#execution-reactive-datafetcher), [Context Propagation](#execution-context), and[Exception Resolution](#execution-exceptions). + +The Spring Boot [starter](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/web.html#web.graphql) initializes a`GraphQlSource` instance through the default `GraphQlSource.Builder` and also enables +the following: + +* Load [schema files](#execution-graphqlsource-schema-resources) from a configurable location. + +* Expose [properties](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/application-properties.html#appendix.application-properties.web)that apply to `GraphQlSource.Builder`. + +* Detect [`RuntimeWiringConfigurer`](#execution-graphqlsource-runtimewiring-configurer) beans. + +* Detect [Instrumentation](https://www.graphql-java.com/documentation/instrumentation) beans for[GraphQL metrics](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/actuator.html#actuator.metrics.supported.spring-graphql). + +* Detect `DataFetcherExceptionResolver` beans for[exception resolution](#execution-exceptions). + +* Detect `GraphQlSourceBuilderCustomizer` beans for any other customizations. + +#### 4.1.1. Schema Resources + +`GraphQlSource.Builder` can be configured with one or more `Resource` instances to be +parsed and merged together. That means schema files can be loaded from just about any +location. 
+ +By default, the Spring Boot starter[finds schema files](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/web.html#web.graphql.schema) from a +well-known classpath location, but you can change that to a location on the file system +via `FileSystemResource`, to byte content via `ByteArrayResource`, or implement a custom`Resource` that loads schema files from a remote location or storage. + +#### 4.1.2. Schema Creation + +By default, `GraphQlSource.Builder` uses the GraphQL Java `GraphQLSchemaGenerator` to +create the `graphql.schema.GraphQLSchema`. This works for most applications, but if +necessary, you can hook into the schema creation through the builder: + +``` +// Typically, accessed through Spring Boot's GraphQlSourceBuilderCustomizer +GraphQlSource.Builder builder = ... + +builder.schemaResources(..) + .configureRuntimeWiring(..) + .schemaFactory((typeDefinitionRegistry, runtimeWiring) -> { + // create GraphQLSchema + }) + +``` + +The primary reason for this is to create the schema through a federation library. + +#### 4.1.3. `RuntimeWiringConfigurer` + +You can use `RuntimeWiringConfigurer` to register: + +* Custom scalar types. + +* Directives handling code. + +* `TypeResolver`, if you need to override the[Default `TypeResolver`](#execution-graphqlsource-default-type-resolver) for a type. + +* `DataFetcher` for a field, although most applications will simply configure`AnnotatedControllerConfigurer`, which detects annotated, `DataFetcher` handler methods. + The Spring Boot starter adds the `AnnotatedControllerConfigurer` by default. + +The Spring Boot starter detects beans of type `RuntimeWiringConfigurer` and +registers them in the `GraphQlSource.Builder`. That means in most cases, you’ll have +something like the following in your configuration: + +``` +@Configuration +public class GraphQlConfig { + + @Bean + public RuntimeWiringConfigurer runtimeWiringConfigurer(BookRepository repository) { + + GraphQLScalarType scalarType = ... 
; + SchemaDirectiveWiring directiveWiring = ... ; + DataFetcher dataFetcher = QuerydslDataFetcher.builder(repository).single(); + + return wiringBuilder -> wiringBuilder + .scalar(scalarType) + .directiveWiring(directiveWiring) + .type("Query", builder -> builder.dataFetcher("book", dataFetcher)); + } +} + +``` + +If you need to add a `WiringFactory`, e.g. to make registrations that take into account +schema definitions, implement the alternative `configure` method that accepts both the`RuntimeWiring.Builder` and an output `List`. This allows you to add any +number of factories that are then invoked in sequence. + +#### 4.1.4. Default `TypeResolver` + +`GraphQlSource.Builder` registers `ClassNameTypeResolver` as the default `TypeResolver`to use for GraphQL Interfaces and Unions that don’t already have such a registration +through a [`RuntimeWiringConfigurer`](#execution-graphqlsource-runtimewiring-configurer). The purpose of +a `TypeResolver` in GraphQL Java is to determine the GraphQL Object type for values +returned from the `DataFetcher` for a GraphQL Interface or Union field. + +`ClassNameTypeResolver` tries to match the simple class name of the value to a GraphQL +Object Type and if it is not successful, it also navigates its super types including +base classes and interfaces, looking for a match. `ClassNameTypeResolver` provides an +option to configure a name extracting function along with `Class` to GraphQL Object type +name mappings that should help to cover more corner cases. + +#### 4.1.5. Operation Caching + +GraphQL Java must *parse* and *validate* an operation before executing it. This may impact +performance significantly. To avoid the need to re-parse and validate, an application may +configure a `PreparsedDocumentProvider` that caches and reuses Document instances. The[GraphQL Java docs](https://www.graphql-java.com/documentation/execution/#query-caching) provide more details on +query caching through a `PreparsedDocumentProvider`. 
+ +In Spring GraphQL you can register a `PreparsedDocumentProvider` through`GraphQlSource.Builder#configureGraphQl`: + +``` +// Typically, accessed through Spring Boot's GraphQlSourceBuilderCustomizer +GraphQlSource.Builder builder = ... + +// Create provider +PreparsedDocumentProvider provider = ... + +builder.schemaResources(..) + .configureRuntimeWiring(..) + .configureGraphQl(graphQLBuilder -> graphQLBuilder.preparsedDocumentProvider(provider)) + +``` + +#### 4.1.6. Directives + +The GraphQL language supports directives that "describe alternate runtime execution and +type validation behavior in a GraphQL document". Directives are similar to annotations in +Java but declared on types, fields, fragments and operations in a GraphQL document. + +GraphQL Java provides the `SchemaDirectiveWiring` contract to help applications detect +and handle directives. For more details, see[Schema Directives](https://www.graphql-java.com/documentation/sdl-directives/) in the +GraphQL Java documentation. + +In Spring GraphQL you can register a `SchemaDirectiveWiring` through a[`RuntimeWiringConfigurer`](#execution-graphqlsource-runtimewiring-configurer). The Spring Boot starter detects +such beans, so you might have something like: + +``` +@Configuration +public class GraphQlConfig { + + @Bean + public RuntimeWiringConfigurer runtimeWiringConfigurer() { + return builder -> builder.directiveWiring(new MySchemaDirectiveWiring()); + } + +} + +``` + +| |For an example of directives support check out the[Extended Validation for Graphql Java](https://github.com/graphql-java/graphql-java-extended-validation)library.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.2. 
Reactive `DataFetcher` + +The default `GraphQlSource` builder enables support for a `DataFetcher` to return `Mono`or `Flux` which adapts those to a `CompletableFuture` where `Flux` values are aggregated +and turned into a List, unless the request is a GraphQL subscription request, +in which case the return value remains a Reactive Streams `Publisher` for streaming +GraphQL responses. + +A reactive `DataFetcher` can rely on access to Reactor context propagated from the +transport layer, such as from a WebFlux request handling, see[WebFlux Context](#execution-context-webflux). + +### 4.3. Context Propagation + +Spring for GraphQL provides support to transparently propagate context from the [Web Transports](#web-transports), +through the GraphQL engine, and to `DataFetcher` and other components it invokes. +This includes both `ThreadLocal` context from the Spring MVC request handling thread and +Reactor `Context` from the WebFlux processing pipeline. + +#### 4.3.1. WebMvc + +A `DataFetcher` and other components invoked by GraphQL Java may not always execute on +the same thread as the Spring MVC handler, for example if an asynchronous[`WebInterceptor`](#web-interception) or `DataFetcher` switches to a different thread. + +Spring for GraphQL supports propagating `ThreadLocal` values from the Servlet container +thread to the thread a `DataFetcher` and other components invoked by the GraphQL engine +execute on. 
To do this, an application needs to create a `ThreadLocalAccessor` to extract`ThreadLocal` values of interest: + +``` +public class RequestAttributesAccessor implements ThreadLocalAccessor { + + private static final String KEY = RequestAttributesAccessor.class.getName(); + + @Override + public void extractValues(Map container) { + container.put(KEY, RequestContextHolder.getRequestAttributes()); + } + + @Override + public void restoreValues(Map values) { + if (values.containsKey(KEY)) { + RequestContextHolder.setRequestAttributes((RequestAttributes) values.get(KEY)); + } + } + + @Override + public void resetValues(Map values) { + RequestContextHolder.resetRequestAttributes(); + } + +} + +``` + +A `ThreadLocalAccessor` can be registered in the [WebGraphHandler](#web-interception)builder. The Boot starter detects beans of this type and automatically registers them for +Spring MVC application, see the[Web Endpoints](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/web.html#web.graphql.web-endpoints) section. + +#### 4.3.2. WebFlux + +A [Reactive `DataFetcher`](#execution-reactive-datafetcher) can rely on access to Reactor context that +originates from the WebFlux request handling chain. This includes Reactor context +added by [WebInterceptor](#web-interception) components. + +### 4.4. Exception Resolution + +GraphQL Java applications can register a `DataFetcherExceptionHandler` to decide how to +represent exceptions from the data layer in the "errors" section of the GraphQL response. + +Spring for GraphQL has a built-in `DataFetcherExceptionHandler` that is configured for use +by the [`GraphQLSource`](#execution-graphqlsource) builder. It enables applications to register one or +more Spring `DataFetcherExceptionResolver` components that are invoked sequentially +until one resolves the `Exception` to a list of `graphql.GraphQLError` objects. + +`DataFetcherExceptionResolver` is an asynchronous contract. 
For most implementations, it +would be sufficient to extend `DataFetcherExceptionResolverAdapter` and override +one of its `resolveToSingleError` or `resolveToMultipleErrors` methods that +resolve exceptions synchronously. + +A `GraphQLError` can be assigned a `graphql.ErrorClassification`. Spring for GraphQL +defines an `ErrorType` enum with common error classification categories: + +* `BAD_REQUEST` + +* `UNAUTHORIZED` + +* `FORBIDDEN` + +* `NOT_FOUND` + +* `INTERNAL_ERROR` + +Applications can use this to classify errors. If an error remains unresolved, by +default it is marked as `INTERNAL_ERROR`. + +### 4.5. Batch Loading + +Given a `Book` and its `Author`, we can create one `DataFetcher` for a book and another +for its author. This allows selecting books with or without authors, but it means books +and authors aren’t loaded together, which is especially inefficient when querying multiple +books as the author for each book is loaded individually. This is known as the N+1 select +problem. + +#### 4.5.1. `DataLoader` + +GraphQL Java provides a `DataLoader` mechanism for batch loading of related entities. +You can find the full details in the[GraphQL Java docs](https://www.graphql-java.com/documentation/batching/). Below is a +summary of how it works: + +1. Register `DataLoader`'s in the `DataLoaderRegistry` that can load entities, given unique keys. + +2. `DataFetcher`'s can access `DataLoader`'s and use them to load entities by id. + +3. A `DataLoader` defers loading by returning a future so it can be done in a batch. + +4. `DataLoader`'s maintain a per request cache of loaded entities that can further + improve efficiency. + +#### 4.5.2. `BatchLoaderRegistry` + +The complete batch loading mechanism in GraphQL Java requires implementing one of +several `BatchLoader` interfaces, then wrapping and registering those as `DataLoader`s +with a name in the `DataLoaderRegistry`. + +The API in Spring GraphQL is slightly different. 
For registration, there is only one, +central `BatchLoaderRegistry` exposing factory methods and a builder to create and +register any number of batch loading functions: + +``` +@Configuration +public class MyConfig { + + public MyConfig(BatchLoaderRegistry registry) { + + registry.forTypePair(Long.class, Author.class).registerMappedBatchLoader((authorIds, env) -> { + // return Mono + }); + + // more registrations ... + } + +} + +``` + +The Spring Boot starter declares a `BatchLoaderRegistry` bean that you can inject into +your configuration, as shown above, or into any component such as a controller in order +to register batch loading functions. In turn the `BatchLoaderRegistry` is injected into`ExecutionGraphQlService` where it ensures `DataLoader` registrations per request. + +By default, the `DataLoader` name is based on the class name of the target entity. +This allows an `@SchemaMapping` method to declare a[DataLoader argument](#controllers-schema-mapping-data-loader) with a generic type, and +without the need for specifying a name. The name, however, can be customized through the`BatchLoaderRegistry` builder, if necessary, along with other `DataLoader` options. + +For many cases, when loading related entities, you can use[@BatchMapping](#controllers-batch-mapping) controller methods, which are a shortcut +for and replace the need to use `BatchLoaderRegistry` and `DataLoader` directly. +`BatchLoaderRegistry` provides other important benefits too. It supports access to +the same `GraphQLContext` from batch loading functions and from `@BatchMapping` methods, +as well as ensures [Context Propagation](#execution-context) to them. This is why applications are expected +to use it. It is possible to perform your own `DataLoader` registrations directly but +such registrations would forgo the above benefits. + +#### 4.5.3. 
Testing Batch Loading + +Start by having `BatchLoaderRegistry` perform registrations on a `DataLoaderRegistry`: + +``` +BatchLoaderRegistry batchLoaderRegistry = new DefaultBatchLoaderRegistry(); +// perform registrations... + +DataLoaderRegistry dataLoaderRegistry = DataLoaderRegistry.newRegistry().build(); +batchLoaderRegistry.registerDataLoaders(dataLoaderRegistry, graphQLContext); + +``` + +Now you can access and test individual `DataLoader`'s as follows: + +``` +DataLoader loader = dataLoaderRegistry.getDataLoader(Book.class.getName()); +loader.load(1L); +loader.loadMany(Arrays.asList(2L, 3L)); +List books = loader.dispatchAndJoin(); // actual loading + +assertThat(books).hasSize(3); +assertThat(books.get(0).getName()).isEqualTo("..."); +// ... + +``` + +## 5. Data Integration + +Spring for GraphQL lets you leverage existing Spring technology, following common +programming models to expose underlying data sources through GraphQL. + +This section discusses an integration layer for Spring Data that provides an easy way to +adapt a Querydsl or a Query by Example repository to a `DataFetcher`, including the +option for automated detection and GraphQL Query registration for repositories marked +with `@GraphQlRepository`. + +### 5.1. Querydsl + +Spring for GraphQL supports use of [Querydsl](http://www.querydsl.com/) to fetch data through +the Spring Data[Querydsl extension](https://docs.spring.io/spring-data/commons/docs/current/reference/html/#core.extensions). +Querydsl provides a flexible yet typesafe approach to express query predicates by +generating a meta-model using annotation processors. 
+ +For example, declare a repository as `QuerydslPredicateExecutor`: + +``` +public interface AccountRepository extends Repository, + QuerydslPredicateExecutor { +} + +``` + +Then use it to create a `DataFetcher`: + +``` +// For single result queries +DataFetcher dataFetcher = + QuerydslDataFetcher.builder(repository).single(); + +// For multi-result queries +DataFetcher> dataFetcher = + QuerydslDataFetcher.builder(repository).many(); + +``` + +You can now register the above `DataFetcher` through a[`RuntimeWiringConfigurer`](#execution-graphqlsource-runtimewiring-configurer). + +The `DataFetcher` builds a Querydsl `Predicate` from GraphQL request parameters, and +uses it to fetch data. Spring Data supports `QuerydslPredicateExecutor` for JPA, +MongoDB, and LDAP. + +If the repository is `ReactiveQuerydslPredicateExecutor`, the builder returns`DataFetcher>` or `DataFetcher>`. Spring Data supports this +variant for MongoDB. + +#### 5.1.1. Build Setup + +To configure Querydsl in your build, follow the[official reference documentation](https://querydsl.com/static/querydsl/latest/reference/html/ch02.html): + +For example: + +Gradle + +``` +dependencies { + //... 
+ + annotationProcessor "com.querydsl:querydsl-apt:$querydslVersion:jpa", + 'org.hibernate.javax.persistence:hibernate-jpa-2.1-api:1.0.2.Final', + 'javax.annotation:javax.annotation-api:1.3.2' +} + +compileJava { + options.annotationProcessorPath = configurations.annotationProcessor +} +``` + +Maven + +``` + + + + com.querydsl + querydsl-apt + ${querydsl.version} + jpa + provided + + + org.hibernate.javax.persistence + hibernate-jpa-2.1-api + 1.0.2.Final + + + javax.annotation + javax.annotation-api + 1.3.2 + + + + + + com.mysema.maven + apt-maven-plugin + ${apt-maven-plugin.version} + + + + process + + + target/generated-sources/java + com.querydsl.apt.jpa.JPAAnnotationProcessor + + + + + +``` + +The [webmvc-http](https://github.com/spring-projects/spring-graphql/tree/main/samples/webmvc-http) sample uses Querydsl for`artifactRepositories`. + +#### 5.1.2. Customizations + +`QuerydslDataFetcher` supports customizing how GraphQL arguments are bound onto properties +to create a Querydsl `Predicate`. By default, arguments are bound as "is equal to" for +each available property. To customize that, you can use `QuerydslDataFetcher` builder +methods to provide a `QuerydslBinderCustomizer`. + +A repository may itself be an instance of `QuerydslBinderCustomizer`. This is auto-detected +and transparently applied during [Auto-Registration](#data-querydsl-registration). However, when manually +building a `QuerydslDataFetcher` you will need to use builder methods to apply it. + +`QuerydslDataFetcher` supports interface and DTO projections to transform query results +before returning these for further GraphQL processing. + +| |To learn what projections are, please refer to the[Spring Data docs](https://docs.spring.io/spring-data/commons/docs/current/reference/html/#projections).
To understand how to use projections in GraphQL, please see [Selection Set vs Projections](#data-projections).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To use Spring Data projections with Querydsl repositories, create either a projection interface +or a target DTO class and configure it through the `projectAs` method to obtain a`DataFetcher` producing the target type: + +``` +class Account { + + String name, identifier, description; + + Person owner; +} + +interface AccountProjection { + + String getName(); + + String getIdentifier(); +} + +// For single result queries +DataFetcher dataFetcher = + QuerydslDataFetcher.builder(repository).projectAs(AccountProjection.class).single(); + +// For multi-result queries +DataFetcher> dataFetcher = + QuerydslDataFetcher.builder(repository).projectAs(AccountProjection.class).many(); + +``` + +#### 5.1.3. Auto-Registration + +If a repository is annotated with `@GraphQlRepository`, it is automatically registered +for queries that do not already have a registered `DataFetcher` and whose return type +matches that of the repository domain type. This includes both single value and multi-value +queries. + +By default, the name of the GraphQL type returned by the query must match the simple name +of the repository domain type. If needed, you can use the `typeName` attribute of`@GraphQlRepository` to specify the target GraphQL type name. + +Auto-registration detects if a given repository implements `QuerydslBinderCustomizer` and +transparently applies that through `QuerydslDataFetcher` builder methods. + +Auto-registration is performed through a built-in `RuntimeWiringConfigurer` that can be +obtained from `QuerydslDataFetcher`. 
The[Boot starter](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/web.html#web.graphql.data-query) automatically +detects `@GraphQlRepository` beans and uses them to initialize the`RuntimeWiringConfigurer` with. + +Auto-registration does not support [customizations](#data-querydsl-customizations). +If you need that, you’ll need to use `QuerydslDataFetcher` to build and +register the `DataFetcher` manually through a[`RuntimeWiringConfigurer`](#execution-graphqlsource-runtimewiring-configurer). + +### 5.2. Query by Example + +Spring Data supports the use of[Query by Example](https://docs.spring.io/spring-data/commons/docs/current/reference/html/#query-by-example)to fetch data. Query by Example (QBE) is a simple querying technique that does not require +you to write queries through store-specific query languages. + +Start by declaring a repository that is `QueryByExampleExecutor`: + +``` +public interface AccountRepository extends Repository, + QueryByExampleExecutor { +} + +``` + +Use `QueryByExampleDataFetcher` to turn the repository into a `DataFetcher`: + +``` +// For single result queries +DataFetcher dataFetcher = + QueryByExampleDataFetcher.builder(repository).single(); + +// For multi-result queries +DataFetcher> dataFetcher = + QueryByExampleDataFetcher.builder(repository).many(); + +``` + +You can now register the above `DataFetcher` through a[`RuntimeWiringConfigurer`](#execution-graphqlsource-runtimewiring-configurer). + +The `DataFetcher` uses the GraphQL arguments map to create the domain type of the +repository and use that as the example object to fetch data with. Spring Data supports`QueryByExampleDataFetcher` for JPA, MongoDB, Neo4j, and Redis. + +If the repository is `ReactiveQueryByExampleExecutor`, the builder returns`DataFetcher>` or `DataFetcher>`. Spring Data supports this +variant for MongoDB, Neo4j, Redis, and R2dbc. + +#### 5.2.1. 
Build Setup + +Query by Example is already included in the Spring Data modules for the data stores where +it is supported, so no extra setup is required to enable it. + +#### 5.2.2. Customizations + +`QueryByExampleDataFetcher` supports interface and DTO projections to transform query +results before returning these for further GraphQL processing. + +| |To learn what projections are, please refer to the[Spring Data documentation](https://docs.spring.io/spring-data/commons/docs/current/reference/html/#projections).
To understand the role of projections in GraphQL, please see [Selection Set vs Projections](#data-projections).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To use Spring Data projections with Query by Example repositories, create either a projection interface +or a target DTO class and configure it through the `projectAs` method to obtain a`DataFetcher` producing the target type: + +``` +class Account { + + String name, identifier, description; + + Person owner; +} + +interface AccountProjection { + + String getName(); + + String getIdentifier(); +} + +// For single result queries +DataFetcher dataFetcher = + QueryByExampleDataFetcher.builder(repository).projectAs(AccountProjection.class).single(); + +// For multi-result queries +DataFetcher> dataFetcher = + QueryByExampleDataFetcher.builder(repository).projectAs(AccountProjection.class).many(); + +``` + +#### 5.2.3. Auto-Registration + +If a repository is annotated with `@GraphQlRepository`, it is automatically registered +for queries that do not already have a registered `DataFetcher` and whose return type +matches that of the repository domain type. This includes both single value and multi-value +queries. + +By default, the name of the GraphQL type returned by the query must match the simple name +of the repository domain type. If needed, you can use the `typeName` attribute of`@GraphQlRepository` to specify the target GraphQL type name. + +Auto-registration is performed through a built-in `RuntimeWiringConfigurer` that can be +obtained from `QueryByExampleDataFetcher`. 
The[Boot starter](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/web.html#web.graphql.data-query) automatically +detects `@GraphQlRepository` beans and uses them to initialize the`RuntimeWiringConfigurer` with. + +Auto-registration does not support [customizations](#data-querybyexample-customizations). +If you need that, you’ll need to use `QueryByExampleDataFetcher` to build and +register the `DataFetcher` manually through a[`RuntimeWiringConfigurer`](#execution-graphqlsource-runtimewiring-configurer). + +### 5.3. Selection Set vs Projections + +A common question that arises is, how GraphQL selection sets compare to[Spring Data projections](https://docs.spring.io/spring-data/commons/docs/current/reference/html/#projections)and what role does each play? + +The short answer is that Spring for GraphQL is not a data gateway that translates GraphQL +queries directly into SQL or JSON queries. Instead, it lets you leverage existing Spring +technology and does not assume a one for one mapping between the GraphQL schema and the +underlying data model. That is why client-driven selection and server-side transformation +of the data model can play complementary roles. + +To better understand, consider that Spring Data promotes domain-driven (DDD) design as +the recommended approach to manage complexity in the data layer. In DDD, it is important +to adhere to the constraints of an aggregate. By definition an aggregate is valid only if +loaded in its entirety, since a partially loaded aggregate may impose limitations on +aggregate functionality. + +In Spring Data you can choose whether you want your aggregate be exposed as is, or +whether to apply transformations to the data model before returning it as a GraphQL +result. 
Sometimes it’s enough to do the former, and by default the[Querydsl](#data-querydsl) and the [Query by Example](#data-querybyexample) integrations turn the GraphQL +selection set into property path hints that the underlying Spring Data module uses to +limit the selection. + +In other cases, it’s useful to reduce or even transform the underlying data model in +order to adapt to the GraphQL schema. Spring Data supports this through Interface +and DTO Projections. + +Interface projections define a fixed set of properties to expose where properties may or +may not be `null`, depending on the data store query result. There are two kinds of +interface projections both of which determine what properties to load from the underlying +data source: + +* [Closed interface projections](https://docs.spring.io/spring-data/commons/docs/current/reference/html/#projections.interfaces.closed)are helpful if you cannot partially materialize the aggregate object, but you still + want to expose a subset of properties. + +* [Open interface projections](https://docs.spring.io/spring-data/commons/docs/current/reference/html/#projections.interfaces.open)leverage Spring’s `@Value` annotation and[SpEL](https://docs.spring.io/spring-framework/docs/current/reference/html/core.html#expressions) expressions to apply lightweight + data transformations, such as concatenations, computations, or applying static functions + to a property. + +DTO projections offer a higher level of customization as you can place transformation +code either in the constructor or in getter methods. + +DTO projections materialize from a query where the individual properties are +determined by the projection itself. DTO projections are commonly used with full-args +constructors (e.g. Java records), and therefore they can only be constructed if all +required fields (or columns) are part of the database query result. + +## 6. 
Annotated Controllers + +Spring for GraphQL provides an annotation-based programming model where `@Controller`components use annotations to declare handler methods with flexible method signatures to +fetch the data for specific GraphQL fields. For example: + +``` +@Controller +public class GreetingController { + + @QueryMapping (1) + public String hello() { (2) + return "Hello, world!"; + } + +} + +``` + +|**1**| Bind this method to a query, i.e. a field under the Query type. | +|-----|---------------------------------------------------------------------------| +|**2**|Determine the query from the method name if not declared on the annotation.| + +Spring for GraphQL uses `RuntimeWiring.Builder` to register the above handler method as a`graphql.schema.DataFetcher` for the query named "hello". + +### 6.1. Declaration + +You can define `@Controller` beans as standard Spring bean definitions. The`@Controller` stereotype allows for auto-detection, aligned with Spring general +support for detecting `@Controller` and `@Component` classes on the classpath and +auto-registering bean definitions for them. It also acts as a stereotype for the annotated +class, indicating its role as a data fetching component in a GraphQL application. + +`AnnotatedControllerConfigurer` detects `@Controller` beans and registers their +annotated handler methods as `DataFetcher`s via `RuntimeWiring.Builder`. It is an +implementation of `RuntimeWiringConfigurer` which can be added to `GraphQlSource.Builder`. +The Spring Boot starter automatically declares `AnnotatedControllerConfigurer` as a bean +and adds all `RuntimeWiringConfigurer` beans to `GraphQlSource.Builder` and that enables +support for annotated `DataFetcher`s, see the[GraphQL RuntimeWiring](https://docs.spring.io/spring-boot/docs/2.7.0-SNAPSHOT/reference/html/web.html#web.graphql.runtimewiring) section +in the Boot starter documentation. + +### 6.2. 
`@SchemaMapping` + +The `@SchemaMapping` annotation maps a handler method to a field in the GraphQL schema +and declares it to be the `DataFetcher` for that field. The annotation can specify the +parent type name, and the field name: + +``` +@Controller +public class BookController { + + @SchemaMapping(typeName="Book", field="author") + public Author getAuthor(Book book) { + // ... + } +} + +``` + +The `@SchemaMapping` annotation can also leave out those attributes, in which case the +field name defaults to the method name, while the type name defaults to the simple class +name of the source/parent object injected into the method. For example, the below +defaults to type "Book" and field "author": + +``` +@Controller +public class BookController { + + @SchemaMapping + public Author author(Book book) { + // ... + } +} + +``` + +The `@SchemaMapping` annotation can be declared at the class level to specify a default +type name for all handler methods in the class. + +``` +@Controller +@SchemaMapping(typeName="Book") +public class BookController { + + // @SchemaMapping methods for fields of the "Book" type + +} + +``` + +`@QueryMapping`, `@MutationMapping`, and `@SubscriptionMapping` are meta annotations that +are themselves annotated with `@SchemaMapping` and have the typeName preset to `Query`,`Mutation`, or `Subscription` respectively. Effectively, these are shortcut annotations +for fields under the Query, Mutation, and Subscription types respectively. For example: + +``` +@Controller +public class BookController { + + @QueryMapping + public Book bookById(@Argument Long id) { + // ... + } + + @MutationMapping + public Book addBook(@Argument BookInput bookInput) { + // ... + } + + @SubscriptionMapping + public Flux newPublications() { + // ... + } +} + +``` + +`@SchemaMapping` handler methods have flexible signatures and can choose from a range of +method arguments and return values.. + +#### 6.2.1. 
Method Signature + +Schema mapping handler methods can have any of the following method arguments: + +| Method Argument | Description | +|-------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------| +| `@Argument` | For access to a named field argument converted to a higher-level, typed Object.
See [`@Argument`](#controllers-schema-mapping-argument). | +| `@Arguments` | For access to all field arguments converted to a higher-level, typed Object.
See [`@Arguments`](#controllers-schema-mapping-arguments). | +| `@ProjectedPayload` Interface |For access to field arguments through a projection interface.
See [`@ProjectedPayload` Interface](#controllers-schema-mapping-projectedpayload-argument).| +| Source | For access to the source (i.e. parent/container) instance of the field.
See [Source](#controllers-schema-mapping-source). | +| `DataLoader` | For access to a `DataLoader` in the `DataLoaderRegistry`.
See [`DataLoader`](#controllers-schema-mapping-data-loader). | +| `@ContextValue` | For access to a value from the localContext, if it is an instance of `GraphQLContext`,
or from the `GraphQLContext` of `DataFetchingEnvironment`. | +| `GraphQLContext` | For access to the context from the `DataFetchingEnvironment`. | +| `java.security.Principal` | Obtained from the Spring Security context, if available. | +| `@AuthenticationPrincipal` | For access to `Authentication#getPrincipal()` from the Spring Security context. | +|`DataFetchingFieldSelectionSet`| For access to the selection set for the query through the `DataFetchingEnvironment`. | +| `Locale`, `Optional` | For access to the `Locale` from the `DataFetchingEnvironment`. | +| `DataFetchingEnvironment` | For direct access to the underlying `DataFetchingEnvironment`. | + +Schema mapping handler methods can return any value, including Reactor `Mono` and`Flux` as described in [Reactive `DataFetcher`](#execution-reactive-datafetcher). + +#### 6.2.2. `@Argument` + +In GraphQL Java, `DataFetchingEnvironment` provides access to a map of field-specific +argument values. The values can be simple scalar values (e.g. String, Long), a `Map` of +values for more complex input, or a `List` of values. + +Use the `@Argument` annotation to inject a named field argument into a handler method. +The method parameter can be a higher-level, typed Object of any type. It is created and +initialized from the named field argument value(s), either matching them to single data +constructor parameters, or using the default constructor and then matching keys onto +Object properties through a `org.springframework.validation.DataBinder`: + +``` +@Controller +public class BookController { + + @QueryMapping + public Book bookById(@Argument Long id) { + // ... + } + + @MutationMapping + public Book addBook(@Argument BookInput bookInput) { + // ... + } +} + +``` + +By default, if the method parameter name is available (requires the `-parameters` compiler +flag with Java 8+ or debugging info from the compiler), it is used to look up the argument. +If needed, you can customize the name through the annotation, e.g. 
`@Argument("bookInput")`. + +| |The `@Argument` annotation does not have a "required" flag, nor the option to
specify a default value. Both of these can be specified at the GraphQL schema level and
are enforced by the GraphQL Engine.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can use `@Argument` on a `Map` argument, to obtain all argument +values. The name attribute on `@Argument` must not be set. + +#### 6.2.3. `@Arguments` + +Use the `@Arguments` annotation, if you want to bind the full arguments map onto a single +target Object, in contrast to `@Argument`, which binds a specific, named argument. + +For example, `@Argument BookInput bookInput` uses the value of the argument "bookInput" +to initialize `BookInput`, while `@Arguments` uses the full arguments map and in that +case, top-level arguments are bound to `BookInput` properties. + +#### 6.2.4. `@Argument(s)` Validation + +If a [Bean Validation](https://docs.spring.io/spring-framework/docs/current/reference/html/core.html#validation-beanvalidation-overview)`Validator` (or typically, a `LocalValidatorFactoryBean`) bean is present in the application context, +the `AnnotatedControllerConfigurer` will auto-detect it and configure support for validation. +Controller arguments annotated with `@Valid` and `@Validated` are then validated before method invocation. + +Bean Validation lets you declare constraints on types, as the following example shows: + +``` +public class BookInput { + + @NotNull + private String title; + + @NotNull + @Size(max=13) + private String isbn; +} + +``` + +We can then mark our argument for validation with `@Valid`: + +``` +@Controller +public class BookController { + + @MutationMapping + public Book addBook(@Argument @Valid BookInput bookInput) { + // ... + } +} + +``` + +If an error occurs during validation, a `ConstraintViolationException` is thrown and can be +later [resolved with a custom `DataFetcherExceptionResolver`](#execution-exceptions). 
+ +| |Unlike Spring MVC, handler method signatures do not support the injection of `BindingResult`for reacting to validation errors: those are globally dealt with as exceptions.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.2.5. `@ProjectPayload` Interface + +As an alternative to using complete Objects with [`@Argument`](#controllers-schema-mapping-argument), +you can also use a projection interface to access GraphQL request arguments through a +well-defined, minimal interface. Argument projections are provided by[Spring Data’s Interface projections](https://docs.spring.io/spring-data/commons/docs/current/reference/html/#projections.interfaces)when Spring Data is on the class path. + +To make use of this, create an interface annotated with `@ProjectedPayload` and declare +it as a controller method parameter. If the parameter is annotated with `@Argument`, +it applies to an individual argument within the `DataFetchingEnvironment.getArguments()`map. When declared without `@Argument`, the projection works on top-level arguments in +the complete arguments map. + +For example: + +``` +@Controller +public class BookController { + + @QueryMapping + public Book bookById(BookIdProjection bookId) { + // ... + } + + @MutationMapping + public Book addBook(@Argument BookInputProjection bookInput) { + // ... + } +} + +@ProjectedPayload +interface BookIdProjection { + + Long getId(); +} + +@ProjectedPayload +interface BookInputProjection { + + String getName(); + + @Value("#{target.author + ' ' + target.name}") + String getAuthorAndName(); +} + +``` + +#### 6.2.6. Source + +In GraphQL Java, the `DataFetchingEnvironment` provides access to the source (i.e. +parent/container) instance of the field. To access this, simply declare a method parameter +of the expected target type. 
+ +``` +@Controller +public class BookController { + + @SchemaMapping + public Author author(Book book) { + // ... + } +} + +``` + +The source method argument also helps to determine the type name for the mapping. +If the simple name of the Java class matches the GraphQL type, then there is no need to +explicitly specify the type name in the `@SchemaMapping` annotation. + +| |A [`@BatchMapping`](#controllers-batch-mapping) handler method can batch load all authors for a query,
given a list of source/parent books objects.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.2.7. `DataLoader` + +When you register a batch loading function for an entity, as explained in[Batch Loading](#execution-batching), you can access the `DataLoader` for the entity by declaring a +method argument of type `DataLoader` and use it to load the entity: + +``` +@Controller +public class BookController { + + public BookController(BatchLoaderRegistry registry) { + registry.forTypePair(Long.class, Author.class).registerMappedBatchLoader((authorIds, env) -> { + // return Map + }); + } + + @SchemaMapping + public CompletableFuture author(Book book, DataLoader loader) { + return loader.load(book.getAuthorId()); + } + +} + +``` + +By default, `BatchLoaderRegistry` uses the full class name of the value type (e.g. the +class name for `Author`) for the key of the registration, and therefore simply declaring +the `DataLoader` method argument with generic types provides enough information +to locate it in the `DataLoaderRegistry`. As a fallback, the `DataLoader` method argument +resolver will also try the method argument name as the key but typically that should not +be necessary. + +Note that for many cases with loading related entities, where the `@SchemaMapping` simply +delegates to a `DataLoader`, you can reduce boilerplate by using a[@BatchMapping](#controllers-batch-mapping) method as described in the next section. + +### 6.3. `@BatchMapping` + +[Batch Loading](#execution-batching) addresses the N+1 select problem through the use of an`org.dataloader.DataLoader` to defer the loading of individual entity instances, so they +can be loaded together. 
For example: + +``` +@Controller +public class BookController { + + public BookController(BatchLoaderRegistry registry) { + registry.forTypePair(Long.class, Author.class).registerMappedBatchLoader((authorIds, env) -> { + // return Map + }); + } + + @SchemaMapping + public CompletableFuture author(Book book, DataLoader loader) { + return loader.load(book.getAuthorId()); + } + +} + +``` + +For the straight-forward case of loading an associated entity, shown above, the`@SchemaMapping` method does nothing more than delegate to the `DataLoader`. This is +boilerplate that can be avoided with a `@BatchMapping` method. For example: + +``` +@Controller +public class BookController { + + @BatchMapping + public Mono> author(List books) { + // ... + } +} + +``` + +The above becomes a batch loading function in the `BatchLoaderRegistry`where keys are `Book` instances and the loaded values their authors. In addition, a`DataFetcher` is also transparently bound to the `author` field of the type `Book`, which +simply delegates to the `DataLoader` for authors, given its source/parent `Book` instance. + +| |To be used as a unique key, `Book` must implement `hashcode` and `equals`.| +|---|--------------------------------------------------------------------------| + +By default, the field name defaults to the method name, while the type name defaults to +the simple class name of the input `List` element type. Both can be customized through +annotation attributes. The type name can also be inherited from a class level`@SchemaMapping`. + +#### 6.3.1. Method Signature + +Batch mapping methods support the following arguments: + +| Method Argument | Description | +|-------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------| +| `List` | The source/parent objects. | +|`java.security.Principal`| Obtained from Spring Security context, if available. 
| +| `@ContextValue` |For access to a value from the `GraphQLContext` of `BatchLoaderEnvironment`,
which is the same context as the one from the `DataFetchingEnvironment`.| +| `GraphQLContext` | For access to the context from the `BatchLoaderEnvironment`,
which is the same context as the one from the `DataFetchingEnvironment`. | +|`BatchLoaderEnvironment` | The environment that is available in GraphQL Java to a`org.dataloader.BatchLoaderWithContext`. | + +Batch mapping methods can return: + +| Return Type | Description | +|---------------------|--------------------------------------------------------------------------------------------------------------------------| +| `Mono>` | A map with parent objects as keys, and batch loaded objects as values. | +| `Flux` |A sequence of batch loaded objects that must be in the same order as the source/parent
objects passed into the method.| +|`Map`, `List`| Imperative variants, e.g. without remote calls to make. | + +## 7. Security + +The path to a [Web](#web-transports) GraphQL endpoint can be secured with HTTP +URL security to ensure that only authenticated users can access it. This does not, +however, differentiate among different GraphQL requests on such a shared endpoint on +a single URL. + +To apply more fine-grained security, add Spring Security annotations such as`@PreAuthorize` or `@Secured` to service methods involved in fetching specific parts of +the GraphQL response. This should work due to [Context Propagation](#execution-context) that aims to make +Security, and other context, available at the data fetching level. + +The Spring for GraphQL repository contains samples for[Spring MVC](https://github.com/spring-projects/spring-graphql/tree/main/samples/webmvc-http-security) and for[WebFlux](https://github.com/spring-projects/spring-graphql/tree/main/samples/webflux-security). + +## 8. Testing + +It’s possible to test GraphQL requests with Spring’s `WebTestClient`, just sending and +receiving JSON, but a number of GraphQL specific details make this approach more +cumbersome than is necessary. + +To get the full testing support, you’ll need to add the `spring-graphql-test` dependdency +in your build: + +Gradle + +``` +dependencies { + // ... + testImplementation 'org.springframework.graphql:spring-graphql-test:1.0.0-SNAPSHOT' +} +``` + +Maven + +``` + + + + org.springframework.graphql + spring-graphql-test + 1.0.0-SNAPSHOT + test + + +``` + +### 8.1. `GraphQlTester` + +`GraphQlTester` defines a workflow to test GraphQL requests with the following benefits: + +* Verify no unexpected errors under the "errors" key in the response. + +* Decode under the "data" key in the response. + +* Use JsonPath to decode different parts of the response. + +* Test subscriptions. 
+ +To create `GraphQlTester`, you only need a `GraphQlService`, and no transport: + +``` +GraphQlSource graphQlSource = GraphQlSource.builder() + .schemaResources(...) + .runtimeWiringConfigurer(...) + .build(); + +GraphQlService graphQlService = new ExecutionGraphQlService(graphQlSource); + +GraphQlTester graphQlTester = GraphQlTester.builder(graphQlService).build(); + +``` + +### 8.2. `WebGraphQlTester` + +`WebGraphQlTester` extends `GraphQlTester` to add a workflow and configuration specific +to [Web Transports](#web-transports), and it always verifies GraphQL HTTP responses are 200 (OK). + +To create `WebGraphQlTester`, you need one of the following inputs: + +* `WebTestClient` — perform requests as an HTTP client, either against [HTTP](#web-http)handlers without a server, or against a live server. + +* `WebGraphQlHandler` — perform requests through the [Web Interception](#web-interception) chain used + by both [HTTP](#web-http) and [WebSocket](#web-websocket) handlers, which in effect is testing without + a Web framework. One reason to use this is for [Subscriptions](#testing-subscriptions). + +For Spring WebFlux without a server, you can point to your Spring configuration: + +``` +ApplicationContext context = ... ; + +WebTestClient client = + WebTestClient.bindToApplicationContext(context) + .configureClient() + .baseUrl("/graphql") + .build(); + +WebGraphQlTester tester = WebGraphQlTester.builder(client).build(); + +``` + +For Spring MVC without a server, the same but using `MockMvcWebTestClient`: + +``` +WebApplicationContext context = ... 
; + +WebTestClient client = + MockMvcWebTestClient.bindToApplicationContext(context) + .configureClient() + .baseUrl("/graphql") + .build(); + +WebGraphQlTester tester = WebGraphQlTester.builder(client).build(); + +``` + +To test against a live, running server: + +``` +WebTestClient client = + WebTestClient.bindToServer() + .baseUrl("http://localhost:8080/graphql") + .build(); + +WebGraphQlTester tester = WebGraphQlTester.builder(client).build(); + +``` + +`WebGraphQlTester` supports setting HTTP request headers and access to HTTP response +headers. This may be useful to inspect or set security related headers. + +``` +this.graphQlTester.queryName("{ myQuery }") + .httpHeaders(headers -> headers.setBasicAuth("rob", "...")) + .execute() + .httpHeadersSatisfy(headers -> { + // check response headers + }) + .path("myQuery.field1").entity(String.class).isEqualTo("value1") + .path("myQuery.field2").entity(String.class).isEqualTo("value2"); + +``` + +You can also set default request headers at the builder level: + +``` +WebGraphQlTester tester = WebGraphQlTester.builder(client) + .defaultHttpHeaders(headers -> headers.setBasicAuth("rob", "...")) + .build(); + +``` + +### 8.3. Queries + +Below is an example query test using[JsonPath](https://github.com/json-path/JsonPath) to extract all release versions in the +GraphQL response. + +``` +String query = "{" + + " project(slug:\"spring-framework\") {" + + " releases {" + + " version" + + " }"+ + " }" + + "}"; + +graphQlTester.query(query) + .execute() + .path("project.releases[*].version") + .entityList(String.class) + .hasSizeGreaterThan(1); + +``` + +The JsonPath is relative to the "data" section of the response. + +You can also create query files with extensions `.graphql` or `.gql` under `"graphql/"` on +the classpath and refer to them by file name. For example, given a file called`projectReleases.graphql` in `src/main/resources/graphql`, with content: + +``` +query projectReleases($slug: ID!) 
{ + project(slug: $slug) { + releases { + version + } + } +} +``` + +You can write the same test as follows: + +``` +graphQlTester.queryName("projectReleases") (1) + .variable("slug", "spring-framework") (2) + .execute() + .path("project.releases[*].version") + .entityList(String.class) + .hasSizeGreaterThan(1); + +``` + +|**1**|Refer to the query in the file named "projectReleases".| +|-----|-------------------------------------------------------| +|**2**| Set the `slug` variable. | + +| |The "JS GraphQL" plugin for IntelliJ supports GraphQL query files with code completion.| +|---|---------------------------------------------------------------------------------------| + +### 8.4. Errors + +Verify won’t succeed when there are errors under the "errors" key in the response. + +If necessary to ignore an error, use an error filter `Predicate`: + +``` +graphQlTester.query(query) + .execute() + .errors() + .filter(error -> ...) + .verify() + .path("project.releases[*].version") + .entityList(String.class) + .hasSizeGreaterThan(1); + +``` + +An error filter can be registered globally and apply to all tests: + +``` +WebGraphQlTester graphQlTester = WebGraphQlTester.builder(client) + .errorFilter(error -> ...) + .build(); + +``` + +Or to expect an error, and in contrast to `filter`, throw an assertion error +when it doesn’t exist in the response: + +``` +graphQlTester.query(query) + .execute() + .errors() + .expect(error -> ...) + .verify() + .path("project.releases[*].version") + .entityList(String.class) + .hasSizeGreaterThan(1); + +``` + +Or inspect all errors directly and that also marks them as filtered: + +``` +graphQlTester.query(query) + .execute() + .errors() + .satisfy(errors -> { + // ... + }); + +``` + +If a request does not have any response data (e.g. mutation), use `executeAndVerify`instead of `execute` to verify there are no errors in the response: + +``` +graphQlTester.query(query).executeAndVerify(); + +``` + +### 8.5. 
Subscriptions + +The `executeSubscription` method defines a workflow specific to subscriptions which return +a stream of responses instead of a single response. + +To test subscriptions, you can create `GraphQlTester` with a `GraphQlService`, which +calls `graphql.GraphQL` directly and that returns a stream of responses: + +``` +GraphQlService service = ... ; + +GraphQlTester graphQlTester = GraphQlTester.builder(service).build(); + +Flux result = graphQlTester.query("subscription { greetings }") + .executeSubscription() + .toFlux("greetings", String.class); // decode each response + +``` + +The `StepVerifier` from Project Reactor is useful to verify a stream: + +``` +Flux result = graphQlTester.query("subscription { greetings }") + .executeSubscription() + .toFlux("greetings", String.class); + +StepVerifier.create(result) + .expectNext("Hi") + .expectNext("Bonjour") + .expectNext("Hola") + .verifyComplete(); + +``` + +To test with the [Web Interception](#web-interception) chain, you can create `WebGraphQlTester` with a`WebGraphQlHandler`: + +``` +GraphQlService service = ... ; + +WebGraphQlHandler handler = WebGraphQlHandler.builder(service) + .interceptor((input, next) -> next.handle(input)) + .build(); + +WebGraphQlTester graphQlTester = WebGraphQlTester.builder(handler).build(); + +``` + +Currently, Spring for GraphQL does not support testing with a WebSocket client, and it +cannot be used for integration test of GraphQL over WebSocket requests. + +## 9. Samples + +This Spring for GraphQL repository contains [sample applications](https://github.com/spring-projects/spring-graphql/tree/main/samples) for +various scenarios. 
+ +You can run those by cloning this repository and running main application classes from +your IDE or by typing the following on the command line: + +``` +$ ./gradlew :samples:{sample-directory-name}:bootRun +``` \ No newline at end of file diff --git a/docs/en/spring-framework/README.md b/docs/en/spring-framework/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a0fa33a12b80e6617e48c199557ecf7fa94d9969 --- /dev/null +++ b/docs/en/spring-framework/README.md @@ -0,0 +1 @@ +# Spring Framework diff --git a/docs/en/spring-framework/core.md b/docs/en/spring-framework/core.md new file mode 100644 index 0000000000000000000000000000000000000000..8ee51af7678e80ffbce70e9be63dd1cae0b28348 --- /dev/null +++ b/docs/en/spring-framework/core.md @@ -0,0 +1,21586 @@ +# Core Technologies + +This part of the reference documentation covers all the technologies that are +absolutely integral to the Spring Framework. + +Foremost amongst these is the Spring Framework’s Inversion of Control (IoC) container. +A thorough treatment of the Spring Framework’s IoC container is closely followed by +comprehensive coverage of Spring’s Aspect-Oriented Programming (AOP) technologies. +The Spring Framework has its own AOP framework, which is conceptually easy to +understand and which successfully addresses the 80% sweet spot of AOP requirements +in Java enterprise programming. + +Coverage of Spring’s integration with AspectJ (currently the richest — in terms of +features — and certainly most mature AOP implementation in the Java enterprise space) +is also provided. + +## 1. The IoC Container + +This chapter covers Spring’s Inversion of Control (IoC) container. + +### 1.1. Introduction to the Spring IoC Container and Beans + +This chapter covers the Spring Framework implementation of the Inversion of Control +(IoC) principle. IoC is also known as dependency injection (DI). 
It is a process whereby +objects define their dependencies (that is, the other objects they work with) only through +constructor arguments, arguments to a factory method, or properties that are set on the +object instance after it is constructed or returned from a factory method. The container +then injects those dependencies when it creates the bean. This process is fundamentally +the inverse (hence the name, Inversion of Control) of the bean itself +controlling the instantiation or location of its dependencies by using direct +construction of classes or a mechanism such as the Service Locator pattern. + +The `org.springframework.beans` and `org.springframework.context` packages are the basis +for Spring Framework’s IoC container. The[`BeanFactory`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/BeanFactory.html)interface provides an advanced configuration mechanism capable of managing any type of +object.[`ApplicationContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/ApplicationContext.html)is a sub-interface of `BeanFactory`. It adds: + +* Easier integration with Spring’s AOP features + +* Message resource handling (for use in internationalization) + +* Event publication + +* Application-layer specific contexts such as the `WebApplicationContext`for use in web applications. + +In short, the `BeanFactory` provides the configuration framework and basic +functionality, and the `ApplicationContext` adds more enterprise-specific functionality. +The `ApplicationContext` is a complete superset of the `BeanFactory` and is used +exclusively in this chapter in descriptions of Spring’s IoC container. For more +information on using the `BeanFactory` instead of the `ApplicationContext,` see[The `BeanFactory`](#beans-beanfactory). + +In Spring, the objects that form the backbone of your application and that are managed +by the Spring IoC container are called beans. 
A bean is an object that is +instantiated, assembled, and managed by a Spring IoC container. Otherwise, a +bean is simply one of many objects in your application. Beans, and the dependencies +among them, are reflected in the configuration metadata used by a container. + +### 1.2. Container Overview + +The `org.springframework.context.ApplicationContext` interface represents the Spring IoC +container and is responsible for instantiating, configuring, and assembling the +beans. The container gets its instructions on what objects to +instantiate, configure, and assemble by reading configuration metadata. The +configuration metadata is represented in XML, Java annotations, or Java code. It lets +you express the objects that compose your application and the rich interdependencies +between those objects. + +Several implementations of the `ApplicationContext` interface are supplied +with Spring. In stand-alone applications, it is common to create an +instance of[`ClassPathXmlApplicationContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/support/ClassPathXmlApplicationContext.html)or [`FileSystemXmlApplicationContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/support/FileSystemXmlApplicationContext.html). +While XML has been the traditional format for defining configuration metadata, you can +instruct the container to use Java annotations or code as the metadata format by +providing a small amount of XML configuration to declaratively enable support for these +additional metadata formats. + +In most application scenarios, explicit user code is not required to instantiate one or +more instances of a Spring IoC container. 
For example, in a web application scenario, a +simple eight (or so) lines of boilerplate web descriptor XML in the `web.xml` file +of the application typically suffices (see [Convenient ApplicationContext Instantiation for Web Applications](#context-create)). If you use the[Spring Tools for Eclipse](https://spring.io/tools) (an Eclipse-powered development +environment), you can easily create this boilerplate configuration with a few mouse clicks or +keystrokes. + +The following diagram shows a high-level view of how Spring works. Your application classes +are combined with configuration metadata so that, after the `ApplicationContext` is +created and initialized, you have a fully configured and executable system or +application. + +![container magic](images/container-magic.png) + +Figure 1. The Spring IoC container + +#### 1.2.1. Configuration Metadata + +As the preceding diagram shows, the Spring IoC container consumes a form of +configuration metadata. This configuration metadata represents how you, as an +application developer, tell the Spring container to instantiate, configure, and assemble +the objects in your application. + +Configuration metadata is traditionally supplied in a simple and intuitive XML format, +which is what most of this chapter uses to convey key concepts and features of the +Spring IoC container. + +| |XML-based metadata is not the only allowed form of configuration metadata.
The Spring IoC container itself is totally decoupled from the format in which this
configuration metadata is actually written. These days, many developers choose[Java-based configuration](#beans-java) for their Spring applications.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For information about using other forms of metadata with the Spring container, see: + +* [Annotation-based configuration](#beans-annotation-config): Spring 2.5 introduced + support for annotation-based configuration metadata. + +* [Java-based configuration](#beans-java): Starting with Spring 3.0, many features + provided by the Spring JavaConfig project became part of the core Spring Framework. + Thus, you can define beans external to your application classes by using Java rather + than XML files. To use these new features, see the[`@Configuration`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/context/annotation/Configuration.html),[`@Bean`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/context/annotation/Bean.html),[`@Import`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/context/annotation/Import.html), + and [`@DependsOn`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/context/annotation/DependsOn.html) annotations. + +Spring configuration consists of at least one and typically more than one bean +definition that the container must manage. XML-based configuration metadata configures these +beans as `` elements inside a top-level `` element. Java +configuration typically uses `@Bean`-annotated methods within a `@Configuration` class. + +These bean definitions correspond to the actual objects that make up your application. 
+Typically, you define service layer objects, data access objects (DAOs), presentation +objects such as Struts `Action` instances, infrastructure objects such as Hibernate`SessionFactories`, JMS `Queues`, and so forth. Typically, one does not configure +fine-grained domain objects in the container, because it is usually the responsibility +of DAOs and business logic to create and load domain objects. However, you can use +Spring’s integration with AspectJ to configure objects that have been created outside +the control of an IoC container. See [Using AspectJ to +dependency-inject domain objects with Spring](#aop-atconfigurable). + +The following example shows the basic structure of XML-based configuration metadata: + +``` + + + + (1) (2) + + + + + + + + + + +``` + +|**1**| The `id` attribute is a string that identifies the individual bean definition. | +|-----|----------------------------------------------------------------------------------------------| +|**2**|The `class` attribute defines the type of the bean and uses the fully qualified
classname.| + +The value of the `id` attribute refers to collaborating objects. The XML for +referring to collaborating objects is not shown in this example. See[Dependencies](#beans-dependencies) for more information. + +#### 1.2.2. Instantiating a Container + +The location path or paths +supplied to an `ApplicationContext` constructor are resource strings that let +the container load configuration metadata from a variety of external resources, such +as the local file system, the Java `CLASSPATH`, and so on. + +Java + +``` +ApplicationContext context = new ClassPathXmlApplicationContext("services.xml", "daos.xml"); +``` + +Kotlin + +``` +val context = ClassPathXmlApplicationContext("services.xml", "daos.xml") +``` + +| |After you learn about Spring’s IoC container, you may want to know more about Spring’s`Resource` abstraction (as described in [Resources](#resources)), which provides a convenient
mechanism for reading an InputStream from locations defined in a URI syntax. In particular,`Resource` paths are used to construct applications contexts, as described in [Application Contexts and Resource Paths](#resources-app-ctx).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows the service layer objects `(services.xml)` configuration file: + +``` + + + + + + + + + + + + + + +``` + +The following example shows the data access objects `daos.xml` file: + +``` + + + + + + + + + + + + + + +``` + +In the preceding example, the service layer consists of the `PetStoreServiceImpl` class +and two data access objects of the types `JpaAccountDao` and `JpaItemDao` (based +on the JPA Object-Relational Mapping standard). The `property name` element refers to the +name of the JavaBean property, and the `ref` element refers to the name of another bean +definition. This linkage between `id` and `ref` elements expresses the dependency between +collaborating objects. For details of configuring an object’s dependencies, see[Dependencies](#beans-dependencies). + +##### Composing XML-based Configuration Metadata + +It can be useful to have bean definitions span multiple XML files. Often, each individual +XML configuration file represents a logical layer or module in your architecture. + +You can use the application context constructor to load bean definitions from all these +XML fragments. This constructor takes multiple `Resource` locations, as was shown in the[previous section](#beans-factory-instantiation). 
Alternatively, use one or more
+occurrences of the `<import/>` element to load bean definitions from another file or
+files. The following example shows how to do so:
+
+```
+<beans>
+    <import resource="services.xml"/>
+    <import resource="resources/messageSource.xml"/>
+    <import resource="/resources/themeSource.xml"/>
+
+    <bean id="bean1" class="..."/>
+    <bean id="bean2" class="..."/>
+</beans>
+```
+
+In the preceding example, external bean definitions are loaded from three files: `services.xml`, `messageSource.xml`, and `themeSource.xml`. All location paths are
+relative to the definition file doing the importing, so `services.xml` must be in the
+same directory or classpath location as the file doing the importing, while `messageSource.xml` and `themeSource.xml` must be in a `resources` location below the
+location of the importing file. As you can see, a leading slash is ignored. However, given
+that these paths are relative, it is better form not to use the slash at all. The
+contents of the files being imported, including the top level `<beans/>` element, must
+be valid XML bean definitions, according to the Spring Schema.
+
+| |It is possible, but not recommended, to reference files in parent directories using a
relative "../" path. Doing so creates a dependency on a file that is outside the current
application. In particular, this reference is not recommended for `classpath:` URLs (for
example, `classpath:../services.xml`), where the runtime resolution process chooses the
“nearest” classpath root and then looks into its parent directory. Classpath
configuration changes may lead to the choice of a different, incorrect directory.

You can always use fully qualified resource locations instead of relative paths: for
example, `file:C:/config/services.xml` or `classpath:/config/services.xml`. However, be
aware that you are coupling your application’s configuration to specific absolute
locations. It is generally preferable to keep an indirection for such absolute
locations — for example, through "${…}" placeholders that are resolved against JVM
system properties at runtime.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The namespace itself provides the import directive feature. Further +configuration features beyond plain bean definitions are available in a selection +of XML namespaces provided by Spring — for example, the `context` and `util` namespaces. + +##### The Groovy Bean Definition DSL + +As a further example for externalized configuration metadata, bean definitions can also +be expressed in Spring’s Groovy Bean Definition DSL, as known from the Grails framework. 
+Typically, such configuration live in a ".groovy" file with the structure shown in the +following example: + +``` +beans { + dataSource(BasicDataSource) { + driverClassName = "org.hsqldb.jdbcDriver" + url = "jdbc:hsqldb:mem:grailsDB" + username = "sa" + password = "" + settings = [mynew:"setting"] + } + sessionFactory(SessionFactory) { + dataSource = dataSource + } + myService(MyService) { + nestedBean = { AnotherBean bean -> + dataSource = dataSource + } + } +} +``` + +This configuration style is largely equivalent to XML bean definitions and even +supports Spring’s XML configuration namespaces. It also allows for importing XML +bean definition files through an `importBeans` directive. + +#### 1.2.3. Using the Container + +The `ApplicationContext` is the interface for an advanced factory capable of maintaining +a registry of different beans and their dependencies. By using the method`T getBean(String name, Class requiredType)`, you can retrieve instances of your beans. + +The `ApplicationContext` lets you read bean definitions and access them, as the following +example shows: + +Java + +``` +// create and configure beans +ApplicationContext context = new ClassPathXmlApplicationContext("services.xml", "daos.xml"); + +// retrieve configured instance +PetStoreService service = context.getBean("petStore", PetStoreService.class); + +// use configured instance +List userList = service.getUsernameList(); +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +// create and configure beans +val context = ClassPathXmlApplicationContext("services.xml", "daos.xml") + +// retrieve configured instance +val service = context.getBean("petStore") + +// use configured instance +var userList = service.getUsernameList() +``` + +With Groovy configuration, bootstrapping looks very similar. It has a different context +implementation class which is Groovy-aware (but also understands XML bean definitions). 
+The following example shows Groovy configuration: + +Java + +``` +ApplicationContext context = new GenericGroovyApplicationContext("services.groovy", "daos.groovy"); +``` + +Kotlin + +``` +val context = GenericGroovyApplicationContext("services.groovy", "daos.groovy") +``` + +The most flexible variant is `GenericApplicationContext` in combination with reader +delegates — for example, with `XmlBeanDefinitionReader` for XML files, as the following +example shows: + +Java + +``` +GenericApplicationContext context = new GenericApplicationContext(); +new XmlBeanDefinitionReader(context).loadBeanDefinitions("services.xml", "daos.xml"); +context.refresh(); +``` + +Kotlin + +``` +val context = GenericApplicationContext() +XmlBeanDefinitionReader(context).loadBeanDefinitions("services.xml", "daos.xml") +context.refresh() +``` + +You can also use the `GroovyBeanDefinitionReader` for Groovy files, as the following +example shows: + +Java + +``` +GenericApplicationContext context = new GenericApplicationContext(); +new GroovyBeanDefinitionReader(context).loadBeanDefinitions("services.groovy", "daos.groovy"); +context.refresh(); +``` + +Kotlin + +``` +val context = GenericApplicationContext() +GroovyBeanDefinitionReader(context).loadBeanDefinitions("services.groovy", "daos.groovy") +context.refresh() +``` + +You can mix and match such reader delegates on the same `ApplicationContext`, +reading bean definitions from diverse configuration sources. + +You can then use `getBean` to retrieve instances of your beans. The `ApplicationContext`interface has a few other methods for retrieving beans, but, ideally, your application +code should never use them. Indeed, your application code should have no calls to the`getBean()` method at all and thus have no dependency on Spring APIs at all. 
For example, +Spring’s integration with web frameworks provides dependency injection for various web +framework components such as controllers and JSF-managed beans, letting you declare +a dependency on a specific bean through metadata (such as an autowiring annotation). + +### 1.3. Bean Overview + +A Spring IoC container manages one or more beans. These beans are created with the +configuration metadata that you supply to the container (for example, in the form of XML`` definitions). + +Within the container itself, these bean definitions are represented as `BeanDefinition`objects, which contain (among other information) the following metadata: + +* A package-qualified class name: typically, the actual implementation class of the + bean being defined. + +* Bean behavioral configuration elements, which state how the bean should behave in the + container (scope, lifecycle callbacks, and so forth). + +* References to other beans that are needed for the bean to do its work. These + references are also called collaborators or dependencies. + +* Other configuration settings to set in the newly created object — for example, the size + limit of the pool or the number of connections to use in a bean that manages a + connection pool. + +This metadata translates to a set of properties that make up each bean definition. 
+The following table describes these properties: + +| Property | Explained in…​ | +|------------------------|---------------------------------------------------------------------| +| Class | [Instantiating Beans](#beans-factory-class) | +| Name | [Naming Beans](#beans-beanname) | +| Scope | [Bean Scopes](#beans-factory-scopes) | +| Constructor arguments | [Dependency Injection](#beans-factory-collaborators) | +| Properties | [Dependency Injection](#beans-factory-collaborators) | +| Autowiring mode | [Autowiring Collaborators](#beans-factory-autowire) | +|Lazy initialization mode| [Lazy-initialized Beans](#beans-factory-lazy-init) | +| Initialization method |[Initialization Callbacks](#beans-factory-lifecycle-initializingbean)| +| Destruction method | [Destruction Callbacks](#beans-factory-lifecycle-disposablebean) | + +In addition to bean definitions that contain information on how to create a specific +bean, the `ApplicationContext` implementations also permit the registration of existing +objects that are created outside the container (by users). This is done by accessing the +ApplicationContext’s BeanFactory through the `getBeanFactory()` method, which returns the +BeanFactory `DefaultListableBeanFactory` implementation. `DefaultListableBeanFactory`supports this registration through the `registerSingleton(..)` and`registerBeanDefinition(..)` methods. However, typical applications work solely with beans +defined through regular bean definition metadata. + +| |Bean metadata and manually supplied singleton instances need to be registered as early
as possible, in order for the container to properly reason about them during autowiring
and other introspection steps. While overriding existing metadata and existing
singleton instances is supported to some degree, the registration of new beans at
runtime (concurrently with live access to the factory) is not officially supported and may
lead to concurrent access exceptions, inconsistent state in the bean container, or both.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.3.1. Naming Beans + +Every bean has one or more identifiers. These identifiers must be unique within the +container that hosts the bean. A bean usually has only one identifier. However, if it +requires more than one, the extra ones can be considered aliases. + +In XML-based configuration metadata, you use the `id` attribute, the `name` attribute, or +both to specify the bean identifiers. The `id` attribute lets you specify +exactly one id. Conventionally, these names are alphanumeric ('myBean', +'someService', etc.), but they can contain special characters as well. If you want to +introduce other aliases for the bean, you can also specify them in the `name`attribute, separated by a comma (`,`), semicolon (`;`), or white space. As a +historical note, in versions prior to Spring 3.1, the `id` attribute was +defined as an `xsd:ID` type, which constrained possible characters. As of 3.1, +it is defined as an `xsd:string` type. Note that bean `id` uniqueness is still +enforced by the container, though no longer by XML parsers. + +You are not required to supply a `name` or an `id` for a bean. If you do not supply a`name` or `id` explicitly, the container generates a unique name for that bean. However, +if you want to refer to that bean by name, through the use of the `ref` element or a +Service Locator style lookup, you must provide a name. 
+Motivations for not supplying a name are related to using [inner +beans](#beans-inner-beans) and [autowiring collaborators](#beans-factory-autowire). + +Bean Naming Conventions + +The convention is to use the standard Java convention for instance field names when +naming beans. That is, bean names start with a lowercase letter and are camel-cased +from there. Examples of such names include `accountManager`,`accountService`, `userDao`, `loginController`, and so forth. + +Naming beans consistently makes your configuration easier to read and understand. +Also, if you use Spring AOP, it helps a lot when applying advice to a set of beans +related by name. + +| |With component scanning in the classpath, Spring generates bean names for unnamed
components, following the rules described earlier: essentially, taking the simple class name
and turning its initial character to lower-case. However, in the (unusual) special
case when there is more than one character and both the first and second characters
are upper case, the original casing gets preserved. These are the same rules as
defined by `java.beans.Introspector.decapitalize` (which Spring uses here).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Aliasing a Bean outside the Bean Definition + +In a bean definition itself, you can supply more than one name for the bean, by using a +combination of up to one name specified by the `id` attribute and any number of other +names in the `name` attribute. These names can be equivalent aliases to the same bean +and are useful for some situations, such as letting each component in an application +refer to a common dependency by using a bean name that is specific to that component +itself. + +Specifying all aliases where the bean is actually defined is not always adequate, +however. It is sometimes desirable to introduce an alias for a bean that is defined +elsewhere. This is commonly the case in large systems where configuration is split +amongst each subsystem, with each subsystem having its own set of object definitions. +In XML-based configuration metadata, you can use the `` element to accomplish +this. The following example shows how to do so: + +``` + +``` + +In this case, a bean (in the same container) named `fromName` may also, +after the use of this alias definition, be referred to as `toName`. + +For example, the configuration metadata for subsystem A may refer to a DataSource by the +name of `subsystemA-dataSource`. The configuration metadata for subsystem B may refer to +a DataSource by the name of `subsystemB-dataSource`. 
When composing the main application +that uses both these subsystems, the main application refers to the DataSource by the +name of `myApp-dataSource`. To have all three names refer to the same object, you can +add the following alias definitions to the configuration metadata: + +``` + + +``` + +Now each component and the main application can refer to the dataSource through a name +that is unique and guaranteed not to clash with any other definition (effectively +creating a namespace), yet they refer to the same bean. + +Java-configuration + +If you use Javaconfiguration, the `@Bean` annotation can be used to provide aliases. +See [Using the `@Bean` Annotation](#beans-java-bean-annotation) for details. + +#### 1.3.2. Instantiating Beans + +A bean definition is essentially a recipe for creating one or more objects. The +container looks at the recipe for a named bean when asked and uses the configuration +metadata encapsulated by that bean definition to create (or acquire) an actual object. + +If you use XML-based configuration metadata, you specify the type (or class) of object +that is to be instantiated in the `class` attribute of the `` element. This`class` attribute (which, internally, is a `Class` property on a `BeanDefinition`instance) is usually mandatory. (For exceptions, see[Instantiation by Using an Instance Factory Method](#beans-factory-class-instance-factory-method) and [Bean Definition Inheritance](#beans-child-bean-definitions).) +You can use the `Class` property in one of two ways: + +* Typically, to specify the bean class to be constructed in the case where the container + itself directly creates the bean by calling its constructor reflectively, somewhat + equivalent to Java code with the `new` operator. + +* To specify the actual class containing the `static` factory method that is + invoked to create the object, in the less common case where the container invokes a`static` factory method on a class to create the bean. 
The object type returned + from the invocation of the `static` factory method may be the same class or another + class entirely. + +Nested class names + +If you want to configure a bean definition for a nested class, you may use either the +binary name or the source name of the nested class. + +For example, if you have a class called `SomeThing` in the `com.example` package, and +this `SomeThing` class has a `static` nested class called `OtherThing`, they can be +separated by a dollar sign (`$`) or a dot (`.`). So the value of the `class` attribute in +a bean definition would be `com.example.SomeThing$OtherThing` or`com.example.SomeThing.OtherThing`. + +##### Instantiation with a Constructor + +When you create a bean by the constructor approach, all normal classes are usable by and +compatible with Spring. That is, the class being developed does not need to implement +any specific interfaces or to be coded in a specific fashion. Simply specifying the bean +class should suffice. However, depending on what type of IoC you use for that specific +bean, you may need a default (empty) constructor. + +The Spring IoC container can manage virtually any class you want it to manage. It is +not limited to managing true JavaBeans. Most Spring users prefer actual JavaBeans with +only a default (no-argument) constructor and appropriate setters and getters modeled +after the properties in the container. You can also have more exotic non-bean-style +classes in your container. If, for example, you need to use a legacy connection pool +that absolutely does not adhere to the JavaBean specification, Spring can manage it as +well. + +With XML-based configuration metadata you can specify your bean class as follows: + +``` + + + +``` + +For details about the mechanism for supplying arguments to the constructor (if required) +and setting object instance properties after the object is constructed, see[Injecting Dependencies](#beans-factory-collaborators). 
+ +##### Instantiation with a Static Factory Method + +When defining a bean that you create with a static factory method, use the `class`attribute to specify the class that contains the `static` factory method and an attribute +named `factory-method` to specify the name of the factory method itself. You should be +able to call this method (with optional arguments, as described later) and return a live +object, which subsequently is treated as if it had been created through a constructor. +One use for such a bean definition is to call `static` factories in legacy code. + +The following bean definition specifies that the bean be created by calling a +factory method. The definition does not specify the type (class) of the returned object, +only the class containing the factory method. In this example, the `createInstance()`method must be a static method. The following example shows how to specify a factory method: + +``` + +``` + +The following example shows a class that would work with the preceding bean definition: + +Java + +``` +public class ClientService { + private static ClientService clientService = new ClientService(); + private ClientService() {} + + public static ClientService createInstance() { + return clientService; + } +} +``` + +Kotlin + +``` +class ClientService private constructor() { + companion object { + private val clientService = ClientService() + fun createInstance() = clientService + } +} +``` + +For details about the mechanism for supplying (optional) arguments to the factory method +and setting object instance properties after the object is returned from the factory, +see [Dependencies and Configuration in Detail](#beans-factory-properties-detailed). 
+ +##### Instantiation by Using an Instance Factory Method + +Similar to instantiation through a [static +factory method](#beans-factory-class-static-factory-method), instantiation with an instance factory method invokes a non-static +method of an existing bean from the container to create a new bean. To use this +mechanism, leave the `class` attribute empty and, in the `factory-bean` attribute, +specify the name of a bean in the current (or parent or ancestor) container that contains +the instance method that is to be invoked to create the object. Set the name of the +factory method itself with the `factory-method` attribute. The following example shows +how to configure such a bean: + +``` + + + + + + + +``` + +The following example shows the corresponding class: + +Java + +``` +public class DefaultServiceLocator { + + private static ClientService clientService = new ClientServiceImpl(); + + public ClientService createClientServiceInstance() { + return clientService; + } +} +``` + +Kotlin + +``` +class DefaultServiceLocator { + companion object { + private val clientService = ClientServiceImpl() + } + fun createClientServiceInstance(): ClientService { + return clientService + } +} +``` + +One factory class can also hold more than one factory method, as the following example shows: + +``` + + + + + + + +``` + +The following example shows the corresponding class: + +Java + +``` +public class DefaultServiceLocator { + + private static ClientService clientService = new ClientServiceImpl(); + + private static AccountService accountService = new AccountServiceImpl(); + + public ClientService createClientServiceInstance() { + return clientService; + } + + public AccountService createAccountServiceInstance() { + return accountService; + } +} +``` + +Kotlin + +``` +class DefaultServiceLocator { + companion object { + private val clientService = ClientServiceImpl() + private val accountService = AccountServiceImpl() + } + + fun createClientServiceInstance(): ClientService 
{ + return clientService + } + + fun createAccountServiceInstance(): AccountService { + return accountService + } +} +``` + +This approach shows that the factory bean itself can be managed and configured through +dependency injection (DI). See [Dependencies and +Configuration in Detail](#beans-factory-properties-detailed). + +| |In Spring documentation, "factory bean" refers to a bean that is configured in the
Spring container and that creates objects through an[instance](#beans-factory-class-instance-factory-method) or[static](#beans-factory-class-static-factory-method) factory method. By contrast,`FactoryBean` (notice the capitalization) refers to a Spring-specific[`FactoryBean`](#beans-factory-extension-factorybean) implementation class.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Determining a Bean’s Runtime Type + +The runtime type of a specific bean is non-trivial to determine. A specified class in +the bean metadata definition is just an initial class reference, potentially combined +with a declared factory method or being a `FactoryBean` class which may lead to a +different runtime type of the bean, or not being set at all in case of an instance-level +factory method (which is resolved via the specified `factory-bean` name instead). +Additionally, AOP proxying may wrap a bean instance with an interface-based proxy with +limited exposure of the target bean’s actual type (just its implemented interfaces). + +The recommended way to find out about the actual runtime type of a particular bean is +a `BeanFactory.getType` call for the specified bean name. This takes all of the above +cases into account and returns the type of object that a `BeanFactory.getBean` call is +going to return for the same bean name. + +### 1.4. Dependencies + +A typical enterprise application does not consist of a single object (or bean in the +Spring parlance). Even the simplest application has a few objects that work together to +present what the end-user sees as a coherent application. 
This next section explains how +you go from defining a number of bean definitions that stand alone to a fully realized +application where objects collaborate to achieve a goal. + +#### 1.4.1. Dependency Injection + +Dependency injection (DI) is a process whereby objects define their dependencies +(that is, the other objects with which they work) only through constructor arguments, +arguments to a factory method, or properties that are set on the object instance after +it is constructed or returned from a factory method. The container then injects those +dependencies when it creates the bean. This process is fundamentally the inverse (hence +the name, Inversion of Control) of the bean itself controlling the instantiation +or location of its dependencies on its own by using direct construction of classes or +the Service Locator pattern. + +Code is cleaner with the DI principle, and decoupling is more effective when objects are +provided with their dependencies. The object does not look up its dependencies and does +not know the location or class of the dependencies. As a result, your classes become easier +to test, particularly when the dependencies are on interfaces or abstract base classes, +which allow for stub or mock implementations to be used in unit tests. + +DI exists in two major variants: [Constructor-based +dependency injection](#beans-constructor-injection) and [Setter-based dependency injection](#beans-setter-injection). + +##### Constructor-based Dependency Injection + +Constructor-based DI is accomplished by the container invoking a constructor with a +number of arguments, each representing a dependency. Calling a `static` factory method +with specific arguments to construct the bean is nearly equivalent, and this discussion +treats arguments to a constructor and to a `static` factory method similarly. 
The +following example shows a class that can only be dependency-injected with constructor +injection: + +Java + +``` +public class SimpleMovieLister { + + // the SimpleMovieLister has a dependency on a MovieFinder + private final MovieFinder movieFinder; + + // a constructor so that the Spring container can inject a MovieFinder + public SimpleMovieLister(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } + + // business logic that actually uses the injected MovieFinder is omitted... +} +``` + +Kotlin + +``` +// a constructor so that the Spring container can inject a MovieFinder +class SimpleMovieLister(private val movieFinder: MovieFinder) { + // business logic that actually uses the injected MovieFinder is omitted... +} +``` + +Notice that there is nothing special about this class. It is a POJO that +has no dependencies on container specific interfaces, base classes, or annotations. + +###### Constructor Argument Resolution + +Constructor argument resolution matching occurs by using the argument’s type. If no +potential ambiguity exists in the constructor arguments of a bean definition, the +order in which the constructor arguments are defined in a bean definition is the order +in which those arguments are supplied to the appropriate constructor when the bean is +being instantiated. Consider the following class: + +Java + +``` +package x.y; + +public class ThingOne { + + public ThingOne(ThingTwo thingTwo, ThingThree thingThree) { + // ... + } +} +``` + +Kotlin + +``` +package x.y + +class ThingOne(thingTwo: ThingTwo, thingThree: ThingThree) +``` + +Assuming that the `ThingTwo` and `ThingThree` classes are not related by inheritance, no +potential ambiguity exists. Thus, the following configuration works fine, and you do not +need to specify the constructor argument indexes or types explicitly in the`` element. 
+ +``` + + + + + + + + + + +``` + +When another bean is referenced, the type is known, and matching can occur (as was the +case with the preceding example). When a simple type is used, such as`true`, Spring cannot determine the type of the value, and so cannot match +by type without help. Consider the following class: + +Java + +``` +package examples; + +public class ExampleBean { + + // Number of years to calculate the Ultimate Answer + private final int years; + + // The Answer to Life, the Universe, and Everything + private final String ultimateAnswer; + + public ExampleBean(int years, String ultimateAnswer) { + this.years = years; + this.ultimateAnswer = ultimateAnswer; + } +} +``` + +Kotlin + +``` +package examples + +class ExampleBean( + private val years: Int, // Number of years to calculate the Ultimate Answer + private val ultimateAnswer: String // The Answer to Life, the Universe, and Everything +) +``` + +[]()Constructor argument type matching + +In the preceding scenario, the container can use type matching with simple types if +you explicitly specify the type of the constructor argument by using the `type` attribute, +as the following example shows: + +``` + + + + +``` + +[]()Constructor argument index + +You can use the `index` attribute to specify explicitly the index of constructor arguments, +as the following example shows: + +``` + + + + +``` + +In addition to resolving the ambiguity of multiple simple values, specifying an index +resolves ambiguity where a constructor has two arguments of the same type. + +| |The index is 0-based.| +|---|---------------------| + +[]()Constructor argument name + +You can also use the constructor parameter name for value disambiguation, as the following +example shows: + +``` + + + + +``` + +Keep in mind that, to make this work out of the box, your code must be compiled with the +debug flag enabled so that Spring can look up the parameter name from the constructor. 
+If you cannot or do not want to compile your code with the debug flag, you can use the[@ConstructorProperties](https://download.oracle.com/javase/8/docs/api/java/beans/ConstructorProperties.html)JDK annotation to explicitly name your constructor arguments. The sample class would +then have to look as follows: + +Java + +``` +package examples; + +public class ExampleBean { + + // Fields omitted + + @ConstructorProperties({"years", "ultimateAnswer"}) + public ExampleBean(int years, String ultimateAnswer) { + this.years = years; + this.ultimateAnswer = ultimateAnswer; + } +} +``` + +Kotlin + +``` +package examples + +class ExampleBean +@ConstructorProperties("years", "ultimateAnswer") +constructor(val years: Int, val ultimateAnswer: String) +``` + +##### Setter-based Dependency Injection + +Setter-based DI is accomplished by the container calling setter methods on your +beans after invoking a no-argument constructor or a no-argument `static` factory method to +instantiate your bean. + +The following example shows a class that can only be dependency-injected by using pure +setter injection. This class is conventional Java. It is a POJO that has no dependencies +on container specific interfaces, base classes, or annotations. + +Java + +``` +public class SimpleMovieLister { + + // the SimpleMovieLister has a dependency on the MovieFinder + private MovieFinder movieFinder; + + // a setter method so that the Spring container can inject a MovieFinder + public void setMovieFinder(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } + + // business logic that actually uses the injected MovieFinder is omitted... +} +``` + +Kotlin + +``` +class SimpleMovieLister { + + // a late-initialized property so that the Spring container can inject a MovieFinder + lateinit var movieFinder: MovieFinder + + // business logic that actually uses the injected MovieFinder is omitted... 
+} +``` + +The `ApplicationContext` supports constructor-based and setter-based DI for the beans it +manages. It also supports setter-based DI after some dependencies have already been +injected through the constructor approach. You configure the dependencies in the form of +a `BeanDefinition`, which you use in conjunction with `PropertyEditor` instances to +convert properties from one format to another. However, most Spring users do not work +with these classes directly (that is, programmatically) but rather with XML `bean`definitions, annotated components (that is, classes annotated with `@Component`,`@Controller`, and so forth), or `@Bean` methods in Java-based `@Configuration` classes. +These sources are then converted internally into instances of `BeanDefinition` and used to +load an entire Spring IoC container instance. + +Constructor-based or setter-based DI? + +Since you can mix constructor-based and setter-based DI, it is a good rule of thumb to +use constructors for mandatory dependencies and setter methods or configuration methods +for optional dependencies. Note that use of the [@Required](#beans-required-annotation)annotation on a setter method can be used to make the property be a required dependency; +however, constructor injection with programmatic validation of arguments is preferable. + +The Spring team generally advocates constructor injection, as it lets you implement +application components as immutable objects and ensures that required dependencies +are not `null`. Furthermore, constructor-injected components are always returned to the client +(calling) code in a fully initialized state. As a side note, a large number of constructor +arguments is a bad code smell, implying that the class likely has too many +responsibilities and should be refactored to better address proper separation of concerns. + +Setter injection should primarily only be used for optional dependencies that can be +assigned reasonable default values within the class. 
Otherwise, not-null checks must be +performed everywhere the code uses the dependency. One benefit of setter injection is that +setter methods make objects of that class amenable to reconfiguration or re-injection +later. Management through [JMX MBeans](integration.html#jmx) is therefore a compelling +use case for setter injection. + +Use the DI style that makes the most sense for a particular class. Sometimes, when dealing +with third-party classes for which you do not have the source, the choice is made for you. +For example, if a third-party class does not expose any setter methods, then constructor +injection may be the only available form of DI. + +##### Dependency Resolution Process + +The container performs bean dependency resolution as follows: + +* The `ApplicationContext` is created and initialized with configuration metadata that + describes all the beans. Configuration metadata can be specified by XML, Java code, or + annotations. + +* For each bean, its dependencies are expressed in the form of properties, constructor + arguments, or arguments to the static-factory method (if you use that instead of a + normal constructor). These dependencies are provided to the bean, when the bean is + actually created. + +* Each property or constructor argument is an actual definition of the value to set, or + a reference to another bean in the container. + +* Each property or constructor argument that is a value is converted from its specified + format to the actual type of that property or constructor argument. By default, Spring + can convert a value supplied in string format to all built-in types, such as `int`,`long`, `String`, `boolean`, and so forth. + +The Spring container validates the configuration of each bean as the container is created. +However, the bean properties themselves are not set until the bean is actually created. +Beans that are singleton-scoped and set to be pre-instantiated (the default) are created +when the container is created. 
Scopes are defined in [Bean Scopes](#beans-factory-scopes). Otherwise, +the bean is created only when it is requested. Creation of a bean potentially causes a +graph of beans to be created, as the bean’s dependencies and its dependencies' +dependencies (and so on) are created and assigned. Note that resolution mismatches among +those dependencies may show up late — that is, on first creation of the affected bean. + +Circular dependencies + +If you use predominantly constructor injection, it is possible to create an unresolvable +circular dependency scenario. + +For example: Class A requires an instance of class B through constructor injection, and +class B requires an instance of class A through constructor injection. If you configure +beans for classes A and B to be injected into each other, the Spring IoC container +detects this circular reference at runtime, and throws a`BeanCurrentlyInCreationException`. + +One possible solution is to edit the source code of some classes to be configured by +setters rather than constructors. Alternatively, avoid constructor injection and use +setter injection only. In other words, although it is not recommended, you can configure +circular dependencies with setter injection. + +Unlike the typical case (with no circular dependencies), a circular dependency +between bean A and bean B forces one of the beans to be injected into the other prior to +being fully initialized itself (a classic chicken-and-egg scenario). + +You can generally trust Spring to do the right thing. It detects configuration problems, +such as references to non-existent beans and circular dependencies, at container +load-time. Spring sets properties and resolves dependencies as late as possible, when +the bean is actually created. 
This means that a Spring container that has loaded +correctly can later generate an exception when you request an object if there is a +problem creating that object or one of its dependencies — for example, the bean throws an +exception as a result of a missing or invalid property. This potentially delayed +visibility of some configuration issues is why `ApplicationContext` implementations by +default pre-instantiate singleton beans. At the cost of some upfront time and memory to +create these beans before they are actually needed, you discover configuration issues +when the `ApplicationContext` is created, not later. You can still override this default +behavior so that singleton beans initialize lazily, rather than being eagerly +pre-instantiated. + +If no circular dependencies exist, when one or more collaborating beans are being +injected into a dependent bean, each collaborating bean is totally configured prior +to being injected into the dependent bean. This means that, if bean A has a dependency on +bean B, the Spring IoC container completely configures bean B prior to invoking the +setter method on bean A. In other words, the bean is instantiated (if it is not a +pre-instantiated singleton), its dependencies are set, and the relevant lifecycle +methods (such as a [configured init method](#beans-factory-lifecycle-initializingbean)or the [InitializingBean callback method](#beans-factory-lifecycle-initializingbean)) +are invoked. + +##### Examples of Dependency Injection + +The following example uses XML-based configuration metadata for setter-based DI. 
A small +part of a Spring XML configuration file specifies some bean definitions as follows: + +``` + + + + + + + + + + + + + +``` + +The following example shows the corresponding `ExampleBean` class: + +Java + +``` +public class ExampleBean { + + private AnotherBean beanOne; + + private YetAnotherBean beanTwo; + + private int i; + + public void setBeanOne(AnotherBean beanOne) { + this.beanOne = beanOne; + } + + public void setBeanTwo(YetAnotherBean beanTwo) { + this.beanTwo = beanTwo; + } + + public void setIntegerProperty(int i) { + this.i = i; + } +} +``` + +Kotlin + +``` +class ExampleBean { + lateinit var beanOne: AnotherBean + lateinit var beanTwo: YetAnotherBean + var i: Int = 0 +} +``` + +In the preceding example, setters are declared to match against the properties specified +in the XML file. The following example uses constructor-based DI: + +``` + + + + + + + + + + + + + + +``` + +The following example shows the corresponding `ExampleBean` class: + +Java + +``` +public class ExampleBean { + + private AnotherBean beanOne; + + private YetAnotherBean beanTwo; + + private int i; + + public ExampleBean( + AnotherBean anotherBean, YetAnotherBean yetAnotherBean, int i) { + this.beanOne = anotherBean; + this.beanTwo = yetAnotherBean; + this.i = i; + } +} +``` + +Kotlin + +``` +class ExampleBean( + private val beanOne: AnotherBean, + private val beanTwo: YetAnotherBean, + private val i: Int) +``` + +The constructor arguments specified in the bean definition are used as arguments to +the constructor of the `ExampleBean`. + +Now consider a variant of this example, where, instead of using a constructor, Spring is +told to call a `static` factory method to return an instance of the object: + +``` + + + + + + + + +``` + +The following example shows the corresponding `ExampleBean` class: + +Java + +``` +public class ExampleBean { + + // a private constructor + private ExampleBean(...) { + ... 
+ } + + // a static factory method; the arguments to this method can be + // considered the dependencies of the bean that is returned, + // regardless of how those arguments are actually used. + public static ExampleBean createInstance ( + AnotherBean anotherBean, YetAnotherBean yetAnotherBean, int i) { + + ExampleBean eb = new ExampleBean (...); + // some other operations... + return eb; + } +} +``` + +Kotlin + +``` +class ExampleBean private constructor() { + companion object { + // a static factory method; the arguments to this method can be + // considered the dependencies of the bean that is returned, + // regardless of how those arguments are actually used. + fun createInstance(anotherBean: AnotherBean, yetAnotherBean: YetAnotherBean, i: Int): ExampleBean { + val eb = ExampleBean (...) + // some other operations... + return eb + } + } +} +``` + +Arguments to the `static` factory method are supplied by `` elements, +exactly the same as if a constructor had actually been used. The type of the class being +returned by the factory method does not have to be of the same type as the class that +contains the `static` factory method (although, in this example, it is). An instance +(non-static) factory method can be used in an essentially identical fashion (aside +from the use of the `factory-bean` attribute instead of the `class` attribute), so we +do not discuss those details here. + +#### 1.4.2. Dependencies and Configuration in Detail + +As mentioned in the [previous section](#beans-factory-collaborators), you can define bean +properties and constructor arguments as references to other managed beans (collaborators) +or as values defined inline. Spring’s XML-based configuration metadata supports +sub-element types within its `` and `` elements for this +purpose. + +##### Straight Values (Primitives, Strings, and so on) + +The `value` attribute of the `` element specifies a property or constructor +argument as a human-readable string representation. 
Spring’s[conversion service](#core-convert-ConversionService-API) is used to convert these +values from a `String` to the actual type of the property or argument. +The following example shows various values being set: + +``` + + + + + + + +``` + +The following example uses the [p-namespace](#beans-p-namespace) for even more succinct +XML configuration: + +``` + + + + + +``` + +The preceding XML is more succinct. However, typos are discovered at runtime rather than +design time, unless you use an IDE (such as [IntelliJ +IDEA](https://www.jetbrains.com/idea/) or the [Spring Tools for Eclipse](https://spring.io/tools)) +that supports automatic property completion when you create bean definitions. Such IDE +assistance is highly recommended. + +You can also configure a `java.util.Properties` instance, as follows: + +``` + + + + + + jdbc.driver.className=com.mysql.jdbc.Driver + jdbc.url=jdbc:mysql://localhost:3306/mydb + + + +``` + +The Spring container converts the text inside the `` element into a`java.util.Properties` instance by using the JavaBeans `PropertyEditor` mechanism. This +is a nice shortcut, and is one of a few places where the Spring team do favor the use of +the nested `` element over the `value` attribute style. + +###### The `idref` element + +The `idref` element is simply an error-proof way to pass the `id` (a string value - not +a reference) of another bean in the container to a `` or ``element. The following example shows how to use it: + +``` + + + + + + + +``` + +The preceding bean definition snippet is exactly equivalent (at runtime) to the +following snippet: + +``` + + + + + +``` + +The first form is preferable to the second, because using the `idref` tag lets the +container validate at deployment time that the referenced, named bean actually +exists. In the second variation, no validation is performed on the value that is passed +to the `targetName` property of the `client` bean. 
Typos are only discovered (with most +likely fatal results) when the `client` bean is actually instantiated. If the `client`bean is a [prototype](#beans-factory-scopes) bean, this typo and the resulting exception +may only be discovered long after the container is deployed. + +| |The `local` attribute on the `idref` element is no longer supported in the 4.0 beans
XSD, since it does not provide value over a regular `bean` reference any more. Change
your existing `idref local` references to `idref bean` when upgrading to the 4.0 schema.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +A common place (at least in versions earlier than Spring 2.0) where the `` element +brings value is in the configuration of [AOP interceptors](#aop-pfb-1) in a`ProxyFactoryBean` bean definition. Using `` elements when you specify the +interceptor names prevents you from misspelling an interceptor ID. + +##### References to Other Beans (Collaborators) + +The `ref` element is the final element inside a `` or ``definition element. Here, you set the value of the specified property of a bean to be a +reference to another bean (a collaborator) managed by the container. The referenced bean +is a dependency of the bean whose property is to be set, and it is initialized on demand +as needed before the property is set. (If the collaborator is a singleton bean, it may +already be initialized by the container.) All references are ultimately a reference to +another object. Scoping and validation depend on whether you specify the ID or name of the +other object through the `bean` or `parent` attribute. + +Specifying the target bean through the `bean` attribute of the `` tag is the most +general form and allows creation of a reference to any bean in the same container or +parent container, regardless of whether it is in the same XML file. The value of the`bean` attribute may be the same as the `id` attribute of the target bean or be the same +as one of the values in the `name` attribute of the target bean. 
The following example +shows how to use a `ref` element: + +``` + +``` + +Specifying the target bean through the `parent` attribute creates a reference to a bean +that is in a parent container of the current container. The value of the `parent`attribute may be the same as either the `id` attribute of the target bean or one of the +values in the `name` attribute of the target bean. The target bean must be in a +parent container of the current one. You should use this bean reference variant mainly +when you have a hierarchy of containers and you want to wrap an existing bean in a parent +container with a proxy that has the same name as the parent bean. The following pair of +listings shows how to use the `parent` attribute: + +``` + + + + +``` + +``` + + + class="org.springframework.aop.framework.ProxyFactoryBean"> + + + + + +``` + +| |The `local` attribute on the `ref` element is no longer supported in the 4.0 beans
XSD, since it does not provide value over a regular `bean` reference any more. Change
your existing `ref local` references to `ref bean` when upgrading to the 4.0 schema.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Inner Beans + +A `` element inside the `` or `` elements defines an +inner bean, as the following example shows: + +``` + + + + + + + + + +``` + +An inner bean definition does not require a defined ID or name. If specified, the container +does not use such a value as an identifier. The container also ignores the `scope` flag on +creation, because inner beans are always anonymous and are always created with the outer +bean. It is not possible to access inner beans independently or to inject them into +collaborating beans other than into the enclosing bean. + +As a corner case, it is possible to receive destruction callbacks from a custom scope — for example, for a request-scoped inner bean contained within a singleton bean. The creation +of the inner bean instance is tied to its containing bean, but destruction callbacks let it +participate in the request scope’s lifecycle. This is not a common scenario. Inner beans +typically simply share their containing bean’s scope. + +##### Collections + +The ``, ``, ``, and `` elements set the properties +and arguments of the Java `Collection` types `List`, `Set`, `Map`, and `Properties`, +respectively. 
The following example shows how to use them: + +``` + + + + + [email protected] + [email protected] + [email protected] + + + + + + a list element followed by a reference + + + + + + + + + + + + + + just some string + + + + +``` + +The value of a map key or value, or a set value, can also be any of the +following elements: + +``` +bean | ref | idref | list | set | map | props | value | null +``` + +###### Collection Merging + +The Spring container also supports merging collections. An application +developer can define a parent ``, ``, `` or `` element +and have child ``, ``, `` or `` elements inherit and +override values from the parent collection. That is, the child collection’s values are +the result of merging the elements of the parent and child collections, with the child’s +collection elements overriding values specified in the parent collection. + +This section on merging discusses the parent-child bean mechanism. Readers unfamiliar +with parent and child bean definitions may wish to read the[relevant section](#beans-child-bean-definitions) before continuing. + +The following example demonstrates collection merging: + +``` + + + + + [email protected] + [email protected] + + + + + + + + [email protected] + [email protected] + + + + +``` + +Notice the use of the `merge=true` attribute on the `` element of the`adminEmails` property of the `child` bean definition. When the `child` bean is resolved +and instantiated by the container, the resulting instance has an `adminEmails``Properties` collection that contains the result of merging the child’s`adminEmails` collection with the parent’s `adminEmails` collection. The following listing +shows the result: + +``` +[email protected] +[email protected] +[email protected] +``` + +The child `Properties` collection’s value set inherits all property elements from the +parent ``, and the child’s value for the `support` value overrides the value in +the parent collection. 
+ +This merging behavior applies similarly to the ``, ``, and ``collection types. In the specific case of the `` element, the semantics +associated with the `List` collection type (that is, the notion of an `ordered`collection of values) is maintained. The parent’s values precede all of the child list’s +values. In the case of the `Map`, `Set`, and `Properties` collection types, no ordering +exists. Hence, no ordering semantics are in effect for the collection types that underlie +the associated `Map`, `Set`, and `Properties` implementation types that the container +uses internally. + +###### Limitations of Collection Merging + +You cannot merge different collection types (such as a `Map` and a `List`). If you +do attempt to do so, an appropriate `Exception` is thrown. The `merge` attribute must be +specified on the lower, inherited, child definition. Specifying the `merge` attribute on +a parent collection definition is redundant and does not result in the desired merging. + +###### Strongly-typed collection + +With the introduction of generic types in Java 5, you can use strongly typed collections. +That is, it is possible to declare a `Collection` type such that it can only contain +(for example) `String` elements. If you use Spring to dependency-inject a +strongly-typed `Collection` into a bean, you can take advantage of Spring’s +type-conversion support such that the elements of your strongly-typed `Collection`instances are converted to the appropriate type prior to being added to the `Collection`. 
+The following Java class and bean definition show how to do so: + +Java + +``` +public class SomeClass { + + private Map accounts; + + public void setAccounts(Map accounts) { + this.accounts = accounts; + } +} +``` + +Kotlin + +``` +class SomeClass { + lateinit var accounts: Map +} +``` + +``` + + + + + + + + + + + +``` + +When the `accounts` property of the `something` bean is prepared for injection, the generics +information about the element type of the strongly-typed `Map` is +available by reflection. Thus, Spring’s type conversion infrastructure recognizes the +various value elements as being of type `Float`, and the string values (`9.99`, `2.75`, and`3.99`) are converted into an actual `Float` type. + +##### Null and Empty String Values + +Spring treats empty arguments for properties and the like as empty `Strings`. The +following XML-based configuration metadata snippet sets the `email` property to the empty`String` value (""). + +``` + + + +``` + +The preceding example is equivalent to the following Java code: + +Java + +``` +exampleBean.setEmail(""); +``` + +Kotlin + +``` +exampleBean.email = "" +``` + +The `` element handles `null` values. The following listing shows an example: + +``` + + + + + +``` + +The preceding configuration is equivalent to the following Java code: + +Java + +``` +exampleBean.setEmail(null); +``` + +Kotlin + +``` +exampleBean.email = null +``` + +##### XML Shortcut with the p-namespace + +The p-namespace lets you use the `bean` element’s attributes (instead of nested`` elements) to describe your property values collaborating beans, or both. + +Spring supports extensible configuration formats [with namespaces](#xsd-schemas), +which are based on an XML Schema definition. The `beans` configuration format discussed in +this chapter is defined in an XML Schema document. However, the p-namespace is not defined +in an XSD file and exists only in the core of Spring. 
+ +The following example shows two XML snippets (the first uses +standard XML format and the second uses the p-namespace) that resolve to the same result: + +``` + + + + + + + + +``` + +The example shows an attribute in the p-namespace called `email` in the bean definition. +This tells Spring to include a property declaration. As previously mentioned, the +p-namespace does not have a schema definition, so you can set the name of the attribute +to the property name. + +This next example includes two more bean definitions that both have a reference to +another bean: + +``` + + + + + + + + + + + + + +``` + +This example includes not only a property value using the p-namespace +but also uses a special format to declare property references. Whereas the first bean +definition uses `` to create a reference from bean`john` to bean `jane`, the second bean definition uses `p:spouse-ref="jane"` as an +attribute to do the exact same thing. In this case, `spouse` is the property name, +whereas the `-ref` part indicates that this is not a straight value but rather a +reference to another bean. + +| |The p-namespace is not as flexible as the standard XML format. For example, the format
for declaring property references clashes with properties that end in `Ref`, whereas the
standard XML format does not. We recommend that you choose your approach carefully and
communicate this to your team members to avoid producing XML documents that use all
three approaches at the same time.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### XML Shortcut with the c-namespace + +Similar to the [XML Shortcut with the p-namespace](#beans-p-namespace), the c-namespace, introduced in Spring +3.1, allows inlined attributes for configuring the constructor arguments rather +then nested `constructor-arg` elements. + +The following example uses the `c:` namespace to do the same thing as the from[Constructor-based Dependency Injection](#beans-constructor-injection): + +``` + + + + + + + + + + + + + + + + +``` + +The `c:` namespace uses the same conventions as the `p:` one (a trailing `-ref` for +bean references) for setting the constructor arguments by their names. Similarly, +it needs to be declared in the XML file even though it is not defined in an XSD schema +(it exists inside the Spring core). + +For the rare cases where the constructor argument names are not available (usually if +the bytecode was compiled without debugging information), you can use fallback to the +argument indexes, as follows: + +``` + + +``` + +| |Due to the XML grammar, the index notation requires the presence of the leading `_`,
as XML attribute names cannot start with a number (even though some IDEs allow it).
A corresponding index notation is also available for `<constructor-arg>` elements but
not commonly used since the plain order of declaration is usually sufficient there.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In practice, the constructor resolution[mechanism](#beans-factory-ctor-arguments-resolution) is quite efficient in matching +arguments, so unless you really need to, we recommend using the name notation +throughout your configuration. + +##### Compound Property Names + +You can use compound or nested property names when you set bean properties, as long as +all components of the path except the final property name are not `null`. Consider the +following bean definition: + +``` + + + +``` + +The `something` bean has a `fred` property, which has a `bob` property, which has a `sammy`property, and that final `sammy` property is being set to a value of `123`. In order for +this to work, the `fred` property of `something` and the `bob` property of `fred` must not +be `null` after the bean is constructed. Otherwise, a `NullPointerException` is thrown. + +#### 1.4.3. Using `depends-on` + +If a bean is a dependency of another bean, that usually means that one bean is set as a +property of another. Typically you accomplish this with the [``element](#beans-ref-element) in XML-based configuration metadata. However, sometimes dependencies between +beans are less direct. An example is when a static initializer in a class needs to be +triggered, such as for database driver registration. The `depends-on` attribute can +explicitly force one or more beans to be initialized before the bean using this element +is initialized. 
The following example uses the `depends-on` attribute to express a +dependency on a single bean: + +``` + + +``` + +To express a dependency on multiple beans, supply a list of bean names as the value of +the `depends-on` attribute (commas, whitespace, and semicolons are valid +delimiters): + +``` + + + + + + +``` + +| |The `depends-on` attribute can specify both an initialization-time dependency and,
in the case of [singleton](#beans-factory-scopes-singleton) beans only, a corresponding
destruction-time dependency. Dependent beans that define a `depends-on` relationship
with a given bean are destroyed first, prior to the given bean itself being destroyed.
Thus, `depends-on` can also control shutdown order.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.4.4. Lazy-initialized Beans + +By default, `ApplicationContext` implementations eagerly create and configure all[singleton](#beans-factory-scopes-singleton) beans as part of the initialization +process. Generally, this pre-instantiation is desirable, because errors in the +configuration or surrounding environment are discovered immediately, as opposed to hours +or even days later. When this behavior is not desirable, you can prevent +pre-instantiation of a singleton bean by marking the bean definition as being +lazy-initialized. A lazy-initialized bean tells the IoC container to create a bean +instance when it is first requested, rather than at startup. + +In XML, this behavior is controlled by the `lazy-init` attribute on the ``element, as the following example shows: + +``` + + +``` + +When the preceding configuration is consumed by an `ApplicationContext`, the `lazy` bean +is not eagerly pre-instantiated when the `ApplicationContext` starts, +whereas the `not.lazy` bean is eagerly pre-instantiated. + +However, when a lazy-initialized bean is a dependency of a singleton bean that is +not lazy-initialized, the `ApplicationContext` creates the lazy-initialized bean at +startup, because it must satisfy the singleton’s dependencies. The lazy-initialized bean +is injected into a singleton bean elsewhere that is not lazy-initialized. 
+ +You can also control lazy-initialization at the container level by using the`default-lazy-init` attribute on the `` element, as the following example shows: + +``` + + + +``` + +#### 1.4.5. Autowiring Collaborators + +The Spring container can autowire relationships between collaborating beans. You can +let Spring resolve collaborators (other beans) automatically for your bean by +inspecting the contents of the `ApplicationContext`. Autowiring has the following +advantages: + +* Autowiring can significantly reduce the need to specify properties or constructor + arguments. (Other mechanisms such as a bean template[discussed elsewhere in this chapter](#beans-child-bean-definitions) are also valuable + in this regard.) + +* Autowiring can update a configuration as your objects evolve. For example, if you need + to add a dependency to a class, that dependency can be satisfied automatically without + you needing to modify the configuration. Thus autowiring can be especially useful + during development, without negating the option of switching to explicit wiring when + the code base becomes more stable. + +When using XML-based configuration metadata (see [Dependency Injection](#beans-factory-collaborators)), you +can specify the autowire mode for a bean definition with the `autowire` attribute of the`` element. The autowiring functionality has four modes. You specify autowiring +per bean and can thus choose which ones to autowire. The following table describes the +four autowiring modes: + +| Mode | Explanation | +|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `no` | (Default) No autowiring. Bean references must be defined by `ref` elements. Changing
the default setting is not recommended for larger deployments, because specifying
collaborators explicitly gives greater control and clarity. To some extent, it
documents the structure of a system. | +| `byName` | Autowiring by property name. Spring looks for a bean with the same name as the
property that needs to be autowired. For example, if a bean definition is set to
autowire by name and it contains a `master` property (that is, it has a `setMaster(..)` method), Spring looks for a bean definition named `master` and uses
it to set the property.| +| `byType` | Lets a property be autowired if exactly one bean of the property type exists in
the container. If more than one exists, a fatal exception is thrown, which indicates
that you may not use `byType` autowiring for that bean. If there are no matching
beans, nothing happens (the property is not set). | +|`constructor`| Analogous to `byType` but applies to constructor arguments. If there is not exactly
one bean of the constructor argument type in the container, a fatal error is raised. | + +With `byType` or `constructor` autowiring mode, you can wire arrays and +typed collections. In such cases, all autowire candidates within the container that +match the expected type are provided to satisfy the dependency. You can autowire +strongly-typed `Map` instances if the expected key type is `String`. An autowired `Map`instance’s values consist of all bean instances that match the expected type, and the`Map` instance’s keys contain the corresponding bean names. + +##### Limitations and Disadvantages of Autowiring + +Autowiring works best when it is used consistently across a project. If autowiring is +not used in general, it might be confusing to developers to use it to wire only one or +two bean definitions. + +Consider the limitations and disadvantages of autowiring: + +* Explicit dependencies in `property` and `constructor-arg` settings always override + autowiring. You cannot autowire simple properties such as primitives,`Strings`, and `Classes` (and arrays of such simple properties). This limitation is + by-design. + +* Autowiring is less exact than explicit wiring. Although, as noted in the earlier table, + Spring is careful to avoid guessing in case of ambiguity that might have unexpected + results. The relationships between your Spring-managed objects are no longer + documented explicitly. + +* Wiring information may not be available to tools that may generate documentation from + a Spring container. + +* Multiple bean definitions within the container may match the type specified by the + setter method or constructor argument to be autowired. For arrays, collections, or`Map` instances, this is not necessarily a problem. However, for dependencies that + expect a single value, this ambiguity is not arbitrarily resolved. If no unique bean + definition is available, an exception is thrown. 
+ +In the latter scenario, you have several options: + +* Abandon autowiring in favor of explicit wiring. + +* Avoid autowiring for a bean definition by setting its `autowire-candidate` attributes + to `false`, as described in the [next section](#beans-factory-autowire-candidate). + +* Designate a single bean definition as the primary candidate by setting the`primary` attribute of its `` element to `true`. + +* Implement the more fine-grained control available with annotation-based configuration, + as described in [Annotation-based Container Configuration](#beans-annotation-config). + +##### Excluding a Bean from Autowiring + +On a per-bean basis, you can exclude a bean from autowiring. In Spring’s XML format, set +the `autowire-candidate` attribute of the `` element to `false`. The container +makes that specific bean definition unavailable to the autowiring infrastructure +(including annotation style configurations such as [`@Autowired`](#beans-autowired-annotation)). + +| |The `autowire-candidate` attribute is designed to only affect type-based autowiring.
It does not affect explicit references by name, which get resolved even if the
specified bean is not marked as an autowire candidate. As a consequence, autowiring
by name nevertheless injects a bean if the name matches.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also limit autowire candidates based on pattern-matching against bean names. The +top-level `` element accepts one or more patterns within its`default-autowire-candidates` attribute. For example, to limit autowire candidate status +to any bean whose name ends with `Repository`, provide a value of `*Repository`. To +provide multiple patterns, define them in a comma-separated list. An explicit value of`true` or `false` for a bean definition’s `autowire-candidate` attribute always takes +precedence. For such beans, the pattern matching rules do not apply. + +These techniques are useful for beans that you never want to be injected into other +beans by autowiring. It does not mean that an excluded bean cannot itself be configured by +using autowiring. Rather, the bean itself is not a candidate for autowiring other beans. + +#### 1.4.6. Method Injection + +In most application scenarios, most beans in the container are[singletons](#beans-factory-scopes-singleton). When a singleton bean needs to +collaborate with another singleton bean or a non-singleton bean needs to collaborate +with another non-singleton bean, you typically handle the dependency by defining one +bean as a property of the other. A problem arises when the bean lifecycles are +different. Suppose singleton bean A needs to use non-singleton (prototype) bean B, +perhaps on each method invocation on A. The container creates the singleton bean A only +once, and thus only gets one opportunity to set the properties. The container cannot +provide bean A with a new instance of bean B every time one is needed. 
+ +A solution is to forego some inversion of control. You can [make +bean A aware of the container](#beans-factory-aware) by implementing the `ApplicationContextAware` interface, +and by [making a `getBean("B")` call to the container](#beans-factory-client) ask for (a +typically new) bean B instance every time bean A needs it. The following example +shows this approach: + +Java + +``` +// a class that uses a stateful Command-style class to perform some processing +package fiona.apple; + +// Spring-API imports +import org.springframework.beans.BeansException; +import org.springframework.context.ApplicationContext; +import org.springframework.context.ApplicationContextAware; + +public class CommandManager implements ApplicationContextAware { + + private ApplicationContext applicationContext; + + public Object process(Map commandState) { + // grab a new instance of the appropriate Command + Command command = createCommand(); + // set the state on the (hopefully brand new) Command instance + command.setState(commandState); + return command.execute(); + } + + protected Command createCommand() { + // notice the Spring API dependency! 
+ return this.applicationContext.getBean("command", Command.class); + } + + public void setApplicationContext( + ApplicationContext applicationContext) throws BeansException { + this.applicationContext = applicationContext; + } +} +``` + +Kotlin + +``` +// a class that uses a stateful Command-style class to perform some processing +package fiona.apple + +// Spring-API imports +import org.springframework.context.ApplicationContext +import org.springframework.context.ApplicationContextAware + +class CommandManager : ApplicationContextAware { + + private lateinit var applicationContext: ApplicationContext + + fun process(commandState: Map<*, *>): Any { + // grab a new instance of the appropriate Command + val command = createCommand() + // set the state on the (hopefully brand new) Command instance + command.state = commandState + return command.execute() + } + + // notice the Spring API dependency! + protected fun createCommand() = + applicationContext.getBean("command", Command::class.java) + + override fun setApplicationContext(applicationContext: ApplicationContext) { + this.applicationContext = applicationContext + } +} +``` + +The preceding is not desirable, because the business code is aware of and coupled to the +Spring Framework. Method Injection, a somewhat advanced feature of the Spring IoC +container, lets you handle this use case cleanly. + +You can read more about the motivation for Method Injection in[this blog entry](https://spring.io/blog/2004/08/06/method-injection/). + +##### Lookup Method Injection + +Lookup method injection is the ability of the container to override methods on +container-managed beans and return the lookup result for another named bean in the +container. The lookup typically involves a prototype bean, as in the scenario described +in [the preceding section](#beans-factory-method-injection). 
The Spring Framework +implements this method injection by using bytecode generation from the CGLIB library to +dynamically generate a subclass that overrides the method. + +| |* For this dynamic subclassing to work, the class that the Spring bean container
subclasses cannot be `final`, and the method to be overridden cannot be `final`, either.

* Unit-testing a class that has an `abstract` method requires you to subclass the class
yourself and to supply a stub implementation of the `abstract` method.

* Concrete methods are also necessary for component scanning, which requires concrete
classes to pick up.

* A further key limitation is that lookup methods do not work with factory methods and
in particular not with `@Bean` methods in configuration classes, since, in that case,
the container is not in charge of creating the instance and therefore cannot create
a runtime-generated subclass on the fly.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In the case of the `CommandManager` class in the previous code snippet, the +Spring container dynamically overrides the implementation of the `createCommand()`method. The `CommandManager` class does not have any Spring dependencies, as +the reworked example shows: + +Java + +``` +package fiona.apple; + +// no more Spring imports! + +public abstract class CommandManager { + + public Object process(Object commandState) { + // grab a new instance of the appropriate Command interface + Command command = createCommand(); + // set the state on the (hopefully brand new) Command instance + command.setState(commandState); + return command.execute(); + } + + // okay... but where is the implementation of this method? + protected abstract Command createCommand(); +} +``` + +Kotlin + +``` +package fiona.apple + +// no more Spring imports! + +abstract class CommandManager { + + fun process(commandState: Any): Any { + // grab a new instance of the appropriate Command interface + val command = createCommand() + // set the state on the (hopefully brand new) Command instance + command.state = commandState + return command.execute() + } + + // okay... 
but where is the implementation of this method? + protected abstract fun createCommand(): Command +} +``` + +In the client class that contains the method to be injected (the `CommandManager` in this +case), the method to be injected requires a signature of the following form: + +``` + [abstract] theMethodName(no-arguments); +``` + +If the method is `abstract`, the dynamically-generated subclass implements the method. +Otherwise, the dynamically-generated subclass overrides the concrete method defined in +the original class. Consider the following example: + +``` + + + + + + + + + +``` + +The bean identified as `commandManager` calls its own `createCommand()` method +whenever it needs a new instance of the `myCommand` bean. You must be careful to deploy +the `myCommand` bean as a prototype if that is actually what is needed. If it is +a [singleton](#beans-factory-scopes-singleton), the same instance of the `myCommand`bean is returned each time. + +Alternatively, within the annotation-based component model, you can declare a lookup +method through the `@Lookup` annotation, as the following example shows: + +Java + +``` +public abstract class CommandManager { + + public Object process(Object commandState) { + Command command = createCommand(); + command.setState(commandState); + return command.execute(); + } + + @Lookup("myCommand") + protected abstract Command createCommand(); +} +``` + +Kotlin + +``` +abstract class CommandManager { + + fun process(commandState: Any): Any { + val command = createCommand() + command.state = commandState + return command.execute() + } + + @Lookup("myCommand") + protected abstract fun createCommand(): Command +} +``` + +Or, more idiomatically, you can rely on the target bean getting resolved against the +declared return type of the lookup method: + +Java + +``` +public abstract class CommandManager { + + public Object process(Object commandState) { + Command command = createCommand(); + command.setState(commandState); + return 
command.execute(); + } + + @Lookup + protected abstract Command createCommand(); +} +``` + +Kotlin + +``` +abstract class CommandManager { + + fun process(commandState: Any): Any { + val command = createCommand() + command.state = commandState + return command.execute() + } + + @Lookup + protected abstract fun createCommand(): Command +} +``` + +Note that you should typically declare such annotated lookup methods with a concrete +stub implementation, in order for them to be compatible with Spring’s component +scanning rules where abstract classes get ignored by default. This limitation does not +apply to explicitly registered or explicitly imported bean classes. + +| |Another way of accessing differently scoped target beans is an `ObjectFactory`/`Provider` injection point. See [Scoped Beans as Dependencies](#beans-factory-scopes-other-injection).

You may also find the `ServiceLocatorFactoryBean` (in the`org.springframework.beans.factory.config` package) to be useful.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Arbitrary Method Replacement + +A less useful form of method injection than lookup method injection is the ability to +replace arbitrary methods in a managed bean with another method implementation. You +can safely skip the rest of this section until you actually need this functionality. + +With XML-based configuration metadata, you can use the `replaced-method` element to +replace an existing method implementation with another, for a deployed bean. Consider +the following class, which has a method called `computeValue` that we want to override: + +Java + +``` +public class MyValueCalculator { + + public String computeValue(String input) { + // some real code... + } + + // some other methods... +} +``` + +Kotlin + +``` +class MyValueCalculator { + + fun computeValue(input: String): String { + // some real code... + } + + // some other methods... +} +``` + +A class that implements the `org.springframework.beans.factory.support.MethodReplacer`interface provides the new method definition, as the following example shows: + +Java + +``` +/** + * meant to be used to override the existing computeValue(String) + * implementation in MyValueCalculator + */ +public class ReplacementComputeValue implements MethodReplacer { + + public Object reimplement(Object o, Method m, Object[] args) throws Throwable { + // get the input value, work with it, and return a computed result + String input = (String) args[0]; + ... 
+ return ...; + } +} +``` + +Kotlin + +``` +/** + * meant to be used to override the existing computeValue(String) + * implementation in MyValueCalculator + */ +class ReplacementComputeValue : MethodReplacer { + + override fun reimplement(obj: Any, method: Method, args: Array): Any { + // get the input value, work with it, and return a computed result + val input = args[0] as String; + ... + return ...; + } +} +``` + +The bean definition to deploy the original class and specify the method override would +resemble the following example: + +``` + + + + String + + + + +``` + +You can use one or more `` elements within the ``element to indicate the method signature of the method being overridden. The signature +for the arguments is necessary only if the method is overloaded and multiple variants +exist within the class. For convenience, the type string for an argument may be a +substring of the fully qualified type name. For example, the following all match`java.lang.String`: + +``` +java.lang.String +String +Str +``` + +Because the number of arguments is often enough to distinguish between each possible +choice, this shortcut can save a lot of typing, by letting you type only the +shortest string that matches an argument type. + +### 1.5. Bean Scopes + +When you create a bean definition, you create a recipe for creating actual instances +of the class defined by that bean definition. The idea that a bean definition is a +recipe is important, because it means that, as with a class, you can create many object +instances from a single recipe. + +You can control not only the various dependencies and configuration values that are to +be plugged into an object that is created from a particular bean definition but also control +the scope of the objects created from a particular bean definition. 
This approach is +powerful and flexible, because you can choose the scope of the objects you create +through configuration instead of having to bake in the scope of an object at the Java +class level. Beans can be defined to be deployed in one of a number of scopes. +The Spring Framework supports six scopes, four of which are available only if +you use a web-aware `ApplicationContext`. You can also create[a custom scope.](#beans-factory-scopes-custom) + +The following table describes the supported scopes: + +| Scope | Description | +|-----------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [singleton](#beans-factory-scopes-singleton) | (Default) Scopes a single bean definition to a single object instance for each Spring IoC
container. | +| [prototype](#beans-factory-scopes-prototype) | Scopes a single bean definition to any number of object instances. | +| [request](#beans-factory-scopes-request) |Scopes a single bean definition to the lifecycle of a single HTTP request. That is,
each HTTP request has its own instance of a bean created off the back of a single bean
definition. Only valid in the context of a web-aware Spring `ApplicationContext`.| +| [session](#beans-factory-scopes-session) | Scopes a single bean definition to the lifecycle of an HTTP `Session`. Only valid in
the context of a web-aware Spring `ApplicationContext`. | +| [application](#beans-factory-scopes-application) | Scopes a single bean definition to the lifecycle of a `ServletContext`. Only valid in
the context of a web-aware Spring `ApplicationContext`. | +|[websocket](web.html#websocket-stomp-websocket-scope)| Scopes a single bean definition to the lifecycle of a `WebSocket`. Only valid in
the context of a web-aware Spring `ApplicationContext`. | + +| |As of Spring 3.0, a thread scope is available but is not registered by default. For
more information, see the documentation for [`SimpleThreadScope`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/support/SimpleThreadScope.html).
For instructions on how to register this or any other custom scope, see[Using a Custom Scope](#beans-factory-scopes-custom-using).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.5.1. The Singleton Scope + +Only one shared instance of a singleton bean is managed, and all requests for beans +with an ID or IDs that match that bean definition result in that one specific bean +instance being returned by the Spring container. + +To put it another way, when you define a bean definition and it is scoped as a +singleton, the Spring IoC container creates exactly one instance of the object +defined by that bean definition. This single instance is stored in a cache of such +singleton beans, and all subsequent requests and references for that named bean +return the cached object. The following image shows how the singleton scope works: + +![singleton](images/singleton.png) + +Spring’s concept of a singleton bean differs from the singleton pattern as defined in +the Gang of Four (GoF) patterns book. The GoF singleton hard-codes the scope of an +object such that one and only one instance of a particular class is created per +ClassLoader. The scope of the Spring singleton is best described as being per-container +and per-bean. This means that, if you define one bean for a particular class in a +single Spring container, the Spring container creates one and only one instance +of the class defined by that bean definition. The singleton scope is the default scope +in Spring. To define a bean as a singleton in XML, you can define a bean as shown in the +following example: + +``` + + + + +``` + +#### 1.5.2. 
The Prototype Scope + +The non-singleton prototype scope of bean deployment results in the creation of a new +bean instance every time a request for that specific bean is made. That is, the bean +is injected into another bean or you request it through a `getBean()` method call on the +container. As a rule, you should use the prototype scope for all stateful beans and the +singleton scope for stateless beans. + +The following diagram illustrates the Spring prototype scope: + +![prototype](images/prototype.png) + +(A data access object +(DAO) is not typically configured as a prototype, because a typical DAO does not hold +any conversational state. It was easier for us to reuse the core of the +singleton diagram.) + +The following example defines a bean as a prototype in XML: + +``` + +``` + +In contrast to the other scopes, Spring does not manage the complete lifecycle of a +prototype bean. The container instantiates, configures, and otherwise assembles a +prototype object and hands it to the client, with no further record of that prototype +instance. Thus, although initialization lifecycle callback methods are called on all +objects regardless of scope, in the case of prototypes, configured destruction +lifecycle callbacks are not called. The client code must clean up prototype-scoped +objects and release expensive resources that the prototype beans hold. To get +the Spring container to release resources held by prototype-scoped beans, try using a +custom [bean post-processor](#beans-factory-extension-bpp), which holds a reference to +beans that need to be cleaned up. + +In some respects, the Spring container’s role in regard to a prototype-scoped bean is a +replacement for the Java `new` operator. All lifecycle management past that point must +be handled by the client. (For details on the lifecycle of a bean in the Spring +container, see [Lifecycle Callbacks](#beans-factory-lifecycle).) + +#### 1.5.3. 
Singleton Beans with Prototype-bean Dependencies + +When you use singleton-scoped beans with dependencies on prototype beans, be aware that +dependencies are resolved at instantiation time. Thus, if you dependency-inject a +prototype-scoped bean into a singleton-scoped bean, a new prototype bean is instantiated +and then dependency-injected into the singleton bean. The prototype instance is the sole +instance that is ever supplied to the singleton-scoped bean. + +However, suppose you want the singleton-scoped bean to acquire a new instance of the +prototype-scoped bean repeatedly at runtime. You cannot dependency-inject a +prototype-scoped bean into your singleton bean, because that injection occurs only +once, when the Spring container instantiates the singleton bean and resolves +and injects its dependencies. If you need a new instance of a prototype bean at +runtime more than once, see [Method Injection](#beans-factory-method-injection). + +#### 1.5.4. Request, Session, Application, and WebSocket Scopes + +The `request`, `session`, `application`, and `websocket` scopes are available only +if you use a web-aware Spring `ApplicationContext` implementation (such as`XmlWebApplicationContext`). If you use these scopes with regular Spring IoC containers, +such as the `ClassPathXmlApplicationContext`, an `IllegalStateException` that complains +about an unknown bean scope is thrown. + +##### Initial Web Configuration + +To support the scoping of beans at the `request`, `session`, `application`, and`websocket` levels (web-scoped beans), some minor initial configuration is +required before you define your beans. (This initial setup is not required +for the standard scopes: `singleton` and `prototype`.) + +How you accomplish this initial setup depends on your particular Servlet environment. 
+ +If you access scoped beans within Spring Web MVC, in effect, within a request that is +processed by the Spring `DispatcherServlet`, no special setup is necessary.`DispatcherServlet` already exposes all relevant state. + +If you use a Servlet 2.5 web container, with requests processed outside of Spring’s`DispatcherServlet` (for example, when using JSF or Struts), you need to register the`org.springframework.web.context.request.RequestContextListener` `ServletRequestListener`. +For Servlet 3.0+, this can be done programmatically by using the `WebApplicationInitializer`interface. Alternatively, or for older containers, add the following declaration to +your web application’s `web.xml` file: + +``` + + ... + + + org.springframework.web.context.request.RequestContextListener + + + ... + +``` + +Alternatively, if there are issues with your listener setup, consider using Spring’s`RequestContextFilter`. The filter mapping depends on the surrounding web +application configuration, so you have to change it as appropriate. The following listing +shows the filter part of a web application: + +``` + + ... + + requestContextFilter + org.springframework.web.filter.RequestContextFilter + + + requestContextFilter + /* + + ... + +``` + +`DispatcherServlet`, `RequestContextListener`, and `RequestContextFilter` all do exactly +the same thing, namely bind the HTTP request object to the `Thread` that is servicing +that request. This makes beans that are request- and session-scoped available further +down the call chain. + +##### Request scope + +Consider the following XML configuration for a bean definition: + +``` + +``` + +The Spring container creates a new instance of the `LoginAction` bean by using the`loginAction` bean definition for each and every HTTP request. That is, the`loginAction` bean is scoped at the HTTP request level. 
You can change the internal +state of the instance that is created as much as you want, because other instances +created from the same `loginAction` bean definition do not see these changes in state. +They are particular to an individual request. When the request completes processing, the +bean that is scoped to the request is discarded. + +When using annotation-driven components or Java configuration, the `@RequestScope` annotation +can be used to assign a component to the `request` scope. The following example shows how +to do so: + +Java + +``` +@RequestScope +@Component +public class LoginAction { + // ... +} +``` + +Kotlin + +``` +@RequestScope +@Component +class LoginAction { + // ... +} +``` + +##### Session Scope + +Consider the following XML configuration for a bean definition: + +``` + +``` + +The Spring container creates a new instance of the `UserPreferences` bean by using the`userPreferences` bean definition for the lifetime of a single HTTP `Session`. In other +words, the `userPreferences` bean is effectively scoped at the HTTP `Session` level. As +with request-scoped beans, you can change the internal state of the instance that is +created as much as you want, knowing that other HTTP `Session` instances that are also +using instances created from the same `userPreferences` bean definition do not see these +changes in state, because they are particular to an individual HTTP `Session`. When the +HTTP `Session` is eventually discarded, the bean that is scoped to that particular HTTP`Session` is also discarded. + +When using annotation-driven components or Java configuration, you can use the`@SessionScope` annotation to assign a component to the `session` scope. + +Java + +``` +@SessionScope +@Component +public class UserPreferences { + // ... +} +``` + +Kotlin + +``` +@SessionScope +@Component +class UserPreferences { + // ... 
+} +``` + +##### Application Scope + +Consider the following XML configuration for a bean definition: + +``` + +``` + +The Spring container creates a new instance of the `AppPreferences` bean by using the`appPreferences` bean definition once for the entire web application. That is, the`appPreferences` bean is scoped at the `ServletContext` level and stored as a regular`ServletContext` attribute. This is somewhat similar to a Spring singleton bean but +differs in two important ways: It is a singleton per `ServletContext`, not per Spring`ApplicationContext` (for which there may be several in any given web application), +and it is actually exposed and therefore visible as a `ServletContext` attribute. + +When using annotation-driven components or Java configuration, you can use the`@ApplicationScope` annotation to assign a component to the `application` scope. The +following example shows how to do so: + +Java + +``` +@ApplicationScope +@Component +public class AppPreferences { + // ... +} +``` + +Kotlin + +``` +@ApplicationScope +@Component +class AppPreferences { + // ... +} +``` + +##### WebSocket Scope + +WebSocket scope is associated with the lifecycle of a WebSocket session and applies to +STOMP over WebSocket applications, see[WebSocket scope](web.html#websocket-stomp-websocket-scope) for more details. + +##### Scoped Beans as Dependencies + +The Spring IoC container manages not only the instantiation of your objects (beans), +but also the wiring up of collaborators (or dependencies). If you want to inject (for +example) an HTTP request-scoped bean into another bean of a longer-lived scope, you may +choose to inject an AOP proxy in place of the scoped bean. That is, you need to inject +a proxy object that exposes the same public interface as the scoped object but that can +also retrieve the real target object from the relevant scope (such as an HTTP request) +and delegate method calls onto the real object. 
+ +| |You may also use `<aop:scoped-proxy/>` between beans that are scoped as `singleton`,
with the reference then going through an intermediate proxy that is serializable
and therefore able to re-obtain the target singleton bean on deserialization.

When declaring `<aop:scoped-proxy/>` against a bean of scope `prototype`, every method
call on the shared proxy leads to the creation of a new target instance to which the
call is then being forwarded.

Also, scoped proxies are not the only way to access beans from shorter scopes in a
lifecycle-safe fashion. You may also declare your injection point (that is, the
constructor or setter argument or autowired field) as `ObjectFactory<MyTargetBean>`,
allowing for a `getObject()` call to retrieve the current instance on demand every
time it is needed — without holding on to the instance or storing it separately.

As an extended variant, you may declare `ObjectProvider<MyTargetBean>` which delivers
several additional access variants, including `getIfAvailable` and `getIfUnique`.

The JSR-330 variant of this is called `Provider` and is used with a `Provider<MyTargetBean>` declaration and a corresponding `get()` call for every retrieval attempt.
See [here](#beans-standard-annotations) for more details on JSR-330 overall.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The configuration in the following example is only one line, but it is important to +understand the “why” as well as the “how” behind it: + +``` + + + + + + + (1) + + + + + + + + +``` + +|**1**|The line that defines the proxy.| +|-----|--------------------------------| + +To create such a proxy, you insert a child `` element into a scoped +bean definition (see [Choosing the Type of Proxy to Create](#beans-factory-scopes-other-injection-proxies) and[XML Schema-based configuration](#xsd-schemas)). 
+Why do definitions of beans scoped at the `request`, `session` and custom-scope +levels require the `` element? +Consider the following singleton bean definition and contrast it with +what you need to define for the aforementioned scopes (note that the following`userPreferences` bean definition as it stands is incomplete): + +``` + + + + + +``` + +In the preceding example, the singleton bean (`userManager`) is injected with a reference +to the HTTP `Session`-scoped bean (`userPreferences`). The salient point here is that the`userManager` bean is a singleton: it is instantiated exactly once per +container, and its dependencies (in this case only one, the `userPreferences` bean) are +also injected only once. This means that the `userManager` bean operates only on the +exact same `userPreferences` object (that is, the one with which it was originally injected). + +This is not the behavior you want when injecting a shorter-lived scoped bean into a +longer-lived scoped bean (for example, injecting an HTTP `Session`-scoped collaborating +bean as a dependency into singleton bean). Rather, you need a single `userManager`object, and, for the lifetime of an HTTP `Session`, you need a `userPreferences` object +that is specific to the HTTP `Session`. Thus, the container creates an object that +exposes the exact same public interface as the `UserPreferences` class (ideally an +object that is a `UserPreferences` instance), which can fetch the real`UserPreferences` object from the scoping mechanism (HTTP request, `Session`, and so +forth). The container injects this proxy object into the `userManager` bean, which is +unaware that this `UserPreferences` reference is a proxy. In this example, when a`UserManager` instance invokes a method on the dependency-injected `UserPreferences`object, it is actually invoking a method on the proxy. 
The proxy then fetches the real`UserPreferences` object from (in this case) the HTTP `Session` and delegates the +method invocation onto the retrieved real `UserPreferences` object. + +Thus, you need the following (correct and complete) configuration when injecting`request-` and `session-scoped` beans into collaborating objects, as the following example +shows: + +``` + + + + + + + +``` + +###### Choosing the Type of Proxy to Create + +By default, when the Spring container creates a proxy for a bean that is marked up with +the `` element, a CGLIB-based class proxy is created. + +| |CGLIB proxies intercept only public method calls! Do not call non-public methods
on such a proxy. They are not delegated to the actual scoped target object.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Alternatively, you can configure the Spring container to create standard JDK +interface-based proxies for such scoped beans, by specifying `false` for the value of +the `proxy-target-class` attribute of the `` element. Using JDK +interface-based proxies means that you do not need additional libraries in your +application classpath to affect such proxying. However, it also means that the class of +the scoped bean must implement at least one interface and that all collaborators +into which the scoped bean is injected must reference the bean through one of its +interfaces. The following example shows a proxy based on an interface: + +``` + + + + + + + + +``` + +For more detailed information about choosing class-based or interface-based proxying, +see [Proxying Mechanisms](#aop-proxying). + +#### 1.5.5. Custom Scopes + +The bean scoping mechanism is extensible. You can define your own +scopes or even redefine existing scopes, although the latter is considered bad practice +and you cannot override the built-in `singleton` and `prototype` scopes. + +##### Creating a Custom Scope + +To integrate your custom scopes into the Spring container, you need to implement the`org.springframework.beans.factory.config.Scope` interface, which is described in this +section. For an idea of how to implement your own scopes, see the `Scope`implementations that are supplied with the Spring Framework itself and the[`Scope`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/config/Scope.html) javadoc, +which explains the methods you need to implement in more detail. + +The `Scope` interface has four methods to get objects from the scope, remove them from +the scope, and let them be destroyed. 
+ +The session scope implementation, for example, returns the session-scoped bean (if it +does not exist, the method returns a new instance of the bean, after having bound it to +the session for future reference). The following method returns the object from the +underlying scope: + +Java + +``` +Object get(String name, ObjectFactory objectFactory) +``` + +Kotlin + +``` +fun get(name: String, objectFactory: ObjectFactory<*>): Any +``` + +The session scope implementation, for example, removes the session-scoped bean from the +underlying session. The object should be returned, but you can return `null` if the +object with the specified name is not found. The following method removes the object from +the underlying scope: + +Java + +``` +Object remove(String name) +``` + +Kotlin + +``` +fun remove(name: String): Any +``` + +The following method registers a callback that the scope should invoke when it is +destroyed or when the specified object in the scope is destroyed: + +Java + +``` +void registerDestructionCallback(String name, Runnable destructionCallback) +``` + +Kotlin + +``` +fun registerDestructionCallback(name: String, destructionCallback: Runnable) +``` + +See the [javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/config/Scope.html#registerDestructionCallback)or a Spring scope implementation for more information on destruction callbacks. + +The following method obtains the conversation identifier for the underlying scope: + +Java + +``` +String getConversationId() +``` + +Kotlin + +``` +fun getConversationId(): String +``` + +This identifier is different for each scope. For a session scoped implementation, this +identifier can be the session identifier. + +##### Using a Custom Scope + +After you write and test one or more custom `Scope` implementations, you need to make +the Spring container aware of your new scopes. 
The following method is the central +method to register a new `Scope` with the Spring container: + +Java + +``` +void registerScope(String scopeName, Scope scope); +``` + +Kotlin + +``` +fun registerScope(scopeName: String, scope: Scope) +``` + +This method is declared on the `ConfigurableBeanFactory` interface, which is available +through the `BeanFactory` property on most of the concrete `ApplicationContext`implementations that ship with Spring. + +The first argument to the `registerScope(..)` method is the unique name associated with +a scope. Examples of such names in the Spring container itself are `singleton` and`prototype`. The second argument to the `registerScope(..)` method is an actual instance +of the custom `Scope` implementation that you wish to register and use. + +Suppose that you write your custom `Scope` implementation, and then register it as shown +in the next example. + +| |The next example uses `SimpleThreadScope`, which is included with Spring but is not
registered by default. The instructions would be the same for your own custom `Scope`implementations.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Java + +``` +Scope threadScope = new SimpleThreadScope(); +beanFactory.registerScope("thread", threadScope); +``` + +Kotlin + +``` +val threadScope = SimpleThreadScope() +beanFactory.registerScope("thread", threadScope) +``` + +You can then create bean definitions that adhere to the scoping rules of your custom`Scope`, as follows: + +``` + +``` + +With a custom `Scope` implementation, you are not limited to programmatic registration +of the scope. You can also do the `Scope` registration declaratively, by using the`CustomScopeConfigurer` class, as the following example shows: + +``` + + + + + + + + + + + + + + + + + + + + + + + +``` + +| |When you place `` within a `` declaration for a`FactoryBean` implementation, it is the factory bean itself that is scoped, not the object
returned from `getObject()`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.6. Customizing the Nature of a Bean + +The Spring Framework provides a number of interfaces you can use to customize the nature +of a bean. This section groups them as follows: + +* [Lifecycle Callbacks](#beans-factory-lifecycle) + +* [`ApplicationContextAware` and `BeanNameAware`](#beans-factory-aware) + +* [Other `Aware` Interfaces](#aware-list) + +#### 1.6.1. Lifecycle Callbacks + +To interact with the container’s management of the bean lifecycle, you can implement +the Spring `InitializingBean` and `DisposableBean` interfaces. The container calls`afterPropertiesSet()` for the former and `destroy()` for the latter to let the bean +perform certain actions upon initialization and destruction of your beans. + +| |The JSR-250 `@PostConstruct` and `@PreDestroy` annotations are generally considered best
practice for receiving lifecycle callbacks in a modern Spring application. Using these
annotations means that your beans are not coupled to Spring-specific interfaces.
For details, see [Using `@PostConstruct` and `@PreDestroy`](#beans-postconstruct-and-predestroy-annotations).

If you do not want to use the JSR-250 annotations but you still want to remove
coupling, consider `init-method` and `destroy-method` bean definition metadata.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Internally, the Spring Framework uses `BeanPostProcessor` implementations to process any +callback interfaces it can find and call the appropriate methods. If you need custom +features or other lifecycle behavior Spring does not by default offer, you can +implement a `BeanPostProcessor` yourself. For more information, see[Container Extension Points](#beans-factory-extension). + +In addition to the initialization and destruction callbacks, Spring-managed objects may +also implement the `Lifecycle` interface so that those objects can participate in the +startup and shutdown process, as driven by the container’s own lifecycle. + +The lifecycle callback interfaces are described in this section. + +##### Initialization Callbacks + +The `org.springframework.beans.factory.InitializingBean` interface lets a bean +perform initialization work after the container has set all necessary properties on the +bean. The `InitializingBean` interface specifies a single method: + +``` +void afterPropertiesSet() throws Exception; +``` + +We recommend that you do not use the `InitializingBean` interface, because it +unnecessarily couples the code to Spring. Alternatively, we suggest using +the [`@PostConstruct`](#beans-postconstruct-and-predestroy-annotations) annotation or +specifying a POJO initialization method. 
In the case of XML-based configuration metadata, +you can use the `init-method` attribute to specify the name of the method that has a void +no-argument signature. With Java configuration, you can use the `initMethod` attribute of`@Bean`. See [Receiving Lifecycle Callbacks](#beans-java-lifecycle-callbacks). Consider the following example: + +``` + +``` + +Java + +``` +public class ExampleBean { + + public void init() { + // do some initialization work + } +} +``` + +Kotlin + +``` +class ExampleBean { + + fun init() { + // do some initialization work + } +} +``` + +The preceding example has almost exactly the same effect as the following example +(which consists of two listings): + +``` + +``` + +Java + +``` +public class AnotherExampleBean implements InitializingBean { + + @Override + public void afterPropertiesSet() { + // do some initialization work + } +} +``` + +Kotlin + +``` +class AnotherExampleBean : InitializingBean { + + override fun afterPropertiesSet() { + // do some initialization work + } +} +``` + +However, the first of the two preceding examples does not couple the code to Spring. + +##### Destruction Callbacks + +Implementing the `org.springframework.beans.factory.DisposableBean` interface lets a +bean get a callback when the container that contains it is destroyed. The`DisposableBean` interface specifies a single method: + +``` +void destroy() throws Exception; +``` + +We recommend that you do not use the `DisposableBean` callback interface, because it +unnecessarily couples the code to Spring. Alternatively, we suggest using +the [`@PreDestroy`](#beans-postconstruct-and-predestroy-annotations) annotation or +specifying a generic method that is supported by bean definitions. With XML-based +configuration metadata, you can use the `destroy-method` attribute on the ``. +With Java configuration, you can use the `destroyMethod` attribute of `@Bean`. See[Receiving Lifecycle Callbacks](#beans-java-lifecycle-callbacks). 
Consider the following definition: + +``` + +``` + +Java + +``` +public class ExampleBean { + + public void cleanup() { + // do some destruction work (like releasing pooled connections) + } +} +``` + +Kotlin + +``` +class ExampleBean { + + fun cleanup() { + // do some destruction work (like releasing pooled connections) + } +} +``` + +The preceding definition has almost exactly the same effect as the following definition: + +``` + +``` + +Java + +``` +public class AnotherExampleBean implements DisposableBean { + + @Override + public void destroy() { + // do some destruction work (like releasing pooled connections) + } +} +``` + +Kotlin + +``` +class AnotherExampleBean : DisposableBean { + + override fun destroy() { + // do some destruction work (like releasing pooled connections) + } +} +``` + +However, the first of the two preceding definitions does not couple the code to Spring. + +| |You can assign the `destroy-method` attribute of a `` element a special`(inferred)` value, which instructs Spring to automatically detect a public `close` or`shutdown` method on the specific bean class. (Any class that implements`java.lang.AutoCloseable` or `java.io.Closeable` would therefore match.) You can also set
this special `(inferred)` value on the `default-destroy-method` attribute of a `<beans>` element to apply this behavior to an entire set of beans (see [Default Initialization and Destroy Methods](#beans-factory-lifecycle-default-init-destroy-methods)). Note that this is the
default behavior with Java configuration.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Default Initialization and Destroy Methods + +When you write initialization and destroy method callbacks that do not use the +Spring-specific `InitializingBean` and `DisposableBean` callback interfaces, you +typically write methods with names such as `init()`, `initialize()`, `dispose()`, and so +on. Ideally, the names of such lifecycle callback methods are standardized across a +project so that all developers use the same method names and ensure consistency. + +You can configure the Spring container to “look” for named initialization and destroy +callback method names on every bean. This means that you, as an application +developer, can write your application classes and use an initialization callback called`init()`, without having to configure an `init-method="init"` attribute with each bean +definition. The Spring IoC container calls that method when the bean is created (and in +accordance with the standard lifecycle callback contract [described previously](#beans-factory-lifecycle)). This feature also enforces a consistent naming convention for +initialization and destroy method callbacks. + +Suppose that your initialization callback methods are named `init()` and your destroy +callback methods are named `destroy()`. 
Your class then resembles the class in the +following example: + +Java + +``` +public class DefaultBlogService implements BlogService { + + private BlogDao blogDao; + + public void setBlogDao(BlogDao blogDao) { + this.blogDao = blogDao; + } + + // this is (unsurprisingly) the initialization callback method + public void init() { + if (this.blogDao == null) { + throw new IllegalStateException("The [blogDao] property must be set."); + } + } +} +``` + +Kotlin + +``` +class DefaultBlogService : BlogService { + + private var blogDao: BlogDao? = null + + // this is (unsurprisingly) the initialization callback method + fun init() { + if (blogDao == null) { + throw IllegalStateException("The [blogDao] property must be set.") + } + } +} +``` + +You could then use that class in a bean resembling the following: + +``` + + + + + + + +``` + +The presence of the `default-init-method` attribute on the top-level `` element +attribute causes the Spring IoC container to recognize a method called `init` on the bean +class as the initialization method callback. When a bean is created and assembled, if the +bean class has such a method, it is invoked at the appropriate time. + +You can configure destroy method callbacks similarly (in XML, that is) by using the`default-destroy-method` attribute on the top-level `` element. + +Where existing bean classes already have callback methods that are named at variance +with the convention, you can override the default by specifying (in XML, that is) the +method name by using the `init-method` and `destroy-method` attributes of the ``itself. + +The Spring container guarantees that a configured initialization callback is called +immediately after a bean is supplied with all dependencies. Thus, the initialization +callback is called on the raw bean reference, which means that AOP interceptors and so +forth are not yet applied to the bean. 
A target bean is fully created first and +then an AOP proxy (for example) with its interceptor chain is applied. If the target +bean and the proxy are defined separately, your code can even interact with the raw +target bean, bypassing the proxy. Hence, it would be inconsistent to apply the +interceptors to the `init` method, because doing so would couple the lifecycle of the +target bean to its proxy or interceptors and leave strange semantics when your code +interacts directly with the raw target bean. + +##### Combining Lifecycle Mechanisms + +As of Spring 2.5, you have three options for controlling bean lifecycle behavior: + +* The [`InitializingBean`](#beans-factory-lifecycle-initializingbean) and[`DisposableBean`](#beans-factory-lifecycle-disposablebean) callback interfaces + +* Custom `init()` and `destroy()` methods + +* The [`@PostConstruct` and `@PreDestroy`annotations](#beans-postconstruct-and-predestroy-annotations). You can combine these mechanisms to control a given bean. + +| |If multiple lifecycle mechanisms are configured for a bean and each mechanism is
configured with a different method name, then each configured method is run in the
order listed after this note. However, if the same method name is configured — for example, `init()` for an initialization method — for more than one of these lifecycle mechanisms,
that method is run once, as explained in the[preceding section](#beans-factory-lifecycle-default-init-destroy-methods).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Multiple lifecycle mechanisms configured for the same bean, with different +initialization methods, are called as follows: + +1. Methods annotated with `@PostConstruct` + +2. `afterPropertiesSet()` as defined by the `InitializingBean` callback interface + +3. A custom configured `init()` method + +Destroy methods are called in the same order: + +1. Methods annotated with `@PreDestroy` + +2. `destroy()` as defined by the `DisposableBean` callback interface + +3. A custom configured `destroy()` method + +##### Startup and Shutdown Callbacks + +The `Lifecycle` interface defines the essential methods for any object that has its own +lifecycle requirements (such as starting and stopping some background process): + +``` +public interface Lifecycle { + + void start(); + + void stop(); + + boolean isRunning(); +} +``` + +Any Spring-managed object may implement the `Lifecycle` interface. Then, when the`ApplicationContext` itself receives start and stop signals (for example, for a stop/restart +scenario at runtime), it cascades those calls to all `Lifecycle` implementations +defined within that context. 
It does this by delegating to a `LifecycleProcessor`, shown +in the following listing: + +``` +public interface LifecycleProcessor extends Lifecycle { + + void onRefresh(); + + void onClose(); +} +``` + +Notice that the `LifecycleProcessor` is itself an extension of the `Lifecycle`interface. It also adds two other methods for reacting to the context being refreshed +and closed. + +| |Note that the regular `org.springframework.context.Lifecycle` interface is a plain
contract for explicit start and stop notifications and does not imply auto-startup at context
refresh time. For fine-grained control over auto-startup of a specific bean (including startup phases),
consider implementing `org.springframework.context.SmartLifecycle` instead.

Also, please note that stop notifications are not guaranteed to come before destruction.
On regular shutdown, all `Lifecycle` beans first receive a stop notification before
the general destruction callbacks are being propagated. However, on hot refresh during a
context’s lifetime or on stopped refresh attempts, only destroy methods are called.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The order of startup and shutdown invocations can be important. If a “depends-on” +relationship exists between any two objects, the dependent side starts after its +dependency, and it stops before its dependency. However, at times, the direct +dependencies are unknown. You may only know that objects of a certain type should start +prior to objects of another type. In those cases, the `SmartLifecycle` interface defines +another option, namely the `getPhase()` method as defined on its super-interface,`Phased`. The following listing shows the definition of the `Phased` interface: + +``` +public interface Phased { + + int getPhase(); +} +``` + +The following listing shows the definition of the `SmartLifecycle` interface: + +``` +public interface SmartLifecycle extends Lifecycle, Phased { + + boolean isAutoStartup(); + + void stop(Runnable callback); +} +``` + +When starting, the objects with the lowest phase start first. When stopping, the +reverse order is followed. Therefore, an object that implements `SmartLifecycle` and +whose `getPhase()` method returns `Integer.MIN_VALUE` would be among the first to start +and the last to stop. 
At the other end of the spectrum, a phase value of`Integer.MAX_VALUE` would indicate that the object should be started last and stopped +first (likely because it depends on other processes to be running). When considering the +phase value, it is also important to know that the default phase for any “normal”`Lifecycle` object that does not implement `SmartLifecycle` is `0`. Therefore, any +negative phase value indicates that an object should start before those standard +components (and stop after them). The reverse is true for any positive phase value. + +The stop method defined by `SmartLifecycle` accepts a callback. Any +implementation must invoke that callback’s `run()` method after that implementation’s +shutdown process is complete. That enables asynchronous shutdown where necessary, since +the default implementation of the `LifecycleProcessor` interface,`DefaultLifecycleProcessor`, waits up to its timeout value for the group of objects +within each phase to invoke that callback. The default per-phase timeout is 30 seconds. +You can override the default lifecycle processor instance by defining a bean named`lifecycleProcessor` within the context. If you want only to modify the timeout, +defining the following would suffice: + +``` + + + + +``` + +As mentioned earlier, the `LifecycleProcessor` interface defines callback methods for the +refreshing and closing of the context as well. The latter drives the shutdown +process as if `stop()` had been called explicitly, but it happens when the context is +closing. The 'refresh' callback, on the other hand, enables another feature of`SmartLifecycle` beans. When the context is refreshed (after all objects have been +instantiated and initialized), that callback is invoked. At that point, the +default lifecycle processor checks the boolean value returned by each`SmartLifecycle` object’s `isAutoStartup()` method. 
If `true`, that object is +started at that point rather than waiting for an explicit invocation of the context’s or +its own `start()` method (unlike the context refresh, the context start does not happen +automatically for a standard context implementation). The `phase` value and any +“depends-on” relationships determine the startup order as described earlier. + +##### Shutting Down the Spring IoC Container Gracefully in Non-Web Applications + +| |This section applies only to non-web applications. Spring’s web-based`ApplicationContext` implementations already have code in place to gracefully shut down
the Spring IoC container when the relevant web application is shut down.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you use Spring’s IoC container in a non-web application environment (for +example, in a rich client desktop environment), register a shutdown hook with the +JVM. Doing so ensures a graceful shutdown and calls the relevant destroy methods on your +singleton beans so that all resources are released. You must still configure +and implement these destroy callbacks correctly. + +To register a shutdown hook, call the `registerShutdownHook()` method that is +declared on the `ConfigurableApplicationContext` interface, as the following example shows: + +Java + +``` +import org.springframework.context.ConfigurableApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; + +public final class Boot { + + public static void main(final String[] args) throws Exception { + ConfigurableApplicationContext ctx = new ClassPathXmlApplicationContext("beans.xml"); + + // add a shutdown hook for the above context... + ctx.registerShutdownHook(); + + // app runs here... + + // main method exits, hook is called prior to the app shutting down... + } +} +``` + +Kotlin + +``` +import org.springframework.context.support.ClassPathXmlApplicationContext + +fun main() { + val ctx = ClassPathXmlApplicationContext("beans.xml") + + // add a shutdown hook for the above context... + ctx.registerShutdownHook() + + // app runs here... + + // main method exits, hook is called prior to the app shutting down... +} +``` + +#### 1.6.2. 
`ApplicationContextAware` and `BeanNameAware` + +When an `ApplicationContext` creates an object instance that implements the`org.springframework.context.ApplicationContextAware` interface, the instance is provided +with a reference to that `ApplicationContext`. The following listing shows the definition +of the `ApplicationContextAware` interface: + +``` +public interface ApplicationContextAware { + + void setApplicationContext(ApplicationContext applicationContext) throws BeansException; +} +``` + +Thus, beans can programmatically manipulate the `ApplicationContext` that created them, +through the `ApplicationContext` interface or by casting the reference to a known +subclass of this interface (such as `ConfigurableApplicationContext`, which exposes +additional functionality). One use would be the programmatic retrieval of other beans. +Sometimes this capability is useful. However, in general, you should avoid it, because +it couples the code to Spring and does not follow the Inversion of Control style, +where collaborators are provided to beans as properties. Other methods of the`ApplicationContext` provide access to file resources, publishing application events, +and accessing a `MessageSource`. These additional features are described in[Additional Capabilities of the `ApplicationContext`](#context-introduction). + +Autowiring is another alternative to obtain a reference to the`ApplicationContext`. The *traditional* `constructor` and `byType` autowiring modes +(as described in [Autowiring Collaborators](#beans-factory-autowire)) can provide a dependency of type`ApplicationContext` for a constructor argument or a setter method parameter, +respectively. For more flexibility, including the ability to autowire fields and +multiple parameter methods, use the annotation-based autowiring features. 
If you do, +the `ApplicationContext` is autowired into a field, constructor argument, or method +parameter that expects the `ApplicationContext` type if the field, constructor, or +method in question carries the `@Autowired` annotation. For more information, see[Using `@Autowired`](#beans-autowired-annotation). + +When an `ApplicationContext` creates a class that implements the`org.springframework.beans.factory.BeanNameAware` interface, the class is provided with +a reference to the name defined in its associated object definition. The following listing +shows the definition of the BeanNameAware interface: + +``` +public interface BeanNameAware { + + void setBeanName(String name) throws BeansException; +} +``` + +The callback is invoked after population of normal bean properties but before an +initialization callback such as `InitializingBean.afterPropertiesSet()` or a custom +init-method. + +#### 1.6.3. Other `Aware` Interfaces + +Besides `ApplicationContextAware` and `BeanNameAware` (discussed [earlier](#beans-factory-aware)), +Spring offers a wide range of `Aware` callback interfaces that let beans indicate to the container +that they require a certain infrastructure dependency. As a general rule, the name indicates the +dependency type. The following table summarizes the most important `Aware` interfaces: + +| Name | Injected Dependency | Explained in…​ | +|--------------------------------|-----------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------| +| `ApplicationContextAware` | Declaring `ApplicationContext`. | [`ApplicationContextAware` and `BeanNameAware`](#beans-factory-aware) | +|`ApplicationEventPublisherAware`| Event publisher of the enclosing `ApplicationContext`. |[Additional Capabilities of the `ApplicationContext`](#context-introduction)| +| `BeanClassLoaderAware` | Class loader used to load the bean classes. 
| [Instantiating Beans](#beans-factory-class) | +| `BeanFactoryAware` | Declaring `BeanFactory`. | [The `BeanFactory`](#beans-beanfactory) | +| `BeanNameAware` | Name of the declaring bean. | [`ApplicationContextAware` and `BeanNameAware`](#beans-factory-aware) | +| `LoadTimeWeaverAware` | Defined weaver for processing class definition at load time. | [Load-time Weaving with AspectJ in the Spring Framework](#aop-aj-ltw) | +| `MessageSourceAware` |Configured strategy for resolving messages (with support for parametrization and
internationalization).|[Additional Capabilities of the `ApplicationContext`](#context-introduction)| +| `NotificationPublisherAware` | Spring JMX notification publisher. | [Notifications](integration.html#jmx-notifications) | +| `ResourceLoaderAware` | Configured loader for low-level access to resources. | [Resources](#resources) | +| `ServletConfigAware` | Current `ServletConfig` the container runs in. Valid only in a web-aware Spring`ApplicationContext`. | [Spring MVC](web.html#mvc) | +| `ServletContextAware` | Current `ServletContext` the container runs in. Valid only in a web-aware Spring`ApplicationContext`. | [Spring MVC](web.html#mvc) | + +Note again that using these interfaces ties your code to the Spring API and does not +follow the Inversion of Control style. As a result, we recommend them for infrastructure +beans that require programmatic access to the container. + +### 1.7. Bean Definition Inheritance + +A bean definition can contain a lot of configuration information, including constructor +arguments, property values, and container-specific information, such as the initialization +method, a static factory method name, and so on. A child bean definition inherits +configuration data from a parent definition. The child definition can override some +values or add others as needed. Using parent and child bean definitions can save a lot +of typing. Effectively, this is a form of templating. + +If you work with an `ApplicationContext` interface programmatically, child bean +definitions are represented by the `ChildBeanDefinition` class. Most users do not work +with them on this level. Instead, they configure bean definitions declaratively in a class +such as the `ClassPathXmlApplicationContext`. When you use XML-based configuration +metadata, you can indicate a child bean definition by using the `parent` attribute, +specifying the parent bean as the value of this attribute. 
The following example shows how +to do so: + +``` + + + + + + (1) + + + +``` + +|**1**|Note the `parent` attribute.| +|-----|----------------------------| + +A child bean definition uses the bean class from the parent definition if none is +specified but can also override it. In the latter case, the child bean class must be +compatible with the parent (that is, it must accept the parent’s property values). + +A child bean definition inherits scope, constructor argument values, property values, and +method overrides from the parent, with the option to add new values. Any scope, initialization +method, destroy method, or `static` factory method settings that you specify +override the corresponding parent settings. + +The remaining settings are always taken from the child definition: depends on, +autowire mode, dependency check, singleton, and lazy init. + +The preceding example explicitly marks the parent bean definition as abstract by using +the `abstract` attribute. If the parent definition does not specify a class, explicitly +marking the parent bean definition as `abstract` is required, as the following example +shows: + +``` + + + + + + + + + +``` + +The parent bean cannot be instantiated on its own because it is incomplete, and it is +also explicitly marked as `abstract`. When a definition is `abstract`, it is +usable only as a pure template bean definition that serves as a parent definition for +child definitions. Trying to use such an `abstract` parent bean on its own, by referring +to it as a ref property of another bean or doing an explicit `getBean()` call with the +parent bean ID returns an error. Similarly, the container’s internal`preInstantiateSingletons()` method ignores bean definitions that are defined as +abstract. + +| |`ApplicationContext` pre-instantiates all singletons by default. Therefore, it is
important (at least for singleton beans) that if you have a (parent) bean definition
which you intend to use only as a template, and this definition specifies a class, you
must make sure to set the *abstract* attribute to *true*; otherwise, the application
context will actually (attempt to) pre-instantiate the `abstract` bean.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.8. Container Extension Points + +Typically, an application developer does not need to subclass `ApplicationContext`implementation classes. Instead, the Spring IoC container can be extended by plugging in +implementations of special integration interfaces. The next few sections describe these +integration interfaces. + +#### 1.8.1. Customizing Beans by Using a `BeanPostProcessor` + +The `BeanPostProcessor` interface defines callback methods that you can implement to +provide your own (or override the container’s default) instantiation logic, dependency +resolution logic, and so forth. If you want to implement some custom logic after the +Spring container finishes instantiating, configuring, and initializing a bean, you can +plug in one or more custom `BeanPostProcessor` implementations. + +You can configure multiple `BeanPostProcessor` instances, and you can control the order +in which these `BeanPostProcessor` instances run by setting the `order` property. +You can set this property only if the `BeanPostProcessor` implements the `Ordered`interface. If you write your own `BeanPostProcessor`, you should consider implementing +the `Ordered` interface, too. 
For further details, see the javadoc of the[`BeanPostProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/config/BeanPostProcessor.html)and [`Ordered`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/Ordered.html) interfaces. See also the note +on [programmatic +registration of `BeanPostProcessor` instances](#beans-factory-programmatically-registering-beanpostprocessors). + +| |`BeanPostProcessor` instances operate on bean (or object) instances. That is,
the Spring IoC container instantiates a bean instance and then `BeanPostProcessor` instances do their work.

`BeanPostProcessor` instances are scoped per-container. This is relevant only if you
use container hierarchies. If you define a `BeanPostProcessor` in one container,
it post-processes only the beans in that container. In other words, beans that are
defined in one container are not post-processed by a `BeanPostProcessor` defined in
another container, even if both containers are part of the same hierarchy.

To change the actual bean definition (that is, the blueprint that defines the bean),
you instead need to use a `BeanFactoryPostProcessor`, as described in[Customizing Configuration Metadata with a `BeanFactoryPostProcessor`](#beans-factory-extension-factory-postprocessors).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `org.springframework.beans.factory.config.BeanPostProcessor` interface consists of +exactly two callback methods. When such a class is registered as a post-processor with +the container, for each bean instance that is created by the container, the +post-processor gets a callback from the container both before container +initialization methods (such as `InitializingBean.afterPropertiesSet()` or any +declared `init` method) are called, and after any bean initialization callbacks. +The post-processor can take any action with the bean instance, including ignoring the +callback completely. A bean post-processor typically checks for callback interfaces, +or it may wrap a bean with a proxy. Some Spring AOP infrastructure classes are +implemented as bean post-processors in order to provide proxy-wrapping logic. 
+ +An `ApplicationContext` automatically detects any beans that are defined in the +configuration metadata that implement the `BeanPostProcessor` interface. The`ApplicationContext` registers these beans as post-processors so that they can be called +later, upon bean creation. Bean post-processors can be deployed in the container in the +same fashion as any other beans. + +Note that, when declaring a `BeanPostProcessor` by using an `@Bean` factory method on a +configuration class, the return type of the factory method should be the implementation +class itself or at least the `org.springframework.beans.factory.config.BeanPostProcessor`interface, clearly indicating the post-processor nature of that bean. Otherwise, the`ApplicationContext` cannot autodetect it by type before fully creating it. +Since a `BeanPostProcessor` needs to be instantiated early in order to apply to the +initialization of other beans in the context, this early type detection is critical. + +| |Programmatically registering `BeanPostProcessor` instances

While the recommended approach for `BeanPostProcessor` registration is through `ApplicationContext` auto-detection (as described earlier), you can register them
programmatically against a `ConfigurableBeanFactory` by using the `addBeanPostProcessor` method. This can be useful when you need to evaluate conditional logic before
registration or even for copying bean post-processors across contexts in a hierarchy.
Note, however, that `BeanPostProcessor` instances added programmatically do not respect
the `Ordered` interface. Here, it is the order of registration that dictates the order
of execution. Note also that `BeanPostProcessor` instances registered programmatically
are always processed before those registered through auto-detection, regardless of any
explicit ordering.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`BeanPostProcessor` instances and AOP auto-proxying

Classes that implement the `BeanPostProcessor` interface are special and are treated
differently by the container. All `BeanPostProcessor` instances and beans that they
directly reference are instantiated on startup, as part of the special startup phase
of the `ApplicationContext`. Next, all `BeanPostProcessor` instances are registered
in a sorted fashion and applied to all further beans in the container. Because AOP
auto-proxying is implemented as a `BeanPostProcessor` itself, neither `BeanPostProcessor` instances nor the beans they directly reference are eligible for auto-proxying and,
thus, do not have aspects woven into them.

For any such bean, you should see an informational log message: `Bean someBean is not
eligible for getting processed by all BeanPostProcessor interfaces (for example: not
eligible for auto-proxying)`.

If you have beans wired into your `BeanPostProcessor` by using autowiring or `@Resource` (which may fall back to autowiring), Spring might access unexpected beans
when searching for type-matching dependency candidates and, therefore, make them
ineligible for auto-proxying or other kinds of bean post-processing. For example, if you
have a dependency annotated with `@Resource` where the field or setter name does not
directly correspond to the declared name of a bean and no name attribute is used,
Spring accesses other beans for matching them by type.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following examples show how to write, register, and use `BeanPostProcessor` instances +in an `ApplicationContext`. + +##### Example: Hello World, `BeanPostProcessor`-style + +This first example illustrates basic usage. 
The example shows a custom`BeanPostProcessor` implementation that invokes the `toString()` method of each bean as +it is created by the container and prints the resulting string to the system console. + +The following listing shows the custom `BeanPostProcessor` implementation class definition: + +Java + +``` +package scripting; + +import org.springframework.beans.factory.config.BeanPostProcessor; + +public class InstantiationTracingBeanPostProcessor implements BeanPostProcessor { + + // simply return the instantiated bean as-is + public Object postProcessBeforeInitialization(Object bean, String beanName) { + return bean; // we could potentially return any object reference here... + } + + public Object postProcessAfterInitialization(Object bean, String beanName) { + System.out.println("Bean '" + beanName + "' created : " + bean.toString()); + return bean; + } +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.config.BeanPostProcessor + +class InstantiationTracingBeanPostProcessor : BeanPostProcessor { + + // simply return the instantiated bean as-is + override fun postProcessBeforeInitialization(bean: Any, beanName: String): Any? { + return bean // we could potentially return any object reference here... + } + + override fun postProcessAfterInitialization(bean: Any, beanName: String): Any? { + println("Bean '$beanName' created : $bean") + return bean + } +} +``` + +The following `beans` element uses the `InstantiationTracingBeanPostProcessor`: + +``` + + + + + + + + + + + +``` + +Notice how the `InstantiationTracingBeanPostProcessor` is merely defined. It does not +even have a name, and, because it is a bean, it can be dependency-injected as you would any +other bean. (The preceding configuration also defines a bean that is backed by a Groovy +script. The Spring dynamic language support is detailed in the chapter entitled[Dynamic Language Support](languages.html#dynamic-language).) 
+ +The following Java application runs the preceding code and configuration: + +Java + +``` +import org.springframework.context.ApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; +import org.springframework.scripting.Messenger; + +public final class Boot { + + public static void main(final String[] args) throws Exception { + ApplicationContext ctx = new ClassPathXmlApplicationContext("scripting/beans.xml"); + Messenger messenger = ctx.getBean("messenger", Messenger.class); + System.out.println(messenger); + } + +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +fun main() { + val ctx = ClassPathXmlApplicationContext("scripting/beans.xml") + val messenger = ctx.getBean("messenger") + println(messenger) +} +``` + +The output of the preceding application resembles the following: + +``` +Bean 'messenger' created : [email protected] +[email protected] +``` + +##### Example: The `AutowiredAnnotationBeanPostProcessor` + +Using callback interfaces or annotations in conjunction with a custom `BeanPostProcessor`implementation is a common means of extending the Spring IoC container. An example is +Spring’s `AutowiredAnnotationBeanPostProcessor` — a `BeanPostProcessor` implementation +that ships with the Spring distribution and autowires annotated fields, setter methods, +and arbitrary config methods. + +#### 1.8.2. Customizing Configuration Metadata with a `BeanFactoryPostProcessor` #### + +The next extension point that we look at is the`org.springframework.beans.factory.config.BeanFactoryPostProcessor`. The semantics of +this interface are similar to those of the `BeanPostProcessor`, with one major +difference: `BeanFactoryPostProcessor` operates on the bean configuration metadata. +That is, the Spring IoC container lets a `BeanFactoryPostProcessor` read the +configuration metadata and potentially change it *before* the container instantiates +any beans other than `BeanFactoryPostProcessor` instances. 
+ +You can configure multiple `BeanFactoryPostProcessor` instances, and you can control the order in +which these `BeanFactoryPostProcessor` instances run by setting the `order` property. +However, you can only set this property if the `BeanFactoryPostProcessor` implements the`Ordered` interface. If you write your own `BeanFactoryPostProcessor`, you should +consider implementing the `Ordered` interface, too. See the javadoc of the[`BeanFactoryPostProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/config/BeanFactoryPostProcessor.html)and [`Ordered`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/Ordered.html) interfaces for more details. + +| |If you want to change the actual bean instances (that is, the objects that are created
from the configuration metadata), then you instead need to use a `BeanPostProcessor` (described earlier in [Customizing Beans by Using a `BeanPostProcessor`](#beans-factory-extension-bpp)). While it is technically possible
to work with bean instances within a `BeanFactoryPostProcessor` (for example, by using `BeanFactory.getBean()`), doing so causes premature bean instantiation, violating the
standard container lifecycle. This may cause negative side effects, such as bypassing
bean post-processing.

Also, `BeanFactoryPostProcessor` instances are scoped per-container. This is only relevant
if you use container hierarchies. If you define a `BeanFactoryPostProcessor` in one
container, it is applied only to the bean definitions in that container. Bean definitions
in one container are not post-processed by `BeanFactoryPostProcessor` instances in another
container, even if both containers are part of the same hierarchy.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +A bean factory post-processor is automatically run when it is declared inside an`ApplicationContext`, in order to apply changes to the configuration metadata that +define the container. Spring includes a number of predefined bean factory +post-processors, such as `PropertyOverrideConfigurer` and`PropertySourcesPlaceholderConfigurer`. You can also use a custom `BeanFactoryPostProcessor` — for example, to register custom property editors. + +An `ApplicationContext` automatically detects any beans that are deployed into it that +implement the `BeanFactoryPostProcessor` interface. It uses these beans as bean factory +post-processors, at the appropriate time. You can deploy these post-processor beans as +you would any other bean. + +| |As with `BeanPostProcessor`s , you typically do not want to configure`BeanFactoryPostProcessor`s for lazy initialization. 
If no other bean references a `Bean(Factory)PostProcessor`, that post-processor will not get instantiated at all.
Thus, marking it for lazy initialization will be ignored, and the`Bean(Factory)PostProcessor` will be instantiated eagerly even if you set the`default-lazy-init` attribute to `true` on the declaration of your `` element.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Example: The Class Name Substitution `PropertySourcesPlaceholderConfigurer` ##### + +You can use the `PropertySourcesPlaceholderConfigurer` to externalize property values +from a bean definition in a separate file by using the standard Java `Properties` format. +Doing so enables the person deploying an application to customize environment-specific +properties, such as database URLs and passwords, without the complexity or risk of +modifying the main XML definition file or files for the container. + +Consider the following XML-based configuration metadata fragment, where a `DataSource`with placeholder values is defined: + +``` + + + + + + + + + + +``` + +The example shows properties configured from an external `Properties` file. At runtime, +a `PropertySourcesPlaceholderConfigurer` is applied to the metadata that replaces some +properties of the DataSource. The values to replace are specified as placeholders of the +form `${property-name}`, which follows the Ant and log4j and JSP EL style. 
+ +The actual values come from another file in the standard Java `Properties` format: + +``` +jdbc.driverClassName=org.hsqldb.jdbcDriver +jdbc.url=jdbc:hsqldb:hsql://production:9002 +jdbc.username=sa +jdbc.password=root +``` + +Therefore, the `${jdbc.username}` string is replaced at runtime with the value, 'sa', and +the same applies for other placeholder values that match keys in the properties file. +The `PropertySourcesPlaceholderConfigurer` checks for placeholders in most properties and +attributes of a bean definition. Furthermore, you can customize the placeholder prefix and suffix. + +With the `context` namespace introduced in Spring 2.5, you can configure property placeholders +with a dedicated configuration element. You can provide one or more locations as a +comma-separated list in the `location` attribute, as the following example shows: + +``` + +``` + +The `PropertySourcesPlaceholderConfigurer` not only looks for properties in the `Properties`file you specify. By default, if it cannot find a property in the specified properties files, +it checks against Spring `Environment` properties and regular Java `System` properties. + +| |You can use the `PropertySourcesPlaceholderConfigurer` to substitute class names, which
is sometimes useful when you have to pick a particular implementation class at runtime.
The following example shows how to do so:

```


classpath:com/something/strategy.properties


custom.strategy.class=com.something.DefaultStrategy




```

If the class cannot be resolved at runtime to a valid class, resolution of the bean
fails when it is about to be created, which is during the `preInstantiateSingletons()`phase of an `ApplicationContext` for a non-lazy-init bean.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Example: The `PropertyOverrideConfigurer` + +The `PropertyOverrideConfigurer`, another bean factory post-processor, resembles the`PropertySourcesPlaceholderConfigurer`, but unlike the latter, the original definitions +can have default values or no values at all for bean properties. If an overriding`Properties` file does not have an entry for a certain bean property, the default +context definition is used. + +Note that the bean definition is not aware of being overridden, so it is not +immediately obvious from the XML definition file that the override configurer is being +used. In case of multiple `PropertyOverrideConfigurer` instances that define different +values for the same bean property, the last one wins, due to the overriding mechanism. 
+ +Properties file configuration lines take the following format: + +``` +beanName.property=value +``` + +The following listing shows an example of the format: + +``` +dataSource.driverClassName=com.mysql.jdbc.Driver +dataSource.url=jdbc:mysql:mydb +``` + +This example file can be used with a container definition that contains a bean called`dataSource` that has `driver` and `url` properties. + +Compound property names are also supported, as long as every component of the path +except the final property being overridden is already non-null (presumably initialized +by the constructors). In the following example, the `sammy` property of the `bob` property of the `fred` property of the `tom` bean +is set to the scalar value `123`: + +``` +tom.fred.bob.sammy=123 +``` + +| |Specified override values are always literal values. They are not translated into
bean references. This convention also applies when the original value in the XML bean
definition specifies a bean reference.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +With the `context` namespace introduced in Spring 2.5, it is possible to configure +property overriding with a dedicated configuration element, as the following example shows: + +``` + +``` + +#### 1.8.3. Customizing Instantiation Logic with a `FactoryBean` + +You can implement the `org.springframework.beans.factory.FactoryBean` interface for objects that +are themselves factories. + +The `FactoryBean` interface is a point of pluggability into the Spring IoC container’s +instantiation logic. If you have complex initialization code that is better expressed in +Java as opposed to a (potentially) verbose amount of XML, you can create your own`FactoryBean`, write the complex initialization inside that class, and then plug your +custom `FactoryBean` into the container. + +The `FactoryBean` interface provides three methods: + +* `T getObject()`: Returns an instance of the object this factory creates. The + instance can possibly be shared, depending on whether this factory returns singletons + or prototypes. + +* `boolean isSingleton()`: Returns `true` if this `FactoryBean` returns singletons or`false` otherwise. The default implementation of this method returns `true`. + +* `Class getObjectType()`: Returns the object type returned by the `getObject()` method + or `null` if the type is not known in advance. + +The `FactoryBean` concept and interface are used in a number of places within the Spring +Framework. More than 50 implementations of the `FactoryBean` interface ship with Spring +itself. 
+ +When you need to ask a container for an actual `FactoryBean` instance itself instead of +the bean it produces, prefix the bean’s `id` with the ampersand symbol (`&`) when +calling the `getBean()` method of the `ApplicationContext`. So, for a given `FactoryBean`with an `id` of `myBean`, invoking `getBean("myBean")` on the container returns the +product of the `FactoryBean`, whereas invoking `getBean("&myBean")` returns the`FactoryBean` instance itself. + +### 1.9. Annotation-based Container Configuration + +Are annotations better than XML for configuring Spring? + +The introduction of annotation-based configuration raised the question of whether this +approach is “better” than XML. The short answer is “it depends.” The long answer is +that each approach has its pros and cons, and, usually, it is up to the developer to +decide which strategy suits them better. Due to the way they are defined, annotations +provide a lot of context in their declaration, leading to shorter and more concise +configuration. However, XML excels at wiring up components without touching their source +code or recompiling them. Some developers prefer having the wiring close to the source +while others argue that annotated classes are no longer POJOs and, furthermore, that the +configuration becomes decentralized and harder to control. + +No matter the choice, Spring can accommodate both styles and even mix them together. +It is worth pointing out that through its [JavaConfig](#beans-java) option, Spring lets +annotations be used in a non-invasive way, without touching the target components +source code and that, in terms of tooling, all configuration styles are supported by the[Spring Tools for Eclipse](https://spring.io/tools). + +An alternative to XML setup is provided by annotation-based configuration, which relies on +the bytecode metadata for wiring up components instead of angle-bracket declarations. 
+Instead of using XML to describe a bean wiring, the developer moves the configuration +into the component class itself by using annotations on the relevant class, method, or +field declaration. As mentioned in [Example: The `AutowiredAnnotationBeanPostProcessor`](#beans-factory-extension-bpp-examples-aabpp), using +a `BeanPostProcessor` in conjunction with annotations is a common means of extending the +Spring IoC container. For example, Spring 2.0 introduced the possibility of enforcing +required properties with the [`@Required`](#beans-required-annotation) annotation. Spring +2.5 made it possible to follow that same general approach to drive Spring’s dependency +injection. Essentially, the `@Autowired` annotation provides the same capabilities as +described in [Autowiring Collaborators](#beans-factory-autowire) but with more fine-grained control and wider +applicability. Spring 2.5 also added support for JSR-250 annotations, such as`@PostConstruct` and `@PreDestroy`. Spring 3.0 added support for JSR-330 (Dependency +Injection for Java) annotations contained in the `javax.inject` package such as `@Inject`and `@Named`. Details about those annotations can be found in the[relevant section](#beans-standard-annotations). + +| |Annotation injection is performed before XML injection. Thus, the XML configuration
overrides the annotations for properties wired through both approaches.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------| + +As always, you can register the post-processors as individual bean definitions, but they +can also be implicitly registered by including the following tag in an XML-based Spring +configuration (notice the inclusion of the `context` namespace): + +``` + + + + + + +``` + +The `` element implicitly registers the following post-processors: + +* [`ConfigurationClassPostProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/ConfigurationClassPostProcessor.html) + +* [`AutowiredAnnotationBeanPostProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessor.html) + +* [`CommonAnnotationBeanPostProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/CommonAnnotationBeanPostProcessor.html) + +* [`PersistenceAnnotationBeanPostProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/orm/jpa/support/PersistenceAnnotationBeanPostProcessor.html) + +* [`EventListenerMethodProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/event/EventListenerMethodProcessor.html) + +| |`` only looks for annotations on beans in the same
application context in which it is defined. This means that, if you put `` in a `WebApplicationContext` for a `DispatcherServlet`,
it only checks for `@Autowired` beans in your controllers, and not your services. See[The DispatcherServlet](web.html#mvc-servlet) for more information.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.9.1. @Required + +The `@Required` annotation applies to bean property setter methods, as in the following +example: + +Java + +``` +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + @Required + public void setMovieFinder(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } + + // ... +} +``` + +Kotlin + +``` +class SimpleMovieLister { + + @Required + lateinit var movieFinder: MovieFinder + + // ... +} +``` + +This annotation indicates that the affected bean property must be populated at +configuration time, through an explicit property value in a bean definition or through +autowiring. The container throws an exception if the affected bean property has not been +populated. This allows for eager and explicit failure, avoiding `NullPointerException`instances or the like later on. We still recommend that you put assertions into the +bean class itself (for example, into an init method). Doing so enforces those required +references and values even when you use the class outside of a container. 
+ +| |The [`RequiredAnnotationBeanPostProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/annotation/RequiredAnnotationBeanPostProcessor.html)must be registered as a bean to enable support for the `@Required` annotation.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The `@Required` annotation and `RequiredAnnotationBeanPostProcessor` are formally
deprecated as of Spring Framework 5.1, in favor of using constructor injection for
required settings (or a custom implementation of `InitializingBean.afterPropertiesSet()`or a custom `@PostConstruct` method along with bean property setter methods).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.9.2. Using `@Autowired` + +| |JSR 330’s `@Inject` annotation can be used in place of Spring’s `@Autowired` annotation in the
examples included in this section. See [here](#beans-standard-annotations) for more details.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can apply the `@Autowired` annotation to constructors, as the following example shows: + +Java + +``` +public class MovieRecommender { + + private final CustomerPreferenceDao customerPreferenceDao; + + @Autowired + public MovieRecommender(CustomerPreferenceDao customerPreferenceDao) { + this.customerPreferenceDao = customerPreferenceDao; + } + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender @Autowired constructor( + private val customerPreferenceDao: CustomerPreferenceDao) +``` + +| |As of Spring Framework 4.3, an `@Autowired` annotation on such a constructor is no longer
necessary if the target bean defines only one constructor to begin with. However, if
several constructors are available and there is no primary/default constructor, at least
one of the constructors must be annotated with `@Autowired` in order to instruct the
container which one to use. See the discussion on[constructor resolution](#beans-autowired-annotation-constructor-resolution) for details.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also apply the `@Autowired` annotation to *traditional* setter methods, +as the following example shows: + +Java + +``` +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + @Autowired + public void setMovieFinder(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } + + // ... +} +``` + +Kotlin + +``` +class SimpleMovieLister { + + @Autowired + lateinit var movieFinder: MovieFinder + + // ... + +} +``` + +You can also apply the annotation to methods with arbitrary names and multiple +arguments, as the following example shows: + +Java + +``` +public class MovieRecommender { + + private MovieCatalog movieCatalog; + + private CustomerPreferenceDao customerPreferenceDao; + + @Autowired + public void prepare(MovieCatalog movieCatalog, + CustomerPreferenceDao customerPreferenceDao) { + this.movieCatalog = movieCatalog; + this.customerPreferenceDao = customerPreferenceDao; + } + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + private lateinit var movieCatalog: MovieCatalog + + private lateinit var customerPreferenceDao: CustomerPreferenceDao + + @Autowired + fun prepare(movieCatalog: MovieCatalog, + customerPreferenceDao: CustomerPreferenceDao) { + this.movieCatalog = movieCatalog + this.customerPreferenceDao = customerPreferenceDao + } + + // ... 
+} +``` + +You can apply `@Autowired` to fields as well and even mix it with constructors, as the +following example shows: + +Java + +``` +public class MovieRecommender { + + private final CustomerPreferenceDao customerPreferenceDao; + + @Autowired + private MovieCatalog movieCatalog; + + @Autowired + public MovieRecommender(CustomerPreferenceDao customerPreferenceDao) { + this.customerPreferenceDao = customerPreferenceDao; + } + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender @Autowired constructor( + private val customerPreferenceDao: CustomerPreferenceDao) { + + @Autowired + private lateinit var movieCatalog: MovieCatalog + + // ... +} +``` + +| |Make sure that your target components (for example, `MovieCatalog` or `CustomerPreferenceDao`)
are consistently declared by the type that you use for your `@Autowired`-annotated
injection points. Otherwise, injection may fail due to a "no type match found" error at runtime.

For XML-defined beans or component classes found via classpath scanning, the container
usually knows the concrete type up front. However, for `@Bean` factory methods, you need
to make sure that the declared return type is sufficiently expressive. For components
that implement several interfaces or for components potentially referred to by their
implementation type, consider declaring the most specific return type on your factory
method (at least as specific as required by the injection points referring to your bean).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also instruct Spring to provide all beans of a particular type from the`ApplicationContext` by adding the `@Autowired` annotation to a field or method that +expects an array of that type, as the following example shows: + +Java + +``` +public class MovieRecommender { + + @Autowired + private MovieCatalog[] movieCatalogs; + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + @Autowired + private lateinit var movieCatalogs: Array + + // ... +} +``` + +The same applies for typed collections, as the following example shows: + +Java + +``` +public class MovieRecommender { + + private Set movieCatalogs; + + @Autowired + public void setMovieCatalogs(Set movieCatalogs) { + this.movieCatalogs = movieCatalogs; + } + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + @Autowired + lateinit var movieCatalogs: Set + + // ... +} +``` + +| |Your target beans can implement the `org.springframework.core.Ordered` interface or use
the `@Order` or standard `@Priority` annotation if you want items in the array or list
to be sorted in a specific order. Otherwise, their order follows the registration
order of the corresponding target bean definitions in the container.

You can declare the `@Order` annotation at the target class level and on `@Bean` methods,
potentially for individual bean definitions (in case of multiple definitions that
use the same bean class). `@Order` values may influence priorities at injection points,
but be aware that they do not influence singleton startup order, which is an
orthogonal concern determined by dependency relationships and `@DependsOn` declarations.

Note that the standard `javax.annotation.Priority` annotation is not available at the `@Bean` level, since it cannot be declared on methods. Its semantics can be modeled
through `@Order` values in combination with `@Primary` on a single bean for each type.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Even typed `Map` instances can be autowired as long as the expected key type is `String`. +The map values contain all beans of the expected type, and the keys contain the +corresponding bean names, as the following example shows: + +Java + +``` +public class MovieRecommender { + + private Map movieCatalogs; + + @Autowired + public void setMovieCatalogs(Map movieCatalogs) { + this.movieCatalogs = movieCatalogs; + } + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + @Autowired + lateinit var movieCatalogs: Map + + // ... +} +``` + +By default, autowiring fails when no matching candidate beans are available for a given +injection point. In the case of a declared array, collection, or map, at least one +matching element is expected. 
+ +The default behavior is to treat annotated methods and fields as indicating required +dependencies. You can change this behavior as demonstrated in the following example, +enabling the framework to skip a non-satisfiable injection point through marking it as +non-required (i.e., by setting the `required` attribute in `@Autowired` to `false`): + +Java + +``` +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + @Autowired(required = false) + public void setMovieFinder(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } + + // ... +} +``` + +Kotlin + +``` +class SimpleMovieLister { + + @Autowired(required = false) + var movieFinder: MovieFinder? = null + + // ... +} +``` + +A non-required method will not be called at all if its dependency (or one of its +dependencies, in case of multiple arguments) is not available. A non-required field will +not get populated at all in such cases, leaving its default value in place. + +Injected constructor and factory method arguments are a special case since the `required`attribute in `@Autowired` has a somewhat different meaning due to Spring’s constructor +resolution algorithm that may potentially deal with multiple constructors. Constructor +and factory method arguments are effectively required by default but with a few special +rules in a single-constructor scenario, such as multi-element injection points (arrays, +collections, maps) resolving to empty instances if no matching beans are available. This +allows for a common implementation pattern where all dependencies can be declared in a +unique multi-argument constructor — for example, declared as a single public constructor +without an `@Autowired` annotation. + +| |Only one constructor of any given bean class may declare `@Autowired` with the `required`attribute set to `true`, indicating *the* constructor to autowire when used as a Spring
bean. As a consequence, if the `required` attribute is left at its default value `true`,
only a single constructor may be annotated with `@Autowired`. If multiple constructors
declare the annotation, they will all have to declare `required=false` in order to be
considered as candidates for autowiring (analogous to `autowire=constructor` in XML).
The constructor with the greatest number of dependencies that can be satisfied by matching
beans in the Spring container will be chosen. If none of the candidates can be satisfied,
then a primary/default constructor (if present) will be used. Similarly, if a class
declares multiple constructors but none of them is annotated with `@Autowired`, then a
primary/default constructor (if present) will be used. If a class only declares a single
constructor to begin with, it will always be used, even if not annotated. Note that an
annotated constructor does not have to be public.

The `required` attribute of `@Autowired` is recommended over the deprecated `@Required` annotation on setter methods. Setting the `required` attribute to `false` indicates that
the property is not required for autowiring purposes, and the property is ignored if it
cannot be autowired. `@Required`, on the other hand, is stronger in that it enforces the
property to be set by any means supported by the container, and if no value is defined,
a corresponding exception is raised.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Alternatively, you can express the non-required nature of a particular dependency +through Java 8’s `java.util.Optional`, as the following example shows: + +``` +public class SimpleMovieLister { + + @Autowired + public void setMovieFinder(Optional movieFinder) { + ... 
+ } +} +``` + +As of Spring Framework 5.0, you can also use a `@Nullable` annotation (of any kind +in any package — for example, `javax.annotation.Nullable` from JSR-305) or just leverage +Kotlin builtin null-safety support: + +Java + +``` +public class SimpleMovieLister { + + @Autowired + public void setMovieFinder(@Nullable MovieFinder movieFinder) { + ... + } +} +``` + +Kotlin + +``` +class SimpleMovieLister { + + @Autowired + var movieFinder: MovieFinder? = null + + // ... +} +``` + +You can also use `@Autowired` for interfaces that are well-known resolvable +dependencies: `BeanFactory`, `ApplicationContext`, `Environment`, `ResourceLoader`,`ApplicationEventPublisher`, and `MessageSource`. These interfaces and their extended +interfaces, such as `ConfigurableApplicationContext` or `ResourcePatternResolver`, are +automatically resolved, with no special setup necessary. The following example autowires +an `ApplicationContext` object: + +Java + +``` +public class MovieRecommender { + + @Autowired + private ApplicationContext context; + + public MovieRecommender() { + } + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + @Autowired + lateinit var context: ApplicationContext + + // ... +} +``` + +| |The `@Autowired`, `@Inject`, `@Value`, and `@Resource` annotations are handled by Spring`BeanPostProcessor` implementations. This means that you cannot apply these annotations
within your own `BeanPostProcessor` or `BeanFactoryPostProcessor` types (if any).
These types must be 'wired up' explicitly by using XML or a Spring `@Bean` method.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.9.3. Fine-tuning Annotation-based Autowiring with `@Primary` + +Because autowiring by type may lead to multiple candidates, it is often necessary to have +more control over the selection process. One way to accomplish this is with Spring’s`@Primary` annotation. `@Primary` indicates that a particular bean should be given +preference when multiple beans are candidates to be autowired to a single-valued +dependency. If exactly one primary bean exists among the candidates, it becomes the +autowired value. + +Consider the following configuration that defines `firstMovieCatalog` as the +primary `MovieCatalog`: + +Java + +``` +@Configuration +public class MovieConfiguration { + + @Bean + @Primary + public MovieCatalog firstMovieCatalog() { ... } + + @Bean + public MovieCatalog secondMovieCatalog() { ... } + + // ... +} +``` + +Kotlin + +``` +@Configuration +class MovieConfiguration { + + @Bean + @Primary + fun firstMovieCatalog(): MovieCatalog { ... } + + @Bean + fun secondMovieCatalog(): MovieCatalog { ... } + + // ... +} +``` + +With the preceding configuration, the following `MovieRecommender` is autowired with the`firstMovieCatalog`: + +Java + +``` +public class MovieRecommender { + + @Autowired + private MovieCatalog movieCatalog; + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + @Autowired + private lateinit var movieCatalog: MovieCatalog + + // ... +} +``` + +The corresponding bean definitions follow: + +``` + + + + + + + + + + + + + + + + +``` + +#### 1.9.4. 
Fine-tuning Annotation-based Autowiring with Qualifiers + +`@Primary` is an effective way to use autowiring by type with several instances when one +primary candidate can be determined. When you need more control over the selection process, +you can use Spring’s `@Qualifier` annotation. You can associate qualifier values +with specific arguments, narrowing the set of type matches so that a specific bean is +chosen for each argument. In the simplest case, this can be a plain descriptive value, as +shown in the following example: + +Java + +``` +public class MovieRecommender { + + @Autowired + @Qualifier("main") + private MovieCatalog movieCatalog; + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + @Autowired + @Qualifier("main") + private lateinit var movieCatalog: MovieCatalog + + // ... +} +``` + +You can also specify the `@Qualifier` annotation on individual constructor arguments or +method parameters, as shown in the following example: + +Java + +``` +public class MovieRecommender { + + private MovieCatalog movieCatalog; + + private CustomerPreferenceDao customerPreferenceDao; + + @Autowired + public void prepare(@Qualifier("main") MovieCatalog movieCatalog, + CustomerPreferenceDao customerPreferenceDao) { + this.movieCatalog = movieCatalog; + this.customerPreferenceDao = customerPreferenceDao; + } + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + private lateinit var movieCatalog: MovieCatalog + + private lateinit var customerPreferenceDao: CustomerPreferenceDao + + @Autowired + fun prepare(@Qualifier("main") movieCatalog: MovieCatalog, + customerPreferenceDao: CustomerPreferenceDao) { + this.movieCatalog = movieCatalog + this.customerPreferenceDao = customerPreferenceDao + } + + // ... +} +``` + +The following example shows corresponding bean definitions. + +``` + + + + + + + (1) + + + + + + (2) + + + + + + + +``` + +|**1**| The bean with the `main` qualifier value is wired with the constructor argument that
is qualified with the same value. | +|-----|----------------------------------------------------------------------------------------------------------------------------| +|**2**|The bean with the `action` qualifier value is wired with the constructor argument that
is qualified with the same value.| + +For a fallback match, the bean name is considered a default qualifier value. Thus, you +can define the bean with an `id` of `main` instead of the nested qualifier element, leading +to the same matching result. However, although you can use this convention to refer to +specific beans by name, `@Autowired` is fundamentally about type-driven injection with +optional semantic qualifiers. This means that qualifier values, even with the bean name +fallback, always have narrowing semantics within the set of type matches. They do not +semantically express a reference to a unique bean `id`. Good qualifier values are `main`or `EMEA` or `persistent`, expressing characteristics of a specific component that are +independent from the bean `id`, which may be auto-generated in case of an anonymous bean +definition such as the one in the preceding example. + +Qualifiers also apply to typed collections, as discussed earlier — for example, to`Set`. In this case, all matching beans, according to the declared +qualifiers, are injected as a collection. This implies that qualifiers do not have to be +unique. Rather, they constitute filtering criteria. For example, you can define +multiple `MovieCatalog` beans with the same qualifier value “action”, all of which are +injected into a `Set` annotated with `@Qualifier("action")`. + +| |Letting qualifier values select against target bean names, within the type-matching
candidates, does not require a `@Qualifier` annotation at the injection point.
If there is no other resolution indicator (such as a qualifier or a primary marker)
in a non-unique dependency situation, Spring matches the injection point name
(that is, the field name or parameter name) against the target bean names and chooses the
same-named candidate, if any.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +That said, if you intend to express annotation-driven injection by name, do not +primarily use `@Autowired`, even if it is capable of selecting by bean name among +type-matching candidates. Instead, use the JSR-250 `@Resource` annotation, which is +semantically defined to identify a specific target component by its unique name, with +the declared type being irrelevant for the matching process. `@Autowired` has rather +different semantics: After selecting candidate beans by type, the specified `String`qualifier value is considered within those type-selected candidates only (for example, +matching an `account` qualifier against beans marked with the same qualifier label). + +For beans that are themselves defined as a collection, `Map`, or array type, `@Resource`is a fine solution, referring to the specific collection or array bean by unique name. +That said, as of 4.3, you can match collection, `Map`, and array types through Spring’s`@Autowired` type matching algorithm as well, as long as the element type information +is preserved in `@Bean` return type signatures or collection inheritance hierarchies. +In this case, you can use qualifier values to select among same-typed collections, +as outlined in the previous paragraph. + +As of 4.3, `@Autowired` also considers self references for injection (that is, references +back to the bean that is currently injected). Note that self injection is a fallback. +Regular dependencies on other components always have precedence. 
In that sense, self +references do not participate in regular candidate selection and are therefore in +particular never primary. On the contrary, they always end up as lowest precedence. +In practice, you should use self references as a last resort only (for example, for +calling other methods on the same instance through the bean’s transactional proxy). +Consider factoring out the affected methods to a separate delegate bean in such a scenario. +Alternatively, you can use `@Resource`, which may obtain a proxy back to the current bean +by its unique name. + +| |Trying to inject the results from `@Bean` methods on the same configuration class is
effectively a self-reference scenario as well. Either lazily resolve such references
in the method signature where they are actually needed (as opposed to an autowired field
in the configuration class) or declare the affected `@Bean` methods as `static`,
decoupling them from the containing configuration class instance and its lifecycle.
Otherwise, such beans are only considered in the fallback phase, with matching beans
on other configuration classes selected as primary candidates instead (if available).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +`@Autowired` applies to fields, constructors, and multi-argument methods, allowing for +narrowing through qualifier annotations at the parameter level. In contrast, `@Resource`is supported only for fields and bean property setter methods with a single argument. +As a consequence, you should stick with qualifiers if your injection target is a +constructor or a multi-argument method. + +You can create your own custom qualifier annotations. 
To do so, define an annotation and +provide the `@Qualifier` annotation within your definition, as the following example shows: + +Java + +``` +@Target({ElementType.FIELD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@Qualifier +public @interface Genre { + + String value(); +} +``` + +Kotlin + +``` +@Target(AnnotationTarget.FIELD, AnnotationTarget.VALUE_PARAMETER) +@Retention(AnnotationRetention.RUNTIME) +@Qualifier +annotation class Genre(val value: String) +``` + +Then you can provide the custom qualifier on autowired fields and parameters, as the +following example shows: + +Java + +``` +public class MovieRecommender { + + @Autowired + @Genre("Action") + private MovieCatalog actionCatalog; + + private MovieCatalog comedyCatalog; + + @Autowired + public void setComedyCatalog(@Genre("Comedy") MovieCatalog comedyCatalog) { + this.comedyCatalog = comedyCatalog; + } + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + @Autowired + @Genre("Action") + private lateinit var actionCatalog: MovieCatalog + + private lateinit var comedyCatalog: MovieCatalog + + @Autowired + fun setComedyCatalog(@Genre("Comedy") comedyCatalog: MovieCatalog) { + this.comedyCatalog = comedyCatalog + } + + // ... +} +``` + +Next, you can provide the information for the candidate bean definitions. You can add`` tags as sub-elements of the `` tag and then specify the `type` and`value` to match your custom qualifier annotations. The type is matched against the +fully-qualified class name of the annotation. Alternately, as a convenience if no risk of +conflicting names exists, you can use the short class name. The following example +demonstrates both approaches: + +``` + + + + + + + + + + + + + + + + + + +``` + +In [Classpath Scanning and Managed Components](#beans-classpath-scanning), you can see an annotation-based alternative to +providing the qualifier metadata in XML. Specifically, see [Providing Qualifier Metadata with Annotations](#beans-scanning-qualifiers). 
+ +In some cases, using an annotation without a value may suffice. This can be +useful when the annotation serves a more generic purpose and can be applied across +several different types of dependencies. For example, you may provide an offline +catalog that can be searched when no Internet connection is available. First, define +the simple annotation, as the following example shows: + +Java + +``` +@Target({ElementType.FIELD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@Qualifier +public @interface Offline { + +} +``` + +Kotlin + +``` +@Target(AnnotationTarget.FIELD, AnnotationTarget.VALUE_PARAMETER) +@Retention(AnnotationRetention.RUNTIME) +@Qualifier +annotation class Offline +``` + +Then add the annotation to the field or property to be autowired, as shown in the +following example: + +Java + +``` +public class MovieRecommender { + + @Autowired + @Offline (1) + private MovieCatalog offlineCatalog; + + // ... +} +``` + +|**1**|This line adds the `@Offline` annotation.| +|-----|-----------------------------------------| + +Kotlin + +``` +class MovieRecommender { + + @Autowired + @Offline (1) + private lateinit var offlineCatalog: MovieCatalog + + // ... +} +``` + +|**1**|This line adds the `@Offline` annotation.| +|-----|-----------------------------------------| + +Now the bean definition only needs a qualifier `type`, as shown in the following example: + +``` + + (1) + + +``` + +|**1**|This element specifies the qualifier.| +|-----|-------------------------------------| + +You can also define custom qualifier annotations that accept named attributes in +addition to or instead of the simple `value` attribute. If multiple attribute values are +then specified on a field or parameter to be autowired, a bean definition must match +all such attribute values to be considered an autowire candidate. 
As an example, +consider the following annotation definition: + +Java + +``` +@Target({ElementType.FIELD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +@Qualifier +public @interface MovieQualifier { + + String genre(); + + Format format(); +} +``` + +Kotlin + +``` +@Target(AnnotationTarget.FIELD, AnnotationTarget.VALUE_PARAMETER) +@Retention(AnnotationRetention.RUNTIME) +@Qualifier +annotation class MovieQualifier(val genre: String, val format: Format) +``` + +In this case `Format` is an enum, defined as follows: + +Java + +``` +public enum Format { + VHS, DVD, BLURAY +} +``` + +Kotlin + +``` +enum class Format { + VHS, DVD, BLURAY +} +``` + +The fields to be autowired are annotated with the custom qualifier and include values +for both attributes: `genre` and `format`, as the following example shows: + +Java + +``` +public class MovieRecommender { + + @Autowired + @MovieQualifier(format=Format.VHS, genre="Action") + private MovieCatalog actionVhsCatalog; + + @Autowired + @MovieQualifier(format=Format.VHS, genre="Comedy") + private MovieCatalog comedyVhsCatalog; + + @Autowired + @MovieQualifier(format=Format.DVD, genre="Action") + private MovieCatalog actionDvdCatalog; + + @Autowired + @MovieQualifier(format=Format.BLURAY, genre="Comedy") + private MovieCatalog comedyBluRayCatalog; + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender { + + @Autowired + @MovieQualifier(format = Format.VHS, genre = "Action") + private lateinit var actionVhsCatalog: MovieCatalog + + @Autowired + @MovieQualifier(format = Format.VHS, genre = "Comedy") + private lateinit var comedyVhsCatalog: MovieCatalog + + @Autowired + @MovieQualifier(format = Format.DVD, genre = "Action") + private lateinit var actionDvdCatalog: MovieCatalog + + @Autowired + @MovieQualifier(format = Format.BLURAY, genre = "Comedy") + private lateinit var comedyBluRayCatalog: MovieCatalog + + // ... +} +``` + +Finally, the bean definitions should contain matching qualifier values. 
This example +also demonstrates that you can use bean meta attributes instead of the`` elements. If available, the `` element and its attributes take +precedence, but the autowiring mechanism falls back on the values provided within the`` tags if no such qualifier is present, as in the last two bean definitions in +the following example: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +#### 1.9.5. Using Generics as Autowiring Qualifiers + +In addition to the `@Qualifier` annotation, you can use Java generic types +as an implicit form of qualification. For example, suppose you have the following +configuration: + +Java + +``` +@Configuration +public class MyConfiguration { + + @Bean + public StringStore stringStore() { + return new StringStore(); + } + + @Bean + public IntegerStore integerStore() { + return new IntegerStore(); + } +} +``` + +Kotlin + +``` +@Configuration +class MyConfiguration { + + @Bean + fun stringStore() = StringStore() + + @Bean + fun integerStore() = IntegerStore() +} +``` + +Assuming that the preceding beans implement a generic interface, (that is, `Store` and`Store`), you can `@Autowire` the `Store` interface and the generic is +used as a qualifier, as the following example shows: + +Java + +``` +@Autowired +private Store s1; // qualifier, injects the stringStore bean + +@Autowired +private Store s2; // qualifier, injects the integerStore bean +``` + +Kotlin + +``` +@Autowired +private lateinit var s1: Store // qualifier, injects the stringStore bean + +@Autowired +private lateinit var s2: Store // qualifier, injects the integerStore bean +``` + +Generic qualifiers also apply when autowiring lists, `Map` instances and arrays. 
The +following example autowires a generic `List`: + +Java + +``` +// Inject all Store beans as long as they have an generic +// Store beans will not appear in this list +@Autowired +private List> s; +``` + +Kotlin + +``` +// Inject all Store beans as long as they have an generic +// Store beans will not appear in this list +@Autowired +private lateinit var s: List> +``` + +#### 1.9.6. Using `CustomAutowireConfigurer` + +[`CustomAutowireConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/annotation/CustomAutowireConfigurer.html)is a `BeanFactoryPostProcessor` that lets you register your own custom qualifier +annotation types, even if they are not annotated with Spring’s `@Qualifier` annotation. +The following example shows how to use `CustomAutowireConfigurer`: + +``` + + + + example.CustomQualifier + + + +``` + +The `AutowireCandidateResolver` determines autowire candidates by: + +* The `autowire-candidate` value of each bean definition + +* Any `default-autowire-candidates` patterns available on the `` element + +* The presence of `@Qualifier` annotations and any custom annotations registered + with the `CustomAutowireConfigurer` + +When multiple beans qualify as autowire candidates, the determination of a “primary” is +as follows: If exactly one bean definition among the candidates has a `primary`attribute set to `true`, it is selected. + +#### 1.9.7. Injection with `@Resource` + +Spring also supports injection by using the JSR-250 `@Resource` annotation +(`javax.annotation.Resource`) on fields or bean property setter methods. +This is a common pattern in Java EE: for example, in JSF-managed beans and JAX-WS +endpoints. Spring supports this pattern for Spring-managed objects as well. + +`@Resource` takes a name attribute. By default, Spring interprets that value as +the bean name to be injected. 
In other words, it follows by-name semantics, +as demonstrated in the following example: + +Java + +``` +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + @Resource(name="myMovieFinder") (1) + public void setMovieFinder(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } +} +``` + +|**1**|This line injects a `@Resource`.| +|-----|--------------------------------| + +Kotlin + +``` +class SimpleMovieLister { + + @Resource(name="myMovieFinder") (1) + private lateinit var movieFinder:MovieFinder +} +``` + +|**1**|This line injects a `@Resource`.| +|-----|--------------------------------| + +If no name is explicitly specified, the default name is derived from the field name or +setter method. In case of a field, it takes the field name. In case of a setter method, +it takes the bean property name. The following example is going to have the bean +named `movieFinder` injected into its setter method: + +Java + +``` +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + @Resource + public void setMovieFinder(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } +} +``` + +Kotlin + +``` +class SimpleMovieLister { + + @Resource + private lateinit var movieFinder: MovieFinder + +} +``` + +| |The name provided with the annotation is resolved as a bean name by the`ApplicationContext` of which the `CommonAnnotationBeanPostProcessor` is aware.
The names can be resolved through JNDI if you configure Spring’s[`SimpleJndiBeanFactory`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jndi/support/SimpleJndiBeanFactory.html)explicitly. However, we recommend that you rely on the default behavior and
use Spring’s JNDI lookup capabilities to preserve the level of indirection.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In the exclusive case of `@Resource` usage with no explicit name specified, and similar +to `@Autowired`, `@Resource` finds a primary type match instead of a specific named bean +and resolves well known resolvable dependencies: the `BeanFactory`,`ApplicationContext`, `ResourceLoader`, `ApplicationEventPublisher`, and `MessageSource`interfaces. + +Thus, in the following example, the `customerPreferenceDao` field first looks for a bean +named "customerPreferenceDao" and then falls back to a primary type match for the type`CustomerPreferenceDao`: + +Java + +``` +public class MovieRecommender { + + @Resource + private CustomerPreferenceDao customerPreferenceDao; + + @Resource + private ApplicationContext context; (1) + + public MovieRecommender() { + } + + // ... +} +``` + +|**1**|The `context` field is injected based on the known resolvable dependency type:`ApplicationContext`.| +|-----|---------------------------------------------------------------------------------------------------| + +Kotlin + +``` +class MovieRecommender { + + @Resource + private lateinit var customerPreferenceDao: CustomerPreferenceDao + + @Resource + private lateinit var context: ApplicationContext (1) + + // ... 
+} +``` + +|**1**|The `context` field is injected based on the known resolvable dependency type:`ApplicationContext`.| +|-----|---------------------------------------------------------------------------------------------------| + +#### 1.9.8. Using `@Value` + +`@Value` is typically used to inject externalized properties: + +Java + +``` +@Component +public class MovieRecommender { + + private final String catalog; + + public MovieRecommender(@Value("${catalog.name}") String catalog) { + this.catalog = catalog; + } +} +``` + +Kotlin + +``` +@Component +class MovieRecommender(@Value("\${catalog.name}") private val catalog: String) +``` + +With the following configuration: + +Java + +``` +@Configuration +@PropertySource("classpath:application.properties") +public class AppConfig { } +``` + +Kotlin + +``` +@Configuration +@PropertySource("classpath:application.properties") +class AppConfig +``` + +And the following `application.properties` file: + +``` +catalog.name=MovieCatalog +``` + +In that case, the `catalog` parameter and field will be equal to the `MovieCatalog` value. + +A default lenient embedded value resolver is provided by Spring. It will try to resolve the +property value and if it cannot be resolved, the property name (for example `${catalog.name}`) +will be injected as the value. 
If you want to maintain strict control over nonexistent +values, you should declare a `PropertySourcesPlaceholderConfigurer` bean, as the following +example shows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public static PropertySourcesPlaceholderConfigurer propertyPlaceholderConfigurer() { + return new PropertySourcesPlaceholderConfigurer(); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun propertyPlaceholderConfigurer() = PropertySourcesPlaceholderConfigurer() +} +``` + +| |When configuring a `PropertySourcesPlaceholderConfigurer` using JavaConfig, the`@Bean` method must be `static`.| +|---|---------------------------------------------------------------------------------------------------------------| + +Using the above configuration ensures Spring initialization failure if any `${}`placeholder could not be resolved. It is also possible to use methods like`setPlaceholderPrefix`, `setPlaceholderSuffix`, or `setValueSeparator` to customize +placeholders. + +| |Spring Boot configures by default a `PropertySourcesPlaceholderConfigurer` bean that
will get properties from `application.properties` and `application.yml` files.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Built-in converter support provided by Spring allows simple type conversion (to `Integer`or `int` for example) to be automatically handled. Multiple comma-separated values can be +automatically converted to `String` array without extra effort. + +It is possible to provide a default value as following: + +Java + +``` +@Component +public class MovieRecommender { + + private final String catalog; + + public MovieRecommender(@Value("${catalog.name:defaultCatalog}") String catalog) { + this.catalog = catalog; + } +} +``` + +Kotlin + +``` +@Component +class MovieRecommender(@Value("\${catalog.name:defaultCatalog}") private val catalog: String) +``` + +A Spring `BeanPostProcessor` uses a `ConversionService` behind the scenes to handle the +process for converting the `String` value in `@Value` to the target type. 
If you want to +provide conversion support for your own custom type, you can provide your own`ConversionService` bean instance as the following example shows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public ConversionService conversionService() { + DefaultFormattingConversionService conversionService = new DefaultFormattingConversionService(); + conversionService.addConverter(new MyCustomConverter()); + return conversionService; + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun conversionService(): ConversionService { + return DefaultFormattingConversionService().apply { + addConverter(MyCustomConverter()) + } + } +} +``` + +When `@Value` contains a [`SpEL` expression](#expressions) the value will be dynamically +computed at runtime as the following example shows: + +Java + +``` +@Component +public class MovieRecommender { + + private final String catalog; + + public MovieRecommender(@Value("#{systemProperties['user.catalog'] + 'Catalog' }") String catalog) { + this.catalog = catalog; + } +} +``` + +Kotlin + +``` +@Component +class MovieRecommender( + @Value("#{systemProperties['user.catalog'] + 'Catalog' }") private val catalog: String) +``` + +SpEL also enables the use of more complex data structures: + +Java + +``` +@Component +public class MovieRecommender { + + private final Map countOfMoviesPerCatalog; + + public MovieRecommender( + @Value("#{{'Thriller': 100, 'Comedy': 300}}") Map countOfMoviesPerCatalog) { + this.countOfMoviesPerCatalog = countOfMoviesPerCatalog; + } +} +``` + +Kotlin + +``` +@Component +class MovieRecommender( + @Value("#{{'Thriller': 100, 'Comedy': 300}}") private val countOfMoviesPerCatalog: Map) +``` + +#### 1.9.9. Using `@PostConstruct` and `@PreDestroy` + +The `CommonAnnotationBeanPostProcessor` not only recognizes the `@Resource` annotation +but also the JSR-250 lifecycle annotations: `javax.annotation.PostConstruct` and`javax.annotation.PreDestroy`. 
Introduced in Spring 2.5, the support for these +annotations offers an alternative to the lifecycle callback mechanism described in[initialization callbacks](#beans-factory-lifecycle-initializingbean) and[destruction callbacks](#beans-factory-lifecycle-disposablebean). Provided that the`CommonAnnotationBeanPostProcessor` is registered within the Spring `ApplicationContext`, +a method carrying one of these annotations is invoked at the same point in the lifecycle +as the corresponding Spring lifecycle interface method or explicitly declared callback +method. In the following example, the cache is pre-populated upon initialization and +cleared upon destruction: + +Java + +``` +public class CachingMovieLister { + + @PostConstruct + public void populateMovieCache() { + // populates the movie cache upon initialization... + } + + @PreDestroy + public void clearMovieCache() { + // clears the movie cache upon destruction... + } +} +``` + +Kotlin + +``` +class CachingMovieLister { + + @PostConstruct + fun populateMovieCache() { + // populates the movie cache upon initialization... + } + + @PreDestroy + fun clearMovieCache() { + // clears the movie cache upon destruction... + } +} +``` + +For details about the effects of combining various lifecycle mechanisms, see[Combining Lifecycle Mechanisms](#beans-factory-lifecycle-combined-effects). + +| |Like `@Resource`, the `@PostConstruct` and `@PreDestroy` annotation types were a part
of the standard Java libraries from JDK 6 to 8. However, the entire `javax.annotation`package got separated from the core Java modules in JDK 9 and eventually removed in
JDK 11. If needed, the `javax.annotation-api` artifact needs to be obtained via Maven
Central now, simply to be added to the application’s classpath like any other library.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.10. Classpath Scanning and Managed Components + +Most examples in this chapter use XML to specify the configuration metadata that produces +each `BeanDefinition` within the Spring container. The previous section +([Annotation-based Container Configuration](#beans-annotation-config)) demonstrates how to provide a lot of the configuration +metadata through source-level annotations. Even in those examples, however, the “base” +bean definitions are explicitly defined in the XML file, while the annotations drive only +the dependency injection. This section describes an option for implicitly detecting the +candidate components by scanning the classpath. Candidate components are classes that +match against a filter criteria and have a corresponding bean definition registered with +the container. This removes the need to use XML to perform bean registration. Instead, you +can use annotations (for example, `@Component`), AspectJ type expressions, or your own +custom filter criteria to select which classes have bean definitions registered with +the container. + +| |Starting with Spring 3.0, many features provided by the Spring JavaConfig project are
part of the core Spring Framework. This allows you to define beans using Java rather
than using the traditional XML files. Take a look at the `@Configuration`, `@Bean`,`@Import`, and `@DependsOn` annotations for examples of how to use these new features.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.10.1. `@Component` and Further Stereotype Annotations + +The `@Repository` annotation is a marker for any class that fulfills the role or +stereotype of a repository (also known as Data Access Object or DAO). Among the uses +of this marker is the automatic translation of exceptions, as described in[Exception Translation](data-access.html#orm-exception-translation). + +Spring provides further stereotype annotations: `@Component`, `@Service`, and`@Controller`. `@Component` is a generic stereotype for any Spring-managed component.`@Repository`, `@Service`, and `@Controller` are specializations of `@Component` for +more specific use cases (in the persistence, service, and presentation +layers, respectively). Therefore, you can annotate your component classes with`@Component`, but, by annotating them with `@Repository`, `@Service`, or `@Controller`instead, your classes are more properly suited for processing by tools or associating +with aspects. For example, these stereotype annotations make ideal targets for +pointcuts. `@Repository`, `@Service`, and `@Controller` can also +carry additional semantics in future releases of the Spring Framework. Thus, if you are +choosing between using `@Component` or `@Service` for your service layer, `@Service` is +clearly the better choice. Similarly, as stated earlier, `@Repository` is already +supported as a marker for automatic exception translation in your persistence layer. + +#### 1.10.2. 
Using Meta-annotations and Composed Annotations + +Many of the annotations provided by Spring can be used as meta-annotations in your +own code. A meta-annotation is an annotation that can be applied to another annotation. +For example, the `@Service` annotation mentioned [earlier](#beans-stereotype-annotations)is meta-annotated with `@Component`, as the following example shows: + +Java + +``` +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +@Documented +@Component (1) +public @interface Service { + + // ... +} +``` + +|**1**|The `@Component` causes `@Service` to be treated in the same way as `@Component`.| +|-----|---------------------------------------------------------------------------------| + +Kotlin + +``` +@Target(AnnotationTarget.TYPE) +@Retention(AnnotationRetention.RUNTIME) +@MustBeDocumented +@Component (1) +annotation class Service { + + // ... +} +``` + +|**1**|The `@Component` causes `@Service` to be treated in the same way as `@Component`.| +|-----|---------------------------------------------------------------------------------| + +You can also combine meta-annotations to create “composed annotations”. For example, +the `@RestController` annotation from Spring MVC is composed of `@Controller` and`@ResponseBody`. + +In addition, composed annotations can optionally redeclare attributes from +meta-annotations to allow customization. This can be particularly useful when you +want to only expose a subset of the meta-annotation’s attributes. For example, Spring’s`@SessionScope` annotation hardcodes the scope name to `session` but still allows +customization of the `proxyMode`. The following listing shows the definition of the`SessionScope` annotation: + +Java + +``` +@Target({ElementType.TYPE, ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@Documented +@Scope(WebApplicationContext.SCOPE_SESSION) +public @interface SessionScope { + + /** + * Alias for {@link Scope#proxyMode}. + *

Defaults to {@link ScopedProxyMode#TARGET_CLASS}. + */ + @AliasFor(annotation = Scope.class) + ScopedProxyMode proxyMode() default ScopedProxyMode.TARGET_CLASS; + +} +``` + +Kotlin + +``` +@Target(AnnotationTarget.TYPE, AnnotationTarget.FUNCTION) +@Retention(AnnotationRetention.RUNTIME) +@MustBeDocumented +@Scope(WebApplicationContext.SCOPE_SESSION) +annotation class SessionScope( + @get:AliasFor(annotation = Scope::class) + val proxyMode: ScopedProxyMode = ScopedProxyMode.TARGET_CLASS +) +``` + +You can then use `@SessionScope` without declaring the `proxyMode` as follows: + +Java + +``` +@Service +@SessionScope +public class SessionScopedService { + // ... +} +``` + +Kotlin + +``` +@Service +@SessionScope +class SessionScopedService { + // ... +} +``` + +You can also override the value for the `proxyMode`, as the following example shows: + +Java + +``` +@Service +@SessionScope(proxyMode = ScopedProxyMode.INTERFACES) +public class SessionScopedUserService implements UserService { + // ... +} +``` + +Kotlin + +``` +@Service +@SessionScope(proxyMode = ScopedProxyMode.INTERFACES) +class SessionScopedUserService : UserService { + // ... +} +``` + +For further details, see the[Spring Annotation Programming Model](https://github.com/spring-projects/spring-framework/wiki/Spring-Annotation-Programming-Model)wiki page. + +#### 1.10.3. Automatically Detecting Classes and Registering Bean Definitions + +Spring can automatically detect stereotyped classes and register corresponding`BeanDefinition` instances with the `ApplicationContext`. 
For example, the following two classes +are eligible for such autodetection: + +Java + +``` +@Service +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + public SimpleMovieLister(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } +} +``` + +Kotlin + +``` +@Service +class SimpleMovieLister(private val movieFinder: MovieFinder) +``` + +Java + +``` +@Repository +public class JpaMovieFinder implements MovieFinder { + // implementation elided for clarity +} +``` + +Kotlin + +``` +@Repository +class JpaMovieFinder : MovieFinder { + // implementation elided for clarity +} +``` + +To autodetect these classes and register the corresponding beans, you need to add`@ComponentScan` to your `@Configuration` class, where the `basePackages` attribute +is a common parent package for the two classes. (Alternatively, you can specify a +comma- or semicolon- or space-separated list that includes the parent package of each class.) + +Java + +``` +@Configuration +@ComponentScan(basePackages = "org.example") +public class AppConfig { + // ... +} +``` + +Kotlin + +``` +@Configuration +@ComponentScan(basePackages = ["org.example"]) +class AppConfig { + // ... +} +``` + +| |For brevity, the preceding example could have used the `value` attribute of the
annotation (that is, `@ComponentScan("org.example")`).| +|---|------------------------------------------------------------------------------------------------------------------------------------------| + +The following alternative uses XML: + +``` + + + + + + +``` + +| |The use of `` implicitly enables the functionality of``. There is usually no need to include the`` element when using ``.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The scanning of classpath packages requires the presence of corresponding directory
entries in the classpath. When you build JARs with Ant, make sure that you do not
activate the files-only switch of the JAR task. Also, classpath directories may not be
exposed based on security policies in some environments — for example, standalone apps on
JDK 1.7.0\_45 and higher (which requires 'Trusted-Library' setup in your manifests — see[https://stackoverflow.com/questions/19394570/java-jre-7u45-breaks-classloader-getresources](https://stackoverflow.com/questions/19394570/java-jre-7u45-breaks-classloader-getresources)).

On JDK 9’s module path (Jigsaw), Spring’s classpath scanning generally works as expected.
However, make sure that your component classes are exported in your `module-info`descriptors. If you expect Spring to invoke non-public members of your classes, make
sure that they are 'opened' (that is, that they use an `opens` declaration instead of an`exports` declaration in your `module-info` descriptor).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Furthermore, the `AutowiredAnnotationBeanPostProcessor` and`CommonAnnotationBeanPostProcessor` are both implicitly included when you use the +component-scan element. That means that the two components are autodetected and +wired together — all without any bean configuration metadata provided in XML. + +| |You can disable the registration of `AutowiredAnnotationBeanPostProcessor` and`CommonAnnotationBeanPostProcessor` by including the `annotation-config` attribute
with a value of `false`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.10.4. Using Filters to Customize Scanning + +By default, classes annotated with `@Component`, `@Repository`, `@Service`, `@Controller`,`@Configuration`, or a custom annotation that itself is annotated with `@Component` are +the only detected candidate components. However, you can modify and extend this behavior +by applying custom filters. Add them as `includeFilters` or `excludeFilters` attributes of +the `@ComponentScan` annotation (or as `` or`` child elements of the `` element in +XML configuration). Each filter element requires the `type` and `expression` attributes. +The following table describes the filtering options: + +| Filter Type | Example Expression | Description | +|--------------------|----------------------------|------------------------------------------------------------------------------------------| +|annotation (default)|`org.example.SomeAnnotation`| An annotation to be *present* or *meta-present* at the type level in target components. | +| assignable | `org.example.SomeClass` |A class (or interface) that the target components are assignable to (extend or implement).| +| aspectj | `org.example..*Service+` | An AspectJ type expression to be matched by the target components. | +| regex | `org\.example\.Default.*` | A regex expression to be matched by the target components' class names. | +| custom | `org.example.MyTypeFilter` | A custom implementation of the `org.springframework.core.type.TypeFilter` interface. 
| + +The following example shows the configuration ignoring all `@Repository` annotations +and using “stub” repositories instead: + +Java + +``` +@Configuration +@ComponentScan(basePackages = "org.example", + includeFilters = @Filter(type = FilterType.REGEX, pattern = ".*Stub.*Repository"), + excludeFilters = @Filter(Repository.class)) +public class AppConfig { + // ... +} +``` + +Kotlin + +``` +@Configuration +@ComponentScan(basePackages = "org.example", + includeFilters = [Filter(type = FilterType.REGEX, pattern = [".*Stub.*Repository"])], + excludeFilters = [Filter(Repository::class)]) +class AppConfig { + // ... +} +``` + +The following listing shows the equivalent XML: + +``` + + + + + + +``` + +| |You can also disable the default filters by setting `useDefaultFilters=false` on the
annotation or by providing `use-default-filters="false"` as an attribute of the`` element. This effectively disables automatic detection of classes
annotated or meta-annotated with `@Component`, `@Repository`, `@Service`, `@Controller`,`@RestController`, or `@Configuration`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.10.5. Defining Bean Metadata within Components + +Spring components can also contribute bean definition metadata to the container. You can do +this with the same `@Bean` annotation used to define bean metadata within `@Configuration`annotated classes. The following example shows how to do so: + +Java + +``` +@Component +public class FactoryMethodComponent { + + @Bean + @Qualifier("public") + public TestBean publicInstance() { + return new TestBean("publicInstance"); + } + + public void doWork() { + // Component method implementation omitted + } +} +``` + +Kotlin + +``` +@Component +class FactoryMethodComponent { + + @Bean + @Qualifier("public") + fun publicInstance() = TestBean("publicInstance") + + fun doWork() { + // Component method implementation omitted + } +} +``` + +The preceding class is a Spring component that has application-specific code in its`doWork()` method. However, it also contributes a bean definition that has a factory +method referring to the method `publicInstance()`. The `@Bean` annotation identifies the +factory method and other bean definition properties, such as a qualifier value through +the `@Qualifier` annotation. Other method-level annotations that can be specified are`@Scope`, `@Lazy`, and custom qualifier annotations. + +| |In addition to its role for component initialization, you can also place the `@Lazy`annotation on injection points marked with `@Autowired` or `@Inject`. In this context,
it leads to the injection of a lazy-resolution proxy. However, such a proxy approach
is rather limited. For sophisticated lazy interactions, in particular in combination
with optional dependencies, we recommend `ObjectProvider` instead.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Autowired fields and methods are supported, as previously discussed, with additional +support for autowiring of `@Bean` methods. The following example shows how to do so: + +Java + +``` +@Component +public class FactoryMethodComponent { + + private static int i; + + @Bean + @Qualifier("public") + public TestBean publicInstance() { + return new TestBean("publicInstance"); + } + + // use of a custom qualifier and autowiring of method parameters + @Bean + protected TestBean protectedInstance( + @Qualifier("public") TestBean spouse, + @Value("#{privateInstance.age}") String country) { + TestBean tb = new TestBean("protectedInstance", 1); + tb.setSpouse(spouse); + tb.setCountry(country); + return tb; + } + + @Bean + private TestBean privateInstance() { + return new TestBean("privateInstance", i++); + } + + @Bean + @RequestScope + public TestBean requestScopedInstance() { + return new TestBean("requestScopedInstance", 3); + } +} +``` + +Kotlin + +``` +@Component +class FactoryMethodComponent { + + companion object { + private var i: Int = 0 + } + + @Bean + @Qualifier("public") + fun publicInstance() = TestBean("publicInstance") + + // use of a custom qualifier and autowiring of method parameters + @Bean + protected fun protectedInstance( + @Qualifier("public") spouse: TestBean, + @Value("#{privateInstance.age}") country: String) = TestBean("protectedInstance", 1).apply { + this.spouse = spouse + this.country = country + } + + @Bean + private fun privateInstance() = 
TestBean("privateInstance", i++) + + @Bean + @RequestScope + fun requestScopedInstance() = TestBean("requestScopedInstance", 3) +} +``` + +The example autowires the `String` method parameter `country` to the value of the `age`property on another bean named `privateInstance`. A Spring Expression Language element +defines the value of the property through the notation `#{ }`. For `@Value`annotations, an expression resolver is preconfigured to look for bean names when +resolving expression text. + +As of Spring Framework 4.3, you may also declare a factory method parameter of type`InjectionPoint` (or its more specific subclass: `DependencyDescriptor`) to +access the requesting injection point that triggers the creation of the current bean. +Note that this applies only to the actual creation of bean instances, not to the +injection of existing instances. As a consequence, this feature makes most sense for +beans of prototype scope. For other scopes, the factory method only ever sees the +injection point that triggered the creation of a new bean instance in the given scope +(for example, the dependency that triggered the creation of a lazy singleton bean). +You can use the provided injection point metadata with semantic care in such scenarios. +The following example shows how to use `InjectionPoint`: + +Java + +``` +@Component +public class FactoryMethodComponent { + + @Bean @Scope("prototype") + public TestBean prototypeInstance(InjectionPoint injectionPoint) { + return new TestBean("prototypeInstance for " + injectionPoint.getMember()); + } +} +``` + +Kotlin + +``` +@Component +class FactoryMethodComponent { + + @Bean + @Scope("prototype") + fun prototypeInstance(injectionPoint: InjectionPoint) = + TestBean("prototypeInstance for ${injectionPoint.member}") +} +``` + +The `@Bean` methods in a regular Spring component are processed differently than their +counterparts inside a Spring `@Configuration` class. 
The difference is that `@Component`classes are not enhanced with CGLIB to intercept the invocation of methods and fields. +CGLIB proxying is the means by which invoking methods or fields within `@Bean` methods +in `@Configuration` classes creates bean metadata references to collaborating objects. +Such methods are not invoked with normal Java semantics but rather go through the +container in order to provide the usual lifecycle management and proxying of Spring +beans, even when referring to other beans through programmatic calls to `@Bean` methods. +In contrast, invoking a method or field in a `@Bean` method within a plain `@Component`class has standard Java semantics, with no special CGLIB processing or other +constraints applying. + +| |You may declare `@Bean` methods as `static`, allowing for them to be called without
creating their containing configuration class as an instance. This makes particular
sense when defining post-processor beans (for example, of type `BeanFactoryPostProcessor`or `BeanPostProcessor`), since such beans get initialized early in the container
lifecycle and should avoid triggering other parts of the configuration at that point.

Calls to static `@Bean` methods never get intercepted by the container, not even within`@Configuration` classes (as described earlier in this section), due to technical
limitations: CGLIB subclassing can override only non-static methods. As a consequence,
a direct call to another `@Bean` method has standard Java semantics, resulting
in an independent instance being returned straight from the factory method itself.

The Java language visibility of `@Bean` methods does not have an immediate impact on
the resulting bean definition in Spring’s container. You can freely declare your
factory methods as you see fit in non-`@Configuration` classes and also for static
methods anywhere. However, regular `@Bean` methods in `@Configuration` classes need
to be overridable — that is, they must not be declared as `private` or `final`.

`@Bean` methods are also discovered on base classes of a given component or
configuration class, as well as on Java 8 default methods declared in interfaces
implemented by the component or configuration class. This allows for a lot of
flexibility in composing complex configuration arrangements, with even multiple
inheritance being possible through Java 8 default methods as of Spring 4.2.

Finally, a single class may hold multiple `@Bean` methods for the same
bean, as an arrangement of multiple factory methods to use depending on available
dependencies at runtime. This is the same algorithm as for choosing the “greediest”
constructor or factory method in other configuration scenarios: The variant with
the largest number of satisfiable dependencies is picked at construction time,
analogous to how the container selects between multiple `@Autowired` constructors.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.10.6. Naming Autodetected Components + +When a component is autodetected as part of the scanning process, its bean name is +generated by the `BeanNameGenerator` strategy known to that scanner. By default, any +Spring stereotype annotation (`@Component`, `@Repository`, `@Service`, and`@Controller`) that contains a name `value` thereby provides that name to the +corresponding bean definition. + +If such an annotation contains no name `value` or for any other detected component +(such as those discovered by custom filters), the default bean name generator returns +the uncapitalized non-qualified class name. For example, if the following component +classes were detected, the names would be `myMovieLister` and `movieFinderImpl`: + +Java + +``` +@Service("myMovieLister") +public class SimpleMovieLister { + // ... +} +``` + +Kotlin + +``` +@Service("myMovieLister") +class SimpleMovieLister { + // ... +} +``` + +Java + +``` +@Repository +public class MovieFinderImpl implements MovieFinder { + // ... +} +``` + +Kotlin + +``` +@Repository +class MovieFinderImpl : MovieFinder { + // ... +} +``` + +If you do not want to rely on the default bean-naming strategy, you can provide a custom +bean-naming strategy. First, implement the[`BeanNameGenerator`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/support/BeanNameGenerator.html)interface, and be sure to include a default no-arg constructor. Then, provide the fully +qualified class name when configuring the scanner, as the following example annotation +and bean definition show. 
+ +| |If you run into naming conflicts due to multiple autodetected components having the
same non-qualified class name (i.e., classes with identical names but residing in
different packages), you may need to configure a `BeanNameGenerator` that defaults to the
fully qualified class name for the generated bean name. As of Spring Framework 5.2.3, the`FullyQualifiedAnnotationBeanNameGenerator` located in package`org.springframework.context.annotation` can be used for such purposes.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Java + +``` +@Configuration +@ComponentScan(basePackages = "org.example", nameGenerator = MyNameGenerator.class) +public class AppConfig { + // ... +} +``` + +Kotlin + +``` +@Configuration +@ComponentScan(basePackages = ["org.example"], nameGenerator = MyNameGenerator::class) +class AppConfig { + // ... +} +``` + +``` + + + +``` + +As a general rule, consider specifying the name with the annotation whenever other +components may be making explicit references to it. On the other hand, the +auto-generated names are adequate whenever the container is responsible for wiring. + +#### 1.10.7. Providing a Scope for Autodetected Components + +As with Spring-managed components in general, the default and most common scope for +autodetected components is `singleton`. However, sometimes you need a different scope +that can be specified by the `@Scope` annotation. You can provide the name of the +scope within the annotation, as the following example shows: + +Java + +``` +@Scope("prototype") +@Repository +public class MovieFinderImpl implements MovieFinder { + // ... +} +``` + +Kotlin + +``` +@Scope("prototype") +@Repository +class MovieFinderImpl : MovieFinder { + // ... 
+} +``` + +| |`@Scope` annotations are only introspected on the concrete bean class (for annotated
components) or the factory method (for `@Bean` methods). In contrast to XML bean
definitions, there is no notion of bean definition inheritance, and inheritance
hierarchies at the class level are irrelevant for metadata purposes.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For details on web-specific scopes such as “request” or “session” in a Spring context, +see [Request, Session, Application, and WebSocket Scopes](#beans-factory-scopes-other). As with the pre-built annotations for those scopes, +you may also compose your own scoping annotations by using Spring’s meta-annotation +approach: for example, a custom annotation meta-annotated with `@Scope("prototype")`, +possibly also declaring a custom scoped-proxy mode. + +| |To provide a custom strategy for scope resolution rather than relying on the
annotation-based approach, you can implement the[`ScopeMetadataResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/ScopeMetadataResolver.html)interface. Be sure to include a default no-arg constructor. Then you can provide the
fully qualified class name when configuring the scanner, as the following example of both
an annotation and a bean definition shows:| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Java + +``` +@Configuration +@ComponentScan(basePackages = "org.example", scopeResolver = MyScopeResolver.class) +public class AppConfig { + // ... +} +``` + +Kotlin + +``` +@Configuration +@ComponentScan(basePackages = ["org.example"], scopeResolver = MyScopeResolver::class) +class AppConfig { + // ... +} +``` + +``` + + + +``` + +When using certain non-singleton scopes, it may be necessary to generate proxies for the +scoped objects. The reasoning is described in [Scoped Beans as Dependencies](#beans-factory-scopes-other-injection). +For this purpose, a scoped-proxy attribute is available on the component-scan +element. The three possible values are: `no`, `interfaces`, and `targetClass`. For example, +the following configuration results in standard JDK dynamic proxies: + +Java + +``` +@Configuration +@ComponentScan(basePackages = "org.example", scopedProxy = ScopedProxyMode.INTERFACES) +public class AppConfig { + // ... +} +``` + +Kotlin + +``` +@Configuration +@ComponentScan(basePackages = ["org.example"], scopedProxy = ScopedProxyMode.INTERFACES) +class AppConfig { + // ... +} +``` + +``` + + + +``` + +#### 1.10.8. Providing Qualifier Metadata with Annotations + +The `@Qualifier` annotation is discussed in [Fine-tuning Annotation-based Autowiring with Qualifiers](#beans-autowired-annotation-qualifiers). 
+The examples in that section demonstrate the use of the `@Qualifier` annotation and +custom qualifier annotations to provide fine-grained control when you resolve autowire +candidates. Because those examples were based on XML bean definitions, the qualifier +metadata was provided on the candidate bean definitions by using the `qualifier` or `meta`child elements of the `bean` element in the XML. When relying upon classpath scanning for +auto-detection of components, you can provide the qualifier metadata with type-level +annotations on the candidate class. The following three examples demonstrate this +technique: + +Java + +``` +@Component +@Qualifier("Action") +public class ActionMovieCatalog implements MovieCatalog { + // ... +} +``` + +Kotlin + +``` +@Component +@Qualifier("Action") +class ActionMovieCatalog : MovieCatalog +``` + +Java + +``` +@Component +@Genre("Action") +public class ActionMovieCatalog implements MovieCatalog { + // ... +} +``` + +Kotlin + +``` +@Component +@Genre("Action") +class ActionMovieCatalog : MovieCatalog { + // ... +} +``` + +Java + +``` +@Component +@Offline +public class CachingMovieCatalog implements MovieCatalog { + // ... +} +``` + +Kotlin + +``` +@Component +@Offline +class CachingMovieCatalog : MovieCatalog { + // ... +} +``` + +| |As with most annotation-based alternatives, keep in mind that the annotation metadata is
bound to the class definition itself, while the use of XML allows for multiple beans
of the same type to provide variations in their qualifier metadata, because that
metadata is provided per-instance rather than per-class.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.10.9. Generating an Index of Candidate Components + +While classpath scanning is very fast, it is possible to improve the startup performance +of large applications by creating a static list of candidates at compilation time. In this +mode, all modules that are targets of component scanning must use this mechanism. + +| |Your existing `@ComponentScan` or `` directives must remain
unchanged to request the context to scan candidates in certain packages. When the`ApplicationContext` detects such an index, it automatically uses it rather than scanning
the classpath.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To generate the index, add an additional dependency to each module that contains +components that are targets for component scan directives. The following example shows +how to do so with Maven: + +``` + + + org.springframework + spring-context-indexer + 5.3.16 + true + + +``` + +With Gradle 4.5 and earlier, the dependency should be declared in the `compileOnly`configuration, as shown in the following example: + +``` +dependencies { + compileOnly "org.springframework:spring-context-indexer:5.3.16" +} +``` + +With Gradle 4.6 and later, the dependency should be declared in the `annotationProcessor`configuration, as shown in the following example: + +``` +dependencies { + annotationProcessor "org.springframework:spring-context-indexer:5.3.16" +} +``` + +The `spring-context-indexer` artifact generates a `META-INF/spring.components` file that +is included in the jar file. + +| |When working with this mode in your IDE, the `spring-context-indexer` must be
registered as an annotation processor to make sure the index is up-to-date when
candidate components are updated.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The index is enabled automatically when a `META-INF/spring.components` file is found
on the classpath. If an index is partially available for some libraries (or use cases)
but could not be built for the whole application, you can fall back to a regular classpath
arrangement (as though no index were present at all) by setting `spring.index.ignore` to`true`, either as a JVM system property or via the[`SpringProperties`](appendix.html#appendix-spring-properties) mechanism.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.11. Using JSR 330 Standard Annotations + +Starting with Spring 3.0, Spring offers support for JSR-330 standard annotations +(Dependency Injection). Those annotations are scanned in the same way as the Spring +annotations. To use them, you need to have the relevant jars in your classpath. + +| |If you use Maven, the `javax.inject` artifact is available in the standard Maven
repository ([https://repo1.maven.org/maven2/javax/inject/javax.inject/1/](https://repo1.maven.org/maven2/javax/inject/javax.inject/1/)).
You can add the following dependency to your pom.xml file:

```

<dependency>
    <groupId>javax.inject</groupId>
    <artifactId>javax.inject</artifactId>
    <version>1</version>
</dependency>

```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.11.1. Dependency Injection with `@Inject` and `@Named` + +Instead of `@Autowired`, you can use `@javax.inject.Inject` as follows: + +Java + +``` +import javax.inject.Inject; + +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + @Inject + public void setMovieFinder(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } + + public void listMovies() { + this.movieFinder.findMovies(...); + // ... + } +} +``` + +Kotlin + +``` +import javax.inject.Inject + +class SimpleMovieLister { + + @Inject + lateinit var movieFinder: MovieFinder + + fun listMovies() { + movieFinder.findMovies(...) + // ... + } +} +``` + +As with `@Autowired`, you can use `@Inject` at the field level, method level +and constructor-argument level. Furthermore, you may declare your injection point as a`Provider`, allowing for on-demand access to beans of shorter scopes or lazy access to +other beans through a `Provider.get()` call. The following example offers a variant of the +preceding example: + +Java + +``` +import javax.inject.Inject; +import javax.inject.Provider; + +public class SimpleMovieLister { + + private Provider movieFinder; + + @Inject + public void setMovieFinder(Provider movieFinder) { + this.movieFinder = movieFinder; + } + + public void listMovies() { + this.movieFinder.get().findMovies(...); + // ... 
+ } +} +``` + +Kotlin + +``` +import javax.inject.Inject + +class SimpleMovieLister { + + @Inject + lateinit var movieFinder: MovieFinder + + fun listMovies() { + movieFinder.findMovies(...) + // ... + } +} +``` + +If you would like to use a qualified name for the dependency that should be injected, +you should use the `@Named` annotation, as the following example shows: + +Java + +``` +import javax.inject.Inject; +import javax.inject.Named; + +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + @Inject + public void setMovieFinder(@Named("main") MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } + + // ... +} +``` + +Kotlin + +``` +import javax.inject.Inject +import javax.inject.Named + +class SimpleMovieLister { + + private lateinit var movieFinder: MovieFinder + + @Inject + fun setMovieFinder(@Named("main") movieFinder: MovieFinder) { + this.movieFinder = movieFinder + } + + // ... +} +``` + +As with `@Autowired`, `@Inject` can also be used with `java.util.Optional` or`@Nullable`. This is even more applicable here, since `@Inject` does not have +a `required` attribute. The following pair of examples show how to use `@Inject` and`@Nullable`: + +``` +public class SimpleMovieLister { + + @Inject + public void setMovieFinder(Optional movieFinder) { + // ... + } +} +``` + +Java + +``` +public class SimpleMovieLister { + + @Inject + public void setMovieFinder(@Nullable MovieFinder movieFinder) { + // ... + } +} +``` + +Kotlin + +``` +class SimpleMovieLister { + + @Inject + var movieFinder: MovieFinder? = null +} +``` + +#### 1.11.2. 
`@Named` and `@ManagedBean`: Standard Equivalents to the `@Component` Annotation + +Instead of `@Component`, you can use `@javax.inject.Named` or `javax.annotation.ManagedBean`, +as the following example shows: + +Java + +``` +import javax.inject.Inject; +import javax.inject.Named; + +@Named("movieListener") // @ManagedBean("movieListener") could be used as well +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + @Inject + public void setMovieFinder(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } + + // ... +} +``` + +Kotlin + +``` +import javax.inject.Inject +import javax.inject.Named + +@Named("movieListener") // @ManagedBean("movieListener") could be used as well +class SimpleMovieLister { + + @Inject + lateinit var movieFinder: MovieFinder + + // ... +} +``` + +It is very common to use `@Component` without specifying a name for the component.`@Named` can be used in a similar fashion, as the following example shows: + +Java + +``` +import javax.inject.Inject; +import javax.inject.Named; + +@Named +public class SimpleMovieLister { + + private MovieFinder movieFinder; + + @Inject + public void setMovieFinder(MovieFinder movieFinder) { + this.movieFinder = movieFinder; + } + + // ... +} +``` + +Kotlin + +``` +import javax.inject.Inject +import javax.inject.Named + +@Named +class SimpleMovieLister { + + @Inject + lateinit var movieFinder: MovieFinder + + // ... +} +``` + +When you use `@Named` or `@ManagedBean`, you can use component scanning in the +exact same way as when you use Spring annotations, as the following example shows: + +Java + +``` +@Configuration +@ComponentScan(basePackages = "org.example") +public class AppConfig { + // ... +} +``` + +Kotlin + +``` +@Configuration +@ComponentScan(basePackages = ["org.example"]) +class AppConfig { + // ... +} +``` + +| |In contrast to `@Component`, the JSR-330 `@Named` and the JSR-250 `@ManagedBean`annotations are not composable. 
You should use Spring’s stereotype model for building
custom component annotations.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.11.3. Limitations of JSR-330 Standard Annotations + +When you work with standard annotations, you should know that some significant +features are not available, as the following table shows: + +| Spring | javax.inject.\* | javax.inject restrictions / comments | +|-------------------|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| @Autowired | @Inject | `@Inject` has no 'required' attribute. Can be used with Java 8’s `Optional` instead. | +| @Component |@Named / @ManagedBean| JSR-330 does not provide a composable model, only a way to identify named components. | +|@Scope("singleton")| @Singleton |The JSR-330 default scope is like Spring’s `prototype`. However, in order to keep it
consistent with Spring’s general defaults, a JSR-330 bean declared in the Spring
container is a `singleton` by default. In order to use a scope other than `singleton`,
you should use Spring’s `@Scope` annotation. `javax.inject` also provides a [@Scope](https://download.oracle.com/javaee/6/api/javax/inject/Scope.html) annotation.
Nevertheless, this one is only intended to be used for creating your own annotations.| +| @Qualifier | @Qualifier / @Named | `javax.inject.Qualifier` is just a meta-annotation for building custom qualifiers.
Concrete `String` qualifiers (like Spring’s `@Qualifier` with a value) can be associated
through `javax.inject.Named`. | +| @Value | \- | no equivalent | +| @Required | \- | no equivalent | +| @Lazy | \- | no equivalent | +| ObjectFactory | Provider | `javax.inject.Provider` is a direct alternative to Spring’s `ObjectFactory`,
only with a shorter `get()` method name. It can also be used in combination with
Spring’s `@Autowired` or with non-annotated constructors and setter methods. | + +### 1.12. Java-based Container Configuration + +This section covers how to use annotations in your Java code to configure the Spring +container. It includes the following topics: + +* [Basic Concepts: `@Bean` and `@Configuration`](#beans-java-basic-concepts) + +* [Instantiating the Spring Container by Using `AnnotationConfigApplicationContext`](#beans-java-instantiating-container) + +* [Using the `@Bean` Annotation](#beans-java-bean-annotation) + +* [Using the `@Configuration` annotation](#beans-java-configuration-annotation) + +* [Composing Java-based Configurations](#beans-java-composing-configuration-classes) + +* [Bean Definition Profiles](#beans-definition-profiles) + +* [`PropertySource` Abstraction](#beans-property-source-abstraction) + +* [Using `@PropertySource`](#beans-using-propertysource) + +* [Placeholder Resolution in Statements](#beans-placeholder-resolution-in-statements) + +#### 1.12.1. Basic Concepts: `@Bean` and `@Configuration` + +The central artifacts in Spring’s new Java-configuration support are`@Configuration`-annotated classes and `@Bean`-annotated methods. + +The `@Bean` annotation is used to indicate that a method instantiates, configures, and +initializes a new object to be managed by the Spring IoC container. For those familiar +with Spring’s `` XML configuration, the `@Bean` annotation plays the same role as +the `` element. You can use `@Bean`-annotated methods with any Spring`@Component`. However, they are most often used with `@Configuration` beans. + +Annotating a class with `@Configuration` indicates that its primary purpose is as a +source of bean definitions. Furthermore, `@Configuration` classes let inter-bean +dependencies be defined by calling other `@Bean` methods in the same class. 
+The simplest possible `@Configuration` class reads as follows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public MyService myService() { + return new MyServiceImpl(); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun myService(): MyService { + return MyServiceImpl() + } +} +``` + +The preceding `AppConfig` class is equivalent to the following Spring `` XML: + +``` + + + +``` + +Full @Configuration vs “lite” @Bean mode? + +When `@Bean` methods are declared within classes that are not annotated with`@Configuration`, they are referred to as being processed in a “lite” mode. Bean methods +declared in a `@Component` or even in a plain old class are considered to be “lite”, +with a different primary purpose of the containing class and a `@Bean` method +being a sort of bonus there. For example, service components may expose management views +to the container through an additional `@Bean` method on each applicable component class. +In such scenarios, `@Bean` methods are a general-purpose factory method mechanism. + +Unlike full `@Configuration`, lite `@Bean` methods cannot declare inter-bean dependencies. +Instead, they operate on their containing component’s internal state and, optionally, on +arguments that they may declare. Such a `@Bean` method should therefore not invoke other`@Bean` methods. Each such method is literally only a factory method for a particular +bean reference, without any special runtime semantics. The positive side-effect here is +that no CGLIB subclassing has to be applied at runtime, so there are no limitations in +terms of class design (that is, the containing class may be `final` and so forth). + +In common scenarios, `@Bean` methods are to be declared within `@Configuration` classes, +ensuring that “full” mode is always used and that cross-method references therefore +get redirected to the container’s lifecycle management. 
This prevents the same`@Bean` method from accidentally being invoked through a regular Java call, which helps +to reduce subtle bugs that can be hard to track down when operating in “lite” mode. + +The `@Bean` and `@Configuration` annotations are discussed in depth in the following sections. +First, however, we cover the various ways of creating a spring container by using +Java-based configuration. + +#### 1.12.2. Instantiating the Spring Container by Using `AnnotationConfigApplicationContext` #### + +The following sections document Spring’s `AnnotationConfigApplicationContext`, introduced in Spring +3.0. This versatile `ApplicationContext` implementation is capable of accepting not only`@Configuration` classes as input but also plain `@Component` classes and classes +annotated with JSR-330 metadata. + +When `@Configuration` classes are provided as input, the `@Configuration` class itself +is registered as a bean definition and all declared `@Bean` methods within the class +are also registered as bean definitions. + +When `@Component` and JSR-330 classes are provided, they are registered as bean +definitions, and it is assumed that DI metadata such as `@Autowired` or `@Inject` are +used within those classes where necessary. + +##### Simple Construction + +In much the same way that Spring XML files are used as input when instantiating a`ClassPathXmlApplicationContext`, you can use `@Configuration` classes as input when +instantiating an `AnnotationConfigApplicationContext`. 
This allows for completely +XML-free usage of the Spring container, as the following example shows: + +Java + +``` +public static void main(String[] args) { + ApplicationContext ctx = new AnnotationConfigApplicationContext(AppConfig.class); + MyService myService = ctx.getBean(MyService.class); + myService.doStuff(); +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +fun main() { + val ctx = AnnotationConfigApplicationContext(AppConfig::class.java) + val myService = ctx.getBean() + myService.doStuff() +} +``` + +As mentioned earlier, `AnnotationConfigApplicationContext` is not limited to working only +with `@Configuration` classes. Any `@Component` or JSR-330 annotated class may be supplied +as input to the constructor, as the following example shows: + +Java + +``` +public static void main(String[] args) { + ApplicationContext ctx = new AnnotationConfigApplicationContext(MyServiceImpl.class, Dependency1.class, Dependency2.class); + MyService myService = ctx.getBean(MyService.class); + myService.doStuff(); +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +fun main() { + val ctx = AnnotationConfigApplicationContext(MyServiceImpl::class.java, Dependency1::class.java, Dependency2::class.java) + val myService = ctx.getBean() + myService.doStuff() +} +``` + +The preceding example assumes that `MyServiceImpl`, `Dependency1`, and `Dependency2` use Spring +dependency injection annotations such as `@Autowired`. + +##### Building the Container Programmatically by Using `register(Class…​)` ##### + +You can instantiate an `AnnotationConfigApplicationContext` by using a no-arg constructor +and then configure it by using the `register()` method. This approach is particularly useful +when programmatically building an `AnnotationConfigApplicationContext`. 
The following +example shows how to do so: + +Java + +``` +public static void main(String[] args) { + AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(); + ctx.register(AppConfig.class, OtherConfig.class); + ctx.register(AdditionalConfig.class); + ctx.refresh(); + MyService myService = ctx.getBean(MyService.class); + myService.doStuff(); +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +fun main() { + val ctx = AnnotationConfigApplicationContext() + ctx.register(AppConfig::class.java, OtherConfig::class.java) + ctx.register(AdditionalConfig::class.java) + ctx.refresh() + val myService = ctx.getBean() + myService.doStuff() +} +``` + +##### Enabling Component Scanning with `scan(String…​)` + +To enable component scanning, you can annotate your `@Configuration` class as follows: + +Java + +``` +@Configuration +@ComponentScan(basePackages = "com.acme") (1) +public class AppConfig { + // ... +} +``` + +|**1**|This annotation enables component scanning.| +|-----|-------------------------------------------| + +Kotlin + +``` +@Configuration +@ComponentScan(basePackages = ["com.acme"]) (1) +class AppConfig { + // ... +} +``` + +|**1**|This annotation enables component scanning.| +|-----|-------------------------------------------| + +| |Experienced Spring users may be familiar with the XML declaration equivalent from
Spring’s `context:` namespace, shown in the following example:

```
<beans>
    <context:component-scan base-package="com.acme"/>
</beans>
```| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In the preceding example, the `com.acme` package is scanned to look for any`@Component`-annotated classes, and those classes are registered as Spring bean +definitions within the container. `AnnotationConfigApplicationContext` exposes the`scan(String…​)` method to allow for the same component-scanning functionality, as the +following example shows: + +Java + +``` +public static void main(String[] args) { + AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(); + ctx.scan("com.acme"); + ctx.refresh(); + MyService myService = ctx.getBean(MyService.class); +} +``` + +Kotlin + +``` +fun main() { + val ctx = AnnotationConfigApplicationContext() + ctx.scan("com.acme") + ctx.refresh() + val myService = ctx.getBean() +} +``` + +| |Remember that `@Configuration` classes are [meta-annotated](#beans-meta-annotations)with `@Component`, so they are candidates for component-scanning. In the preceding example,
assuming that `AppConfig` is declared within the `com.acme` package (or any package
underneath), it is picked up during the call to `scan()`. Upon `refresh()`, all its `@Bean`methods are processed and registered as bean definitions within the container.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Support for Web Applications with `AnnotationConfigWebApplicationContext` ##### + +A `WebApplicationContext` variant of `AnnotationConfigApplicationContext` is available +with `AnnotationConfigWebApplicationContext`. You can use this implementation when +configuring the Spring `ContextLoaderListener` servlet listener, Spring MVC`DispatcherServlet`, and so forth. The following `web.xml` snippet configures a typical +Spring MVC web application (note the use of the `contextClass` context-param and +init-param): + +``` + + + + contextClass + + org.springframework.web.context.support.AnnotationConfigWebApplicationContext + + + + + + contextConfigLocation + com.acme.AppConfig + + + + + org.springframework.web.context.ContextLoaderListener + + + + + dispatcher + org.springframework.web.servlet.DispatcherServlet + + + contextClass + + org.springframework.web.context.support.AnnotationConfigWebApplicationContext + + + + + contextConfigLocation + com.acme.web.MvcConfig + + + + + + dispatcher + /app/* + + +``` + +| |For programmatic use cases, a `GenericWebApplicationContext` can be used as an
alternative to `AnnotationConfigWebApplicationContext`. See the[`GenericWebApplicationContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/context/support/GenericWebApplicationContext.html)javadoc for details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.12.3. Using the `@Bean` Annotation + +`@Bean` is a method-level annotation and a direct analog of the XML `` element. +The annotation supports some of the attributes offered by ``, such as: + +* [init-method](#beans-factory-lifecycle-initializingbean) + +* [destroy-method](#beans-factory-lifecycle-disposablebean) + +* [autowiring](#beans-factory-autowire) + +* `name`. + +You can use the `@Bean` annotation in a `@Configuration`-annotated or in a`@Component`-annotated class. + +##### Declaring a Bean + +To declare a bean, you can annotate a method with the `@Bean` annotation. You use this +method to register a bean definition within an `ApplicationContext` of the type +specified as the method’s return value. By default, the bean name is the same as +the method name. 
The following example shows a `@Bean` method declaration: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public TransferServiceImpl transferService() { + return new TransferServiceImpl(); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun transferService() = TransferServiceImpl() +} +``` + +The preceding configuration is exactly equivalent to the following Spring XML: + +``` + + + +``` + +Both declarations make a bean named `transferService` available in the`ApplicationContext`, bound to an object instance of type `TransferServiceImpl`, as the +following text image shows: + +``` +transferService -> com.acme.TransferServiceImpl +``` + +You can also use default methods to define beans. This allows composition of bean +configurations by implementing interfaces with bean definitions on default methods. + +Java + +``` +public interface BaseConfig { + + @Bean + default TransferServiceImpl transferService() { + return new TransferServiceImpl(); + } +} + +@Configuration +public class AppConfig implements BaseConfig { + +} +``` + +You can also declare your `@Bean` method with an interface (or base class) +return type, as the following example shows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public TransferService transferService() { + return new TransferServiceImpl(); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun transferService(): TransferService { + return TransferServiceImpl() + } +} +``` + +However, this limits the visibility for advance type prediction to the specified +interface type (`TransferService`). Then, with the full type (`TransferServiceImpl`) +known to the container only once the affected singleton bean has been instantiated. 
+Non-lazy singleton beans get instantiated according to their declaration order, +so you may see different type matching results depending on when another component +tries to match by a non-declared type (such as `@Autowired TransferServiceImpl`, +which resolves only once the `transferService` bean has been instantiated). + +| |If you consistently refer to your types by a declared service interface, your`@Bean` return types may safely join that design decision. However, for components
that implement several interfaces or for components potentially referred to by their
implementation type, it is safer to declare the most specific return type possible
(at least as specific as required by the injection points that refer to your bean).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Bean Dependencies + +A `@Bean`-annotated method can have an arbitrary number of parameters that describe the +dependencies required to build that bean. For instance, if our `TransferService`requires an `AccountRepository`, we can materialize that dependency with a method +parameter, as the following example shows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public TransferService transferService(AccountRepository accountRepository) { + return new TransferServiceImpl(accountRepository); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun transferService(accountRepository: AccountRepository): TransferService { + return TransferServiceImpl(accountRepository) + } +} +``` + +The resolution mechanism is pretty much identical to constructor-based dependency +injection. See [the relevant section](#beans-constructor-injection) for more details. + +##### Receiving Lifecycle Callbacks + +Any classes defined with the `@Bean` annotation support the regular lifecycle callbacks +and can use the `@PostConstruct` and `@PreDestroy` annotations from JSR-250. See[JSR-250 annotations](#beans-postconstruct-and-predestroy-annotations) for further +details. + +The regular Spring [lifecycle](#beans-factory-nature) callbacks are fully supported as +well. If a bean implements `InitializingBean`, `DisposableBean`, or `Lifecycle`, their +respective methods are called by the container. 
+ +The standard set of `*Aware` interfaces (such as [BeanFactoryAware](#beans-beanfactory),[BeanNameAware](#beans-factory-aware),[MessageSourceAware](#context-functionality-messagesource),[ApplicationContextAware](#beans-factory-aware), and so on) are also fully supported. + +The `@Bean` annotation supports specifying arbitrary initialization and destruction +callback methods, much like Spring XML’s `init-method` and `destroy-method` attributes +on the `bean` element, as the following example shows: + +Java + +``` +public class BeanOne { + + public void init() { + // initialization logic + } +} + +public class BeanTwo { + + public void cleanup() { + // destruction logic + } +} + +@Configuration +public class AppConfig { + + @Bean(initMethod = "init") + public BeanOne beanOne() { + return new BeanOne(); + } + + @Bean(destroyMethod = "cleanup") + public BeanTwo beanTwo() { + return new BeanTwo(); + } +} +``` + +Kotlin + +``` +class BeanOne { + + fun init() { + // initialization logic + } +} + +class BeanTwo { + + fun cleanup() { + // destruction logic + } +} + +@Configuration +class AppConfig { + + @Bean(initMethod = "init") + fun beanOne() = BeanOne() + + @Bean(destroyMethod = "cleanup") + fun beanTwo() = BeanTwo() +} +``` + +| |By default, beans defined with Java configuration that have a public `close` or `shutdown`method are automatically enlisted with a destruction callback. If you have a public`close` or `shutdown` method and you do not wish for it to be called when the container
shuts down, you can add `@Bean(destroyMethod="")` to your bean definition to disable the
default `(inferred)` mode.

You may want to do that by default for a resource that you acquire with JNDI, as its
lifecycle is managed outside the application. In particular, make sure to always do it
for a `DataSource`, as it is known to be problematic on Java EE application servers.

The following example shows how to prevent an automatic destruction callback for a `DataSource`:

Java

```
@Bean(destroyMethod="")
public DataSource dataSource() throws NamingException {
return (DataSource) jndiTemplate.lookup("MyDS");
}
```

Kotlin

```
@Bean(destroyMethod = "")
fun dataSource(): DataSource {
return jndiTemplate.lookup("MyDS") as DataSource
}
```

Also, with `@Bean` methods, you typically use programmatic JNDI lookups, either by
using Spring’s `JndiTemplate` or `JndiLocatorDelegate` helpers or straight JNDI `InitialContext` usage but not the `JndiObjectFactoryBean` variant (which would force
you to declare the return type as the `FactoryBean` type instead of the actual target
type, making it harder to use for cross-reference calls in other `@Bean` methods that
intend to refer to the provided resource here).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In the case of `BeanOne` from the example above the preceding note, it would be equally valid to call the `init()`method directly during construction, as the following example shows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public BeanOne beanOne() { + BeanOne beanOne = new BeanOne(); + beanOne.init(); 
+ return beanOne; + } + + // ... +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun beanOne() = BeanOne().apply { + init() + } + + // ... +} +``` + +| |When you work directly in Java, you can do anything you like with your objects and do
not always need to rely on the container lifecycle.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------| + +##### Specifying Bean Scope + +Spring includes the `@Scope` annotation so that you can specify the scope of a bean. + +###### Using the `@Scope` Annotation + +You can specify that your beans defined with the `@Bean` annotation should have a +specific scope. You can use any of the standard scopes specified in the[Bean Scopes](#beans-factory-scopes) section. + +The default scope is `singleton`, but you can override this with the `@Scope` annotation, +as the following example shows: + +Java + +``` +@Configuration +public class MyConfiguration { + + @Bean + @Scope("prototype") + public Encryptor encryptor() { + // ... + } +} +``` + +Kotlin + +``` +@Configuration +class MyConfiguration { + + @Bean + @Scope("prototype") + fun encryptor(): Encryptor { + // ... + } +} +``` + +###### `@Scope` and `scoped-proxy` + +Spring offers a convenient way of working with scoped dependencies through[scoped proxies](#beans-factory-scopes-other-injection). The easiest way to create +such a proxy when using the XML configuration is the `` element. +Configuring your beans in Java with a `@Scope` annotation offers equivalent support +with the `proxyMode` attribute. The default is `ScopedProxyMode.DEFAULT`, which +typically indicates that no scoped proxy should be created unless a different default +has been configured at the component-scan instruction level. You can specify`ScopedProxyMode.TARGET_CLASS`, `ScopedProxyMode.INTERFACES` or `ScopedProxyMode.NO`. 
+ +If you port the scoped proxy example from the XML reference documentation (see[scoped proxies](#beans-factory-scopes-other-injection)) to our `@Bean` using Java, +it resembles the following: + +Java + +``` +// an HTTP Session-scoped bean exposed as a proxy +@Bean +@SessionScope +public UserPreferences userPreferences() { + return new UserPreferences(); +} + +@Bean +public Service userService() { + UserService service = new SimpleUserService(); + // a reference to the proxied userPreferences bean + service.setUserPreferences(userPreferences()); + return service; +} +``` + +Kotlin + +``` +// an HTTP Session-scoped bean exposed as a proxy +@Bean +@SessionScope +fun userPreferences() = UserPreferences() + +@Bean +fun userService(): Service { + return SimpleUserService().apply { + // a reference to the proxied userPreferences bean + setUserPreferences(userPreferences()) + } +} +``` + +##### Customizing Bean Naming + +By default, configuration classes use a `@Bean` method’s name as the name of the +resulting bean. This functionality can be overridden, however, with the `name` attribute, +as the following example shows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean("myThing") + public Thing thing() { + return new Thing(); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean("myThing") + fun thing() = Thing() +} +``` + +##### Bean Aliasing + +As discussed in [Naming Beans](#beans-beanname), it is sometimes desirable to give a single bean +multiple names, otherwise known as bean aliasing. The `name` attribute of the `@Bean`annotation accepts a String array for this purpose. The following example shows how to set +a number of aliases for a bean: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean({"dataSource", "subsystemA-dataSource", "subsystemB-dataSource"}) + public DataSource dataSource() { + // instantiate, configure and return DataSource bean... 
+ } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean("dataSource", "subsystemA-dataSource", "subsystemB-dataSource") + fun dataSource(): DataSource { + // instantiate, configure and return DataSource bean... + } +} +``` + +##### Bean Description + +Sometimes, it is helpful to provide a more detailed textual description of a bean. This can +be particularly useful when beans are exposed (perhaps through JMX) for monitoring purposes. + +To add a description to a `@Bean`, you can use the[`@Description`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/Description.html)annotation, as the following example shows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + @Description("Provides a basic example of a bean") + public Thing thing() { + return new Thing(); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + @Description("Provides a basic example of a bean") + fun thing() = Thing() +} +``` + +#### 1.12.4. Using the `@Configuration` annotation + +`@Configuration` is a class-level annotation indicating that an object is a source of +bean definitions. `@Configuration` classes declare beans through `@Bean`-annotated +methods. Calls to `@Bean` methods on `@Configuration` classes can also be used to define +inter-bean dependencies. See [Basic Concepts: `@Bean` and `@Configuration`](#beans-java-basic-concepts) for a general introduction. 
+ +##### Injecting Inter-bean Dependencies + +When beans have dependencies on one another, expressing that dependency is as simple +as having one bean method call another, as the following example shows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public BeanOne beanOne() { + return new BeanOne(beanTwo()); + } + + @Bean + public BeanTwo beanTwo() { + return new BeanTwo(); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun beanOne() = BeanOne(beanTwo()) + + @Bean + fun beanTwo() = BeanTwo() +} +``` + +In the preceding example, `beanOne` receives a reference to `beanTwo` through constructor +injection. + +| |This method of declaring inter-bean dependencies works only when the `@Bean` method
is declared within a `@Configuration` class. You cannot declare inter-bean dependencies
by using plain `@Component` classes.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Lookup Method Injection + +As noted earlier, [lookup method injection](#beans-factory-method-injection) is an +advanced feature that you should use rarely. It is useful in cases where a +singleton-scoped bean has a dependency on a prototype-scoped bean. Using Java for this +type of configuration provides a natural means for implementing this pattern. The +following example shows how to use lookup method injection: + +Java + +``` +public abstract class CommandManager { + public Object process(Object commandState) { + // grab a new instance of the appropriate Command interface + Command command = createCommand(); + // set the state on the (hopefully brand new) Command instance + command.setState(commandState); + return command.execute(); + } + + // okay... but where is the implementation of this method? + protected abstract Command createCommand(); +} +``` + +Kotlin + +``` +abstract class CommandManager { + fun process(commandState: Any): Any { + // grab a new instance of the appropriate Command interface + val command = createCommand() + // set the state on the (hopefully brand new) Command instance + command.setState(commandState) + return command.execute() + } + + // okay... but where is the implementation of this method? + protected abstract fun createCommand(): Command +} +``` + +By using Java configuration, you can create a subclass of `CommandManager` where +the abstract `createCommand()` method is overridden in such a way that it looks up a new +(prototype) command object. 
The following example shows how to do so: + +Java + +``` +@Bean +@Scope("prototype") +public AsyncCommand asyncCommand() { + AsyncCommand command = new AsyncCommand(); + // inject dependencies here as required + return command; +} + +@Bean +public CommandManager commandManager() { + // return new anonymous implementation of CommandManager with createCommand() + // overridden to return a new prototype Command object + return new CommandManager() { + protected Command createCommand() { + return asyncCommand(); + } + } +} +``` + +Kotlin + +``` +@Bean +@Scope("prototype") +fun asyncCommand(): AsyncCommand { + val command = AsyncCommand() + // inject dependencies here as required + return command +} + +@Bean +fun commandManager(): CommandManager { + // return new anonymous implementation of CommandManager with createCommand() + // overridden to return a new prototype Command object + return object : CommandManager() { + override fun createCommand(): Command { + return asyncCommand() + } + } +} +``` + +##### Further Information About How Java-based Configuration Works Internally ##### + +Consider the following example, which shows a `@Bean` annotated method being called twice: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public ClientService clientService1() { + ClientServiceImpl clientService = new ClientServiceImpl(); + clientService.setClientDao(clientDao()); + return clientService; + } + + @Bean + public ClientService clientService2() { + ClientServiceImpl clientService = new ClientServiceImpl(); + clientService.setClientDao(clientDao()); + return clientService; + } + + @Bean + public ClientDao clientDao() { + return new ClientDaoImpl(); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun clientService1(): ClientService { + return ClientServiceImpl().apply { + clientDao = clientDao() + } + } + + @Bean + fun clientService2(): ClientService { + return ClientServiceImpl().apply { + clientDao = clientDao() + } + } + + 
@Bean + fun clientDao(): ClientDao { + return ClientDaoImpl() + } +} +``` + +`clientDao()` has been called once in `clientService1()` and once in `clientService2()`. +Since this method creates a new instance of `ClientDaoImpl` and returns it, you would +normally expect to have two instances (one for each service). That definitely would be +problematic: In Spring, instantiated beans have a `singleton` scope by default. This is +where the magic comes in: All `@Configuration` classes are subclassed at startup-time +with `CGLIB`. In the subclass, the child method checks the container first for any +cached (scoped) beans before it calls the parent method and creates a new instance. + +| |The behavior could be different according to the scope of your bean. We are talking
about singletons here.| +|---|--------------------------------------------------------------------------------------------------------------| + +| |As of Spring 3.2, it is no longer necessary to add CGLIB to your classpath because CGLIB
classes have been repackaged under `org.springframework.cglib` and included directly
within the spring-core JAR.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |There are a few restrictions due to the fact that CGLIB dynamically adds features at
startup-time. In particular, configuration classes must not be final. However, as
of 4.3, any constructors are allowed on configuration classes, including the use of `@Autowired` or a single non-default constructor declaration for default injection.

If you prefer to avoid any CGLIB-imposed limitations, consider declaring your `@Bean` methods on non-`@Configuration` classes (for example, on plain `@Component` classes instead).
Cross-method calls between `@Bean` methods are not then intercepted, so you have
to exclusively rely on dependency injection at the constructor or method level there.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.12.5. Composing Java-based Configurations + +Spring’s Java-based configuration feature lets you compose annotations, which can reduce +the complexity of your configuration. + +##### Using the `@Import` Annotation + +Much as the `` element is used within Spring XML files to aid in modularizing +configurations, the `@Import` annotation allows for loading `@Bean` definitions from +another configuration class, as the following example shows: + +Java + +``` +@Configuration +public class ConfigA { + + @Bean + public A a() { + return new A(); + } +} + +@Configuration +@Import(ConfigA.class) +public class ConfigB { + + @Bean + public B b() { + return new B(); + } +} +``` + +Kotlin + +``` +@Configuration +class ConfigA { + + @Bean + fun a() = A() +} + +@Configuration +@Import(ConfigA::class) +class ConfigB { + + @Bean + fun b() = B() +} +``` + +Now, rather than needing to specify both `ConfigA.class` and `ConfigB.class` when +instantiating the context, only `ConfigB` needs to be supplied explicitly, as the +following example shows: + +Java + +``` +public static void main(String[] args) { + ApplicationContext ctx = new AnnotationConfigApplicationContext(ConfigB.class); + + // now 
both beans A and B will be available... + A a = ctx.getBean(A.class); + B b = ctx.getBean(B.class); +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +fun main() { + val ctx = AnnotationConfigApplicationContext(ConfigB::class.java) + + // now both beans A and B will be available... + val a = ctx.getBean() + val b = ctx.getBean() +} +``` + +This approach simplifies container instantiation, as only one class needs to be dealt +with, rather than requiring you to remember a potentially large number of`@Configuration` classes during construction. + +| |As of Spring Framework 4.2, `@Import` also supports references to regular component
classes, analogous to the `AnnotationConfigApplicationContext.register` method.
This is particularly useful if you want to avoid component scanning, by using a few
configuration classes as entry points to explicitly define all your components.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Injecting Dependencies on Imported `@Bean` Definitions + +The preceding example works but is simplistic. In most practical scenarios, beans have +dependencies on one another across configuration classes. When using XML, this is not an +issue, because no compiler is involved, and you can declare`ref="someBean"` and trust Spring to work it out during container initialization. +When using `@Configuration` classes, the Java compiler places constraints on +the configuration model, in that references to other beans must be valid Java syntax. + +Fortunately, solving this problem is simple. As [we already discussed](#beans-java-dependencies), +a `@Bean` method can have an arbitrary number of parameters that describe the bean +dependencies. 
Consider the following more real-world scenario with several `@Configuration`classes, each depending on beans declared in the others: + +Java + +``` +@Configuration +public class ServiceConfig { + + @Bean + public TransferService transferService(AccountRepository accountRepository) { + return new TransferServiceImpl(accountRepository); + } +} + +@Configuration +public class RepositoryConfig { + + @Bean + public AccountRepository accountRepository(DataSource dataSource) { + return new JdbcAccountRepository(dataSource); + } +} + +@Configuration +@Import({ServiceConfig.class, RepositoryConfig.class}) +public class SystemTestConfig { + + @Bean + public DataSource dataSource() { + // return new DataSource + } +} + +public static void main(String[] args) { + ApplicationContext ctx = new AnnotationConfigApplicationContext(SystemTestConfig.class); + // everything wires up across configuration classes... + TransferService transferService = ctx.getBean(TransferService.class); + transferService.transfer(100.00, "A123", "C456"); +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +@Configuration +class ServiceConfig { + + @Bean + fun transferService(accountRepository: AccountRepository): TransferService { + return TransferServiceImpl(accountRepository) + } +} + +@Configuration +class RepositoryConfig { + + @Bean + fun accountRepository(dataSource: DataSource): AccountRepository { + return JdbcAccountRepository(dataSource) + } +} + +@Configuration +@Import(ServiceConfig::class, RepositoryConfig::class) +class SystemTestConfig { + + @Bean + fun dataSource(): DataSource { + // return new DataSource + } +} + +fun main() { + val ctx = AnnotationConfigApplicationContext(SystemTestConfig::class.java) + // everything wires up across configuration classes... + val transferService = ctx.getBean() + transferService.transfer(100.00, "A123", "C456") +} +``` + +There is another way to achieve the same result. 
Remember that `@Configuration` classes are +ultimately only another bean in the container: This means that they can take advantage of`@Autowired` and `@Value` injection and other features the same as any other bean. + +| |Make sure that the dependencies you inject that way are of the simplest kind only. `@Configuration`classes are processed quite early during the initialization of the context, and forcing a dependency
to be injected this way may lead to unexpected early initialization. Whenever possible, resort to
parameter-based injection, as in the preceding example.

Also, be particularly careful with `BeanPostProcessor` and `BeanFactoryPostProcessor` definitions
through `@Bean`. Those should usually be declared as `static @Bean` methods, not triggering the
instantiation of their containing configuration class. Otherwise, `@Autowired` and `@Value` may not
work on the configuration class itself, since it is possible to create it as a bean instance earlier than[`AutowiredAnnotationBeanPostProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessor.html).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how one bean can be autowired to another bean: + +Java + +``` +@Configuration +public class ServiceConfig { + + @Autowired + private AccountRepository accountRepository; + + @Bean + public TransferService transferService() { + return new TransferServiceImpl(accountRepository); + } +} + +@Configuration +public class RepositoryConfig { + + private final DataSource dataSource; + + public RepositoryConfig(DataSource dataSource) { + this.dataSource = dataSource; + } + + @Bean + public AccountRepository accountRepository() { + return new JdbcAccountRepository(dataSource); + } +} + +@Configuration +@Import({ServiceConfig.class, RepositoryConfig.class}) +public class SystemTestConfig 
{ + + @Bean + public DataSource dataSource() { + // return new DataSource + } +} + +public static void main(String[] args) { + ApplicationContext ctx = new AnnotationConfigApplicationContext(SystemTestConfig.class); + // everything wires up across configuration classes... + TransferService transferService = ctx.getBean(TransferService.class); + transferService.transfer(100.00, "A123", "C456"); +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +@Configuration +class ServiceConfig { + + @Autowired + lateinit var accountRepository: AccountRepository + + @Bean + fun transferService(): TransferService { + return TransferServiceImpl(accountRepository) + } +} + +@Configuration +class RepositoryConfig(private val dataSource: DataSource) { + + @Bean + fun accountRepository(): AccountRepository { + return JdbcAccountRepository(dataSource) + } +} + +@Configuration +@Import(ServiceConfig::class, RepositoryConfig::class) +class SystemTestConfig { + + @Bean + fun dataSource(): DataSource { + // return new DataSource + } +} + +fun main() { + val ctx = AnnotationConfigApplicationContext(SystemTestConfig::class.java) + // everything wires up across configuration classes... + val transferService = ctx.getBean() + transferService.transfer(100.00, "A123", "C456") +} +``` + +| |Constructor injection in `@Configuration` classes is only supported as of Spring
Framework 4.3. Note also that there is no need to specify `@Autowired` if the target
bean defines only one constructor.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +[]()Fully-qualifying imported beans for ease of navigation + +In the preceding scenario, using `@Autowired` works well and provides the desired +modularity, but determining exactly where the autowired bean definitions are declared is +still somewhat ambiguous. For example, as a developer looking at `ServiceConfig`, how do +you know exactly where the `@Autowired AccountRepository` bean is declared? It is not +explicit in the code, and this may be just fine. Remember that the[Spring Tools for Eclipse](https://spring.io/tools) provides tooling that +can render graphs showing how everything is wired, which may be all you need. Also, +your Java IDE can easily find all declarations and uses of the `AccountRepository` type +and quickly show you the location of `@Bean` methods that return that type. + +In cases where this ambiguity is not acceptable and you wish to have direct navigation +from within your IDE from one `@Configuration` class to another, consider autowiring the +configuration classes themselves. The following example shows how to do so: + +Java + +``` +@Configuration +public class ServiceConfig { + + @Autowired + private RepositoryConfig repositoryConfig; + + @Bean + public TransferService transferService() { + // navigate 'through' the config class to the @Bean method! + return new TransferServiceImpl(repositoryConfig.accountRepository()); + } +} +``` + +Kotlin + +``` +@Configuration +class ServiceConfig { + + @Autowired + private lateinit var repositoryConfig: RepositoryConfig + + @Bean + fun transferService(): TransferService { + // navigate 'through' the config class to the @Bean method! 
+ return TransferServiceImpl(repositoryConfig.accountRepository()) + } +} +``` + +In the preceding situation, where `AccountRepository` is defined is completely explicit. +However, `ServiceConfig` is now tightly coupled to `RepositoryConfig`. That is the +tradeoff. This tight coupling can be somewhat mitigated by using interface-based or +abstract class-based `@Configuration` classes. Consider the following example: + +Java + +``` +@Configuration +public class ServiceConfig { + + @Autowired + private RepositoryConfig repositoryConfig; + + @Bean + public TransferService transferService() { + return new TransferServiceImpl(repositoryConfig.accountRepository()); + } +} + +@Configuration +public interface RepositoryConfig { + + @Bean + AccountRepository accountRepository(); +} + +@Configuration +public class DefaultRepositoryConfig implements RepositoryConfig { + + @Bean + public AccountRepository accountRepository() { + return new JdbcAccountRepository(...); + } +} + +@Configuration +@Import({ServiceConfig.class, DefaultRepositoryConfig.class}) // import the concrete config! 
+public class SystemTestConfig { + + @Bean + public DataSource dataSource() { + // return DataSource + } + +} + +public static void main(String[] args) { + ApplicationContext ctx = new AnnotationConfigApplicationContext(SystemTestConfig.class); + TransferService transferService = ctx.getBean(TransferService.class); + transferService.transfer(100.00, "A123", "C456"); +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +@Configuration +class ServiceConfig { + + @Autowired + private lateinit var repositoryConfig: RepositoryConfig + + @Bean + fun transferService(): TransferService { + return TransferServiceImpl(repositoryConfig.accountRepository()) + } +} + +@Configuration +interface RepositoryConfig { + + @Bean + fun accountRepository(): AccountRepository +} + +@Configuration +class DefaultRepositoryConfig : RepositoryConfig { + + @Bean + fun accountRepository(): AccountRepository { + return JdbcAccountRepository(...) + } +} + +@Configuration +@Import(ServiceConfig::class, DefaultRepositoryConfig::class) // import the concrete config! +class SystemTestConfig { + + @Bean + fun dataSource(): DataSource { + // return DataSource + } + +} + +fun main() { + val ctx = AnnotationConfigApplicationContext(SystemTestConfig::class.java) + val transferService = ctx.getBean() + transferService.transfer(100.00, "A123", "C456") +} +``` + +Now `ServiceConfig` is loosely coupled with respect to the concrete`DefaultRepositoryConfig`, and built-in IDE tooling is still useful: You can easily +get a type hierarchy of `RepositoryConfig` implementations. In this +way, navigating `@Configuration` classes and their dependencies becomes no different +than the usual process of navigating interface-based code. + +| |If you want to influence the startup creation order of certain beans, consider
declaring some of them as `@Lazy` (for creation on first access instead of on startup)
or as `@DependsOn` certain other beans (making sure that specific other beans are
created before the current bean, beyond what the latter’s direct dependencies imply).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Conditionally Include `@Configuration` Classes or `@Bean` Methods + +It is often useful to conditionally enable or disable a complete `@Configuration` class +or even individual `@Bean` methods, based on some arbitrary system state. One common +example of this is to use the `@Profile` annotation to activate beans only when a specific +profile has been enabled in the Spring `Environment` (see [Bean Definition Profiles](#beans-definition-profiles)for details). + +The `@Profile` annotation is actually implemented by using a much more flexible annotation +called [`@Conditional`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/Conditional.html). +The `@Conditional` annotation indicates specific`org.springframework.context.annotation.Condition` implementations that should be +consulted before a `@Bean` is registered. + +Implementations of the `Condition` interface provide a `matches(…​)`method that returns `true` or `false`. 
For example, the following listing shows the actual`Condition` implementation used for `@Profile`: + +Java + +``` +@Override +public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) { + // Read the @Profile annotation attributes + MultiValueMap attrs = metadata.getAllAnnotationAttributes(Profile.class.getName()); + if (attrs != null) { + for (Object value : attrs.get("value")) { + if (context.getEnvironment().acceptsProfiles(((String[]) value))) { + return true; + } + } + return false; + } + return true; +} +``` + +Kotlin + +``` +override fun matches(context: ConditionContext, metadata: AnnotatedTypeMetadata): Boolean { + // Read the @Profile annotation attributes + val attrs = metadata.getAllAnnotationAttributes(Profile::class.java.name) + if (attrs != null) { + for (value in attrs["value"]!!) { + if (context.environment.acceptsProfiles(Profiles.of(*value as Array))) { + return true + } + } + return false + } + return true +} +``` + +See the [`@Conditional`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/Conditional.html)javadoc for more detail. + +##### Combining Java and XML Configuration + +Spring’s `@Configuration` class support does not aim to be a 100% complete replacement +for Spring XML. Some facilities, such as Spring XML namespaces, remain an ideal way to +configure the container. In cases where XML is convenient or necessary, you have a +choice: either instantiate the container in an “XML-centric” way by using, for example,`ClassPathXmlApplicationContext`, or instantiate it in a “Java-centric” way by using`AnnotationConfigApplicationContext` and the `@ImportResource` annotation to import XML +as needed. + +###### XML-centric Use of `@Configuration` Classes + +It may be preferable to bootstrap the Spring container from XML and include`@Configuration` classes in an ad-hoc fashion. 
For example, in a large existing codebase +that uses Spring XML, it is easier to create `@Configuration` classes on an +as-needed basis and include them from the existing XML files. Later in this section, we cover the +options for using `@Configuration` classes in this kind of “XML-centric” situation. + +[]()Declaring `@Configuration` classes as plain Spring `` elements + +Remember that `@Configuration` classes are ultimately bean definitions in the +container. In this series examples, we create a `@Configuration` class named `AppConfig` and +include it within `system-test-config.xml` as a `` definition. Because`` is switched on, the container recognizes the`@Configuration` annotation and processes the `@Bean` methods declared in `AppConfig`properly. + +The following example shows an ordinary configuration class in Java: + +Java + +``` +@Configuration +public class AppConfig { + + @Autowired + private DataSource dataSource; + + @Bean + public AccountRepository accountRepository() { + return new JdbcAccountRepository(dataSource); + } + + @Bean + public TransferService transferService() { + return new TransferService(accountRepository()); + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Autowired + private lateinit var dataSource: DataSource + + @Bean + fun accountRepository(): AccountRepository { + return JdbcAccountRepository(dataSource) + } + + @Bean + fun transferService() = TransferService(accountRepository()) +} +``` + +The following example shows part of a sample `system-test-config.xml` file: + +``` + + + + + + + + + + + + + +``` + +The following example shows a possible `jdbc.properties` file: + +``` +jdbc.url=jdbc:hsqldb:hsql://localhost/xdb +jdbc.username=sa +jdbc.password= +``` + +Java + +``` +public static void main(String[] args) { + ApplicationContext ctx = new ClassPathXmlApplicationContext("classpath:/com/acme/system-test-config.xml"); + TransferService transferService = ctx.getBean(TransferService.class); + // ... 
+} +``` + +Kotlin + +``` +fun main() { + val ctx = ClassPathXmlApplicationContext("classpath:/com/acme/system-test-config.xml") + val transferService = ctx.getBean() + // ... +} +``` + +| |In `system-test-config.xml` file, the `AppConfig` `` does not declare an `id`element. While it would be acceptable to do so, it is unnecessary, given that no other bean
ever refers to it, and it is unlikely to be explicitly fetched from the container by name.
Similarly, the `DataSource` bean is only ever autowired by type, so an explicit bean `id`is not strictly required.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +[]() Using \ to pick up `@Configuration` classes + +Because `@Configuration` is meta-annotated with `@Component`, `@Configuration`-annotated +classes are automatically candidates for component scanning. Using the same scenario as +describe in the previous example, we can redefine `system-test-config.xml` to take advantage of component-scanning. +Note that, in this case, we need not explicitly declare``, because `` enables the same +functionality. + +The following example shows the modified `system-test-config.xml` file: + +``` + + + + + + + + + + + +``` + +###### `@Configuration` Class-centric Use of XML with `@ImportResource` ###### + +In applications where `@Configuration` classes are the primary mechanism for configuring +the container, it is still likely necessary to use at least some XML. In these +scenarios, you can use `@ImportResource` and define only as much XML as you need. Doing +so achieves a “Java-centric” approach to configuring the container and keeps XML to a +bare minimum. 
The following example (which includes a configuration class, an XML file +that defines a bean, a properties file, and the `main` class) shows how to use +the `@ImportResource` annotation to achieve “Java-centric” configuration that uses XML +as needed: + +Java + +``` +@Configuration +@ImportResource("classpath:/com/acme/properties-config.xml") +public class AppConfig { + + @Value("${jdbc.url}") + private String url; + + @Value("${jdbc.username}") + private String username; + + @Value("${jdbc.password}") + private String password; + + @Bean + public DataSource dataSource() { + return new DriverManagerDataSource(url, username, password); + } +} +``` + +Kotlin + +``` +@Configuration +@ImportResource("classpath:/com/acme/properties-config.xml") +class AppConfig { + + @Value("\${jdbc.url}") + private lateinit var url: String + + @Value("\${jdbc.username}") + private lateinit var username: String + + @Value("\${jdbc.password}") + private lateinit var password: String + + @Bean + fun dataSource(): DataSource { + return DriverManagerDataSource(url, username, password) + } +} +``` + +``` +properties-config.xml + + + +``` + +``` +jdbc.properties +jdbc.url=jdbc:hsqldb:hsql://localhost/xdb +jdbc.username=sa +jdbc.password= +``` + +Java + +``` +public static void main(String[] args) { + ApplicationContext ctx = new AnnotationConfigApplicationContext(AppConfig.class); + TransferService transferService = ctx.getBean(TransferService.class); + // ... +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +fun main() { + val ctx = AnnotationConfigApplicationContext(AppConfig::class.java) + val transferService = ctx.getBean() + // ... +} +``` + +### 1.13. 
Environment Abstraction + +The [`Environment`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/env/Environment.html) interface +is an abstraction integrated in the container that models two key +aspects of the application environment: [profiles](#beans-definition-profiles)and [properties](#beans-property-source-abstraction). + +A profile is a named, logical group of bean definitions to be registered with the +container only if the given profile is active. Beans may be assigned to a profile +whether defined in XML or with annotations. The role of the `Environment` object with +relation to profiles is in determining which profiles (if any) are currently active, +and which profiles (if any) should be active by default. + +Properties play an important role in almost all applications and may originate from +a variety of sources: properties files, JVM system properties, system environment +variables, JNDI, servlet context parameters, ad-hoc `Properties` objects, `Map` objects, and so +on. The role of the `Environment` object with relation to properties is to provide the +user with a convenient service interface for configuring property sources and resolving +properties from them. + +#### 1.13.1. Bean Definition Profiles + +Bean definition profiles provide a mechanism in the core container that allows for +registration of different beans in different environments. The word, “environment,” +can mean different things to different users, and this feature can help with many +use cases, including: + +* Working against an in-memory datasource in development versus looking up that same + datasource from JNDI when in QA or production. + +* Registering monitoring infrastructure only when deploying an application into a + performance environment. + +* Registering customized implementations of beans for customer A versus customer + B deployments. + +Consider the first use case in a practical application that requires a`DataSource`. 
In a test environment, the configuration might resemble the following: + +Java + +``` +@Bean +public DataSource dataSource() { + return new EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("my-schema.sql") + .addScript("my-test-data.sql") + .build(); +} +``` + +Kotlin + +``` +@Bean +fun dataSource(): DataSource { + return EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("my-schema.sql") + .addScript("my-test-data.sql") + .build() +} +``` + +Now consider how this application can be deployed into a QA or production +environment, assuming that the datasource for the application is registered +with the production application server’s JNDI directory. Our `dataSource` bean +now looks like the following listing: + +Java + +``` +@Bean(destroyMethod="") +public DataSource dataSource() throws Exception { + Context ctx = new InitialContext(); + return (DataSource) ctx.lookup("java:comp/env/jdbc/datasource"); +} +``` + +Kotlin + +``` +@Bean(destroyMethod = "") +fun dataSource(): DataSource { + val ctx = InitialContext() + return ctx.lookup("java:comp/env/jdbc/datasource") as DataSource +} +``` + +The problem is how to switch between using these two variations based on the +current environment. Over time, Spring users have devised a number of ways to +get this done, usually relying on a combination of system environment variables +and XML `` statements containing `${placeholder}` tokens that resolve +to the correct configuration file path depending on the value of an environment +variable. Bean definition profiles is a core container feature that provides a +solution to this problem. + +If we generalize the use case shown in the preceding example of environment-specific bean +definitions, we end up with the need to register certain bean definitions in +certain contexts but not in others. You could say that you want to register a +certain profile of bean definitions in situation A and a different profile in +situation B. 
We start by updating our configuration to reflect this need. + +##### Using `@Profile` + +The [`@Profile`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/Profile.html)annotation lets you indicate that a component is eligible for registration +when one or more specified profiles are active. Using our preceding example, we +can rewrite the `dataSource` configuration as follows: + +Java + +``` +@Configuration +@Profile("development") +public class StandaloneDataConfig { + + @Bean + public DataSource dataSource() { + return new EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .addScript("classpath:com/bank/config/sql/test-data.sql") + .build(); + } +} +``` + +Kotlin + +``` +@Configuration +@Profile("development") +class StandaloneDataConfig { + + @Bean + fun dataSource(): DataSource { + return EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .addScript("classpath:com/bank/config/sql/test-data.sql") + .build() + } +} +``` + +Java + +``` +@Configuration +@Profile("production") +public class JndiDataConfig { + + @Bean(destroyMethod="") + public DataSource dataSource() throws Exception { + Context ctx = new InitialContext(); + return (DataSource) ctx.lookup("java:comp/env/jdbc/datasource"); + } +} +``` + +Kotlin + +``` +@Configuration +@Profile("production") +class JndiDataConfig { + + @Bean(destroyMethod = "") + fun dataSource(): DataSource { + val ctx = InitialContext() + return ctx.lookup("java:comp/env/jdbc/datasource") as DataSource + } +} +``` + +| |As mentioned earlier, with `@Bean` methods, you typically choose to use programmatic
JNDI lookups, by using either Spring’s `JndiTemplate`/`JndiLocatorDelegate` helpers or the
straight JNDI `InitialContext` usage shown earlier but not the `JndiObjectFactoryBean` variant, which would force you to declare the return type as the `FactoryBean` type.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
The following example defines a custom`@Production` annotation that you can use as a drop-in replacement for`@Profile("production")`: + +Java + +``` +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +@Profile("production") +public @interface Production { +} +``` + +Kotlin + +``` +@Target(AnnotationTarget.TYPE) +@Retention(AnnotationRetention.RUNTIME) +@Profile("production") +annotation class Production +``` + +| |If a `@Configuration` class is marked with `@Profile`, all of the `@Bean` methods and`@Import` annotations associated with that class are bypassed unless one or more of
the specified profiles are active. If a `@Component` or `@Configuration` class is marked
with `@Profile({"p1", "p2"})`, that class is not registered or processed unless
profiles 'p1' or 'p2' have been activated. If a given profile is prefixed with the
NOT operator (`!`), the annotated element is registered only if the profile is not
active. For example, given `@Profile({"p1", "!p2"})`, registration will occur if profile
'p1' is active or if profile 'p2' is not active.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +`@Profile` can also be declared at the method level to include only one particular bean +of a configuration class (for example, for alternative variants of a particular bean), as +the following example shows: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean("dataSource") + @Profile("development") (1) + public DataSource standaloneDataSource() { + return new EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .addScript("classpath:com/bank/config/sql/test-data.sql") + .build(); + } + + @Bean("dataSource") + @Profile("production") (2) + public DataSource jndiDataSource() throws Exception { + Context ctx = new InitialContext(); + return (DataSource) ctx.lookup("java:comp/env/jdbc/datasource"); + } +} +``` + +|**1**|The `standaloneDataSource` method is available only in the `development` profile.| +|-----|---------------------------------------------------------------------------------| +|**2**| The `jndiDataSource` method is available only in the `production` profile. 
| + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean("dataSource") + @Profile("development") (1) + fun standaloneDataSource(): DataSource { + return EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .addScript("classpath:com/bank/config/sql/test-data.sql") + .build() + } + + @Bean("dataSource") + @Profile("production") (2) + fun jndiDataSource() = + InitialContext().lookup("java:comp/env/jdbc/datasource") as DataSource +} +``` + +|**1**|The `standaloneDataSource` method is available only in the `development` profile.| +|-----|---------------------------------------------------------------------------------| +|**2**| The `jndiDataSource` method is available only in the `production` profile. | + +| |With `@Profile` on `@Bean` methods, a special scenario may apply: In the case of
overloaded `@Bean` methods of the same Java method name (analogous to constructor
overloading), a `@Profile` condition needs to be consistently declared on all
overloaded methods. If the conditions are inconsistent, only the condition on the
first declaration among the overloaded methods matters. Therefore, `@Profile` cannot
be used to select an overloaded method with a particular argument signature over
another. Resolution between all factory methods for the same bean follows Spring’s
constructor resolution algorithm at creation time.

If you want to define alternative beans with different profile conditions,
use distinct Java method names that point to the same bean name by using the `@Bean` name
attribute, as shown in the preceding example. If the argument signatures are all
the same (for example, all of the variants have no-arg factory methods), this is the only
way to represent such an arrangement in a valid Java class in the first place
(since there can only be one method of a particular name and argument signature).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### XML Bean Definition Profiles + +The XML counterpart is the `profile` attribute of the `` element. Our preceding sample +configuration can be rewritten in two XML files, as follows: + +``` + + + + + + + +``` + +``` + + + + +``` + +It is also possible to avoid that split and nest `` elements within the same file, +as the following example shows: + +``` + + + + + + + + + + + + + + + +``` + +The `spring-bean.xsd` has been constrained to allow such elements only as the +last ones in the file. This should help provide flexibility without incurring +clutter in the XML files. + +| |The XML counterpart does not support the profile expressions described earlier. It is possible,
however, to negate a profile by using the `!` operator. It is also possible to apply a logical
“and” by nesting the profiles, as the following example shows:

```
<beans xmlns="http://www.springframework.org/schema/beans"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:jdbc="http://www.springframework.org/schema/jdbc"
    xmlns:jee="http://www.springframework.org/schema/jee"
    xsi:schemaLocation="...">

    <!-- other bean definitions -->

    <beans profile="production">
        <beans profile="us-east">
            <jee:jndi-lookup id="dataSource" jndi-name="java:comp/env/jdbc/datasource"/>
        </beans>
    </beans>
</beans>
```

In the preceding example, the `dataSource` bean is exposed if both the `production` and`us-east` profiles are active.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Activating a Profile + +Now that we have updated our configuration, we still need to instruct Spring which +profile is active. If we started our sample application right now, we would see +a `NoSuchBeanDefinitionException` thrown, because the container could not find +the Spring bean named `dataSource`. + +Activating a profile can be done in several ways, but the most straightforward is to do +it programmatically against the `Environment` API which is available through an`ApplicationContext`. 
The following example shows how to do so: + +Java + +``` +AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(); +ctx.getEnvironment().setActiveProfiles("development"); +ctx.register(SomeConfig.class, StandaloneDataConfig.class, JndiDataConfig.class); +ctx.refresh(); +``` + +Kotlin + +``` +val ctx = AnnotationConfigApplicationContext().apply { + environment.setActiveProfiles("development") + register(SomeConfig::class.java, StandaloneDataConfig::class.java, JndiDataConfig::class.java) + refresh() +} +``` + +In addition, you can also declaratively activate profiles through the`spring.profiles.active` property, which may be specified through system environment +variables, JVM system properties, servlet context parameters in `web.xml`, or even as an +entry in JNDI (see [`PropertySource` Abstraction](#beans-property-source-abstraction)). In integration tests, active +profiles can be declared by using the `@ActiveProfiles` annotation in the `spring-test`module (see [context configuration with environment profiles](testing.html#testcontext-ctx-management-env-profiles)). + +Note that profiles are not an “either-or” proposition. You can activate multiple +profiles at once. Programmatically, you can provide multiple profile names to the`setActiveProfiles()` method, which accepts `String…​` varargs. The following example +activates multiple profiles: + +Java + +``` +ctx.getEnvironment().setActiveProfiles("profile1", "profile2"); +``` + +Kotlin + +``` +ctx.getEnvironment().setActiveProfiles("profile1", "profile2") +``` + +Declaratively, `spring.profiles.active` may accept a comma-separated list of profile names, +as the following example shows: + +``` + -Dspring.profiles.active="profile1,profile2" +``` + +##### Default Profile + +The default profile represents the profile that is enabled by default. 
Consider the +following example: + +Java + +``` +@Configuration +@Profile("default") +public class DefaultDataConfig { + + @Bean + public DataSource dataSource() { + return new EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .build(); + } +} +``` + +Kotlin + +``` +@Configuration +@Profile("default") +class DefaultDataConfig { + + @Bean + fun dataSource(): DataSource { + return EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .build() + } +} +``` + +If no profile is active, the `dataSource` is created. You can see this +as a way to provide a default definition for one or more beans. If any +profile is enabled, the default profile does not apply. + +You can change the name of the default profile by using `setDefaultProfiles()` on +the `Environment` or, declaratively, by using the `spring.profiles.default` property. + +#### 1.13.2. `PropertySource` Abstraction + +Spring’s `Environment` abstraction provides search operations over a configurable +hierarchy of property sources. Consider the following listing: + +Java + +``` +ApplicationContext ctx = new GenericApplicationContext(); +Environment env = ctx.getEnvironment(); +boolean containsMyProperty = env.containsProperty("my-property"); +System.out.println("Does my environment contain the 'my-property' property? " + containsMyProperty); +``` + +Kotlin + +``` +val ctx = GenericApplicationContext() +val env = ctx.environment +val containsMyProperty = env.containsProperty("my-property") +println("Does my environment contain the 'my-property' property? $containsMyProperty") +``` + +In the preceding snippet, we see a high-level way of asking Spring whether the `my-property` property is +defined for the current environment. 
To answer this question, the `Environment` object performs +a search over a set of [`PropertySource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/env/PropertySource.html)objects. A `PropertySource` is a simple abstraction over any source of key-value pairs, and +Spring’s [`StandardEnvironment`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/env/StandardEnvironment.html)is configured with two PropertySource objects — one representing the set of JVM system properties +(`System.getProperties()`) and one representing the set of system environment variables +(`System.getenv()`). + +| |These default property sources are present for `StandardEnvironment`, for use in standalone
applications. [`StandardServletEnvironment`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/context/support/StandardServletEnvironment.html)is populated with additional default property sources including servlet config and servlet
context parameters. It can optionally enable a [`JndiPropertySource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jndi/JndiPropertySource.html).
See the javadoc for details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Concretely, when you use the `StandardEnvironment`, the call to `env.containsProperty("my-property")`returns true if a `my-property` system property or `my-property` environment variable is present at +runtime. + +| |The search performed is hierarchical. By default, system properties have precedence over
environment variables. So, if the `my-property` property happens to be set in both places during
a call to `env.getProperty("my-property")`, the system property value “wins” and is returned.
Note that property values are not merged
but rather completely overridden by a preceding entry.

For a common `StandardServletEnvironment`, the full hierarchy is as follows, with the
highest-precedence entries at the top:

1. ServletConfig parameters (if applicable — for example, in case of a `DispatcherServlet` context)

2. ServletContext parameters (web.xml context-param entries)

3. JNDI environment variables (`java:comp/env/` entries)

4. JVM system properties (`-D` command-line arguments)

5. JVM system environment (operating system environment variables)| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Most importantly, the entire mechanism is configurable. Perhaps you have a custom source +of properties that you want to integrate into this search. To do so, implement +and instantiate your own `PropertySource` and add it to the set of `PropertySources` for the +current `Environment`. The following example shows how to do so: + +Java + +``` +ConfigurableApplicationContext ctx = new GenericApplicationContext(); +MutablePropertySources sources = ctx.getEnvironment().getPropertySources(); +sources.addFirst(new MyPropertySource()); +``` + +Kotlin + +``` +val ctx = GenericApplicationContext() +val sources = ctx.environment.propertySources +sources.addFirst(MyPropertySource()) +``` + +In the preceding code, `MyPropertySource` has been added with highest precedence in the +search. If it contains a `my-property` property, the property is detected and returned, in favor of +any `my-property` property in any other `PropertySource`. 
The[`MutablePropertySources`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/env/MutablePropertySources.html)API exposes a number of methods that allow for precise manipulation of the set of +property sources. + +#### 1.13.3. Using `@PropertySource` + +The [`@PropertySource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/PropertySource.html)annotation provides a convenient and declarative mechanism for adding a `PropertySource`to Spring’s `Environment`. + +Given a file called `app.properties` that contains the key-value pair `testbean.name=myTestBean`, +the following `@Configuration` class uses `@PropertySource` in such a way that +a call to `testBean.getName()` returns `myTestBean`: + +Java + +``` +@Configuration +@PropertySource("classpath:/com/myco/app.properties") +public class AppConfig { + + @Autowired + Environment env; + + @Bean + public TestBean testBean() { + TestBean testBean = new TestBean(); + testBean.setName(env.getProperty("testbean.name")); + return testBean; + } +} +``` + +Kotlin + +``` +@Configuration +@PropertySource("classpath:/com/myco/app.properties") +class AppConfig { + + @Autowired + private lateinit var env: Environment + + @Bean + fun testBean() = TestBean().apply { + name = env.getProperty("testbean.name")!! 
+ } +} +``` + +Any `${…​}` placeholders present in a `@PropertySource` resource location are +resolved against the set of property sources already registered against the +environment, as the following example shows: + +Java + +``` +@Configuration +@PropertySource("classpath:/com/${my.placeholder:default/path}/app.properties") +public class AppConfig { + + @Autowired + Environment env; + + @Bean + public TestBean testBean() { + TestBean testBean = new TestBean(); + testBean.setName(env.getProperty("testbean.name")); + return testBean; + } +} +``` + +Kotlin + +``` +@Configuration +@PropertySource("classpath:/com/\${my.placeholder:default/path}/app.properties") +class AppConfig { + + @Autowired + private lateinit var env: Environment + + @Bean + fun testBean() = TestBean().apply { + name = env.getProperty("testbean.name")!! + } +} +``` + +Assuming that `my.placeholder` is present in one of the property sources already +registered (for example, system properties or environment variables), the placeholder is +resolved to the corresponding value. If not, then `default/path` is used +as a default. If no default is specified and a property cannot be resolved, an`IllegalArgumentException` is thrown. + +| |The `@PropertySource` annotation is repeatable, according to Java 8 conventions.
However, all such `@PropertySource` annotations need to be declared at the same
level, either directly on the configuration class or as meta-annotations within the
same custom annotation. Mixing direct annotations and meta-annotations is not
recommended, since direct annotations effectively override meta-annotations.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.13.4. Placeholder Resolution in Statements + +Historically, the value of placeholders in elements could be resolved only against +JVM system properties or environment variables. This is no longer the case. Because +the `Environment` abstraction is integrated throughout the container, it is easy to +route resolution of placeholders through it. This means that you may configure the +resolution process in any way you like. You can change the precedence of searching through +system properties and environment variables or remove them entirely. You can also add your +own property sources to the mix, as appropriate. + +Concretely, the following statement works regardless of where the `customer`property is defined, as long as it is available in the `Environment`: + +``` + + + +``` + +### 1.14. Registering a `LoadTimeWeaver` + +The `LoadTimeWeaver` is used by Spring to dynamically transform classes as they are +loaded into the Java virtual machine (JVM). 
+ +To enable load-time weaving, you can add the `@EnableLoadTimeWeaving` to one of your`@Configuration` classes, as the following example shows: + +Java + +``` +@Configuration +@EnableLoadTimeWeaving +public class AppConfig { +} +``` + +Kotlin + +``` +@Configuration +@EnableLoadTimeWeaving +class AppConfig +``` + +Alternatively, for XML configuration, you can use the `context:load-time-weaver` element: + +``` + + + +``` + +Once configured for the `ApplicationContext`, any bean within that `ApplicationContext`may implement `LoadTimeWeaverAware`, thereby receiving a reference to the load-time +weaver instance. This is particularly useful in combination with[Spring’s JPA support](data-access.html#orm-jpa) where load-time weaving may be +necessary for JPA class transformation. +Consult the[`LocalContainerEntityManagerFactoryBean`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/orm/jpa/LocalContainerEntityManagerFactoryBean.html)javadoc for more detail. For more on AspectJ load-time weaving, see [Load-time Weaving with AspectJ in the Spring Framework](#aop-aj-ltw). + +### 1.15. Additional Capabilities of the `ApplicationContext` + +As discussed in the [chapter introduction](#beans), the `org.springframework.beans.factory`package provides basic functionality for managing and manipulating beans, including in a +programmatic way. The `org.springframework.context` package adds the[`ApplicationContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/ApplicationContext.html)interface, which extends the `BeanFactory` interface, in addition to extending other +interfaces to provide additional functionality in a more application +framework-oriented style. 
Many people use the `ApplicationContext` in a completely +declarative fashion, not even creating it programmatically, but instead relying on +support classes such as `ContextLoader` to automatically instantiate an`ApplicationContext` as part of the normal startup process of a Java EE web application. + +To enhance `BeanFactory` functionality in a more framework-oriented style, the context +package also provides the following functionality: + +* Access to messages in i18n-style, through the `MessageSource` interface. + +* Access to resources, such as URLs and files, through the `ResourceLoader` interface. + +* Event publication, namely to beans that implement the `ApplicationListener` interface, + through the use of the `ApplicationEventPublisher` interface. + +* Loading of multiple (hierarchical) contexts, letting each be focused on one + particular layer, such as the web layer of an application, through the`HierarchicalBeanFactory` interface. + +#### 1.15.1. Internationalization using `MessageSource` + +The `ApplicationContext` interface extends an interface called `MessageSource` and, +therefore, provides internationalization (“i18n”) functionality. Spring also provides the`HierarchicalMessageSource` interface, which can resolve messages hierarchically. +Together, these interfaces provide the foundation upon which Spring effects message +resolution. The methods defined on these interfaces include: + +* `String getMessage(String code, Object[] args, String default, Locale loc)`: The basic + method used to retrieve a message from the `MessageSource`. When no message is found + for the specified locale, the default message is used. Any arguments passed in become + replacement values, using the `MessageFormat` functionality provided by the standard + library. + +* `String getMessage(String code, Object[] args, Locale loc)`: Essentially the same as + the previous method but with one difference: No default message can be specified. 
If + the message cannot be found, a `NoSuchMessageException` is thrown. + +* `String getMessage(MessageSourceResolvable resolvable, Locale locale)`: All properties + used in the preceding methods are also wrapped in a class named`MessageSourceResolvable`, which you can use with this method. + +When an `ApplicationContext` is loaded, it automatically searches for a `MessageSource`bean defined in the context. The bean must have the name `messageSource`. If such a bean +is found, all calls to the preceding methods are delegated to the message source. If no +message source is found, the `ApplicationContext` attempts to find a parent containing a +bean with the same name. If it does, it uses that bean as the `MessageSource`. If the`ApplicationContext` cannot find any source for messages, an empty`DelegatingMessageSource` is instantiated in order to be able to accept calls to the +methods defined above. + +Spring provides three `MessageSource` implementations, `ResourceBundleMessageSource`, `ReloadableResourceBundleMessageSource`and `StaticMessageSource`. All of them implement `HierarchicalMessageSource` in order to do nested +messaging. The `StaticMessageSource` is rarely used but provides programmatic ways to +add messages to the source. The following example shows `ResourceBundleMessageSource`: + +``` + + + + + format + exceptions + windows + + + + +``` + +The example assumes that you have three resource bundles called `format`, `exceptions` and `windows`defined in your classpath. Any request to resolve a message is +handled in the JDK-standard way of resolving messages through `ResourceBundle` objects. For the +purposes of the example, assume the contents of two of the above resource bundle files +are as follows: + +``` + # in format.properties + message=Alligators rock! +``` + +``` + # in exceptions.properties + argument.required=The {0} argument is required. +``` + +The next example shows a program to run the `MessageSource` functionality. 
+Remember that all `ApplicationContext` implementations are also `MessageSource`implementations and so can be cast to the `MessageSource` interface. + +Java + +``` +public static void main(String[] args) { + MessageSource resources = new ClassPathXmlApplicationContext("beans.xml"); + String message = resources.getMessage("message", null, "Default", Locale.ENGLISH); + System.out.println(message); +} +``` + +Kotlin + +``` +fun main() { + val resources = ClassPathXmlApplicationContext("beans.xml") + val message = resources.getMessage("message", null, "Default", Locale.ENGLISH) + println(message) +} +``` + +The resulting output from the above program is as follows: + +``` +Alligators rock! +``` + +To summarize, the `MessageSource` is defined in a file called `beans.xml`, which +exists at the root of your classpath. The `messageSource` bean definition refers to a +number of resource bundles through its `basenames` property. The three files that are +passed in the list to the `basenames` property exist as files at the root of your +classpath and are called `format.properties`, `exceptions.properties`, and`windows.properties`, respectively. + +The next example shows arguments passed to the message lookup. These arguments are +converted into `String` objects and inserted into placeholders in the lookup message. 
+ +``` + + + + + + + + + + + + + +``` + +Java + +``` +public class Example { + + private MessageSource messages; + + public void setMessages(MessageSource messages) { + this.messages = messages; + } + + public void execute() { + String message = this.messages.getMessage("argument.required", + new Object [] {"userDao"}, "Required", Locale.ENGLISH); + System.out.println(message); + } +} +``` + +Kotlin + +``` + class Example { + + lateinit var messages: MessageSource + + fun execute() { + val message = messages.getMessage("argument.required", + arrayOf("userDao"), "Required", Locale.ENGLISH) + println(message) + } +} +``` + +The resulting output from the invocation of the `execute()` method is as follows: + +``` +The userDao argument is required. +``` + +With regard to internationalization (“i18n”), Spring’s various `MessageSource`implementations follow the same locale resolution and fallback rules as the standard JDK`ResourceBundle`. In short, and continuing with the example `messageSource` defined +previously, if you want to resolve messages against the British (`en-GB`) locale, you +would create files called `format_en_GB.properties`, `exceptions_en_GB.properties`, and`windows_en_GB.properties`, respectively. + +Typically, locale resolution is managed by the surrounding environment of the +application. In the following example, the locale against which (British) messages are +resolved is specified manually: + +``` +# in exceptions_en_GB.properties +argument.required=Ebagum lad, the ''{0}'' argument is required, I say, required. 
+``` + +Java + +``` +public static void main(final String[] args) { + MessageSource resources = new ClassPathXmlApplicationContext("beans.xml"); + String message = resources.getMessage("argument.required", + new Object [] {"userDao"}, "Required", Locale.UK); + System.out.println(message); +} +``` + +Kotlin + +``` +fun main() { + val resources = ClassPathXmlApplicationContext("beans.xml") + val message = resources.getMessage("argument.required", + arrayOf("userDao"), "Required", Locale.UK) + println(message) +} +``` + +The resulting output from the running of the above program is as follows: + +``` +Ebagum lad, the 'userDao' argument is required, I say, required. +``` + +You can also use the `MessageSourceAware` interface to acquire a reference to any`MessageSource` that has been defined. Any bean that is defined in an`ApplicationContext` that implements the `MessageSourceAware` interface is injected with +the application context’s `MessageSource` when the bean is created and configured. + +| |Because Spring’s `MessageSource` is based on Java’s `ResourceBundle`, it does not merge
bundles with the same base name, but will only use the first bundle found.
Subsequent message bundles with the same base name are ignored.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |As an alternative to `ResourceBundleMessageSource`, Spring provides a`ReloadableResourceBundleMessageSource` class. This variant supports the same bundle
file format but is more flexible than the standard JDK-based `ResourceBundleMessageSource` implementation. In particular, it allows for reading
files from any Spring resource location (not only from the classpath) and supports hot
reloading of bundle property files (while efficiently caching them in between).
See the [`ReloadableResourceBundleMessageSource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/support/ReloadableResourceBundleMessageSource.html)javadoc for details.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.15.2. Standard and Custom Events + +Event handling in the `ApplicationContext` is provided through the `ApplicationEvent`class and the `ApplicationListener` interface. If a bean that implements the`ApplicationListener` interface is deployed into the context, every time an`ApplicationEvent` gets published to the `ApplicationContext`, that bean is notified. +Essentially, this is the standard Observer design pattern. + +| |As of Spring 4.2, the event infrastructure has been significantly improved and offers
an [annotation-based model](#context-functionality-events-annotation) as well as the
ability to publish any arbitrary event (that is, an object that does not necessarily
extend from `ApplicationEvent`). When such an object is published, we wrap it in an
event for you.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following table describes the standard events that Spring provides: + +| Event | Explanation | +|----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `ContextRefreshedEvent` |Published when the `ApplicationContext` is initialized or refreshed (for example, by
using the `refresh()` method on the `ConfigurableApplicationContext` interface).
Here, “initialized” means that all beans are loaded, post-processor beans are detected
and activated, singletons are pre-instantiated, and the `ApplicationContext` object is
ready for use. As long as the context has not been closed, a refresh can be triggered
multiple times, provided that the chosen `ApplicationContext` actually supports such
“hot” refreshes. For example, `XmlWebApplicationContext` supports hot refreshes, but `GenericApplicationContext` does not.| +| `ContextStartedEvent` | Published when the `ApplicationContext` is started by using the `start()` method on the `ConfigurableApplicationContext` interface. Here, “started” means that all `Lifecycle` beans receive an explicit start signal. Typically, this signal is used to restart beans
after an explicit stop, but it may also be used to start components that have not been
configured for autostart (for example, components that have not already started on
initialization). | +| `ContextStoppedEvent` | Published when the `ApplicationContext` is stopped by using the `stop()` method on the `ConfigurableApplicationContext` interface. Here, “stopped” means that all `Lifecycle` beans receive an explicit stop signal. A stopped context may be restarted through a `start()` call. | +| `ContextClosedEvent` | Published when the `ApplicationContext` is being closed by using the `close()` method
on the `ConfigurableApplicationContext` interface or via a JVM shutdown hook. Here,
“closed” means that all singleton beans will be destroyed. Once the context is closed,
it reaches its end of life and cannot be refreshed or restarted. | +| `RequestHandledEvent` | A web-specific event telling all beans that an HTTP request has been serviced. This
event is published after the request is complete. This event is only applicable to
web applications that use Spring’s `DispatcherServlet`. | +|`ServletRequestHandledEvent`| A subclass of `RequestHandledEvent` that adds Servlet-specific context information. | + +You can also create and publish your own custom events. The following example shows a +simple class that extends Spring’s `ApplicationEvent` base class: + +Java + +``` +public class BlockedListEvent extends ApplicationEvent { + + private final String address; + private final String content; + + public BlockedListEvent(Object source, String address, String content) { + super(source); + this.address = address; + this.content = content; + } + + // accessor and other methods... +} +``` + +Kotlin + +``` +class BlockedListEvent(source: Any, + val address: String, + val content: String) : ApplicationEvent(source) +``` + +To publish a custom `ApplicationEvent`, call the `publishEvent()` method on an`ApplicationEventPublisher`. Typically, this is done by creating a class that implements`ApplicationEventPublisherAware` and registering it as a Spring bean. The following +example shows such a class: + +Java + +``` +public class EmailService implements ApplicationEventPublisherAware { + + private List blockedList; + private ApplicationEventPublisher publisher; + + public void setBlockedList(List blockedList) { + this.blockedList = blockedList; + } + + public void setApplicationEventPublisher(ApplicationEventPublisher publisher) { + this.publisher = publisher; + } + + public void sendEmail(String address, String content) { + if (blockedList.contains(address)) { + publisher.publishEvent(new BlockedListEvent(this, address, content)); + return; + } + // send email... 
+ } +} +``` + +Kotlin + +``` +class EmailService : ApplicationEventPublisherAware { + + private lateinit var blockedList: List + private lateinit var publisher: ApplicationEventPublisher + + fun setBlockedList(blockedList: List) { + this.blockedList = blockedList + } + + override fun setApplicationEventPublisher(publisher: ApplicationEventPublisher) { + this.publisher = publisher + } + + fun sendEmail(address: String, content: String) { + if (blockedList!!.contains(address)) { + publisher!!.publishEvent(BlockedListEvent(this, address, content)) + return + } + // send email... + } +} +``` + +At configuration time, the Spring container detects that `EmailService` implements`ApplicationEventPublisherAware` and automatically calls`setApplicationEventPublisher()`. In reality, the parameter passed in is the Spring +container itself. You are interacting with the application context through its`ApplicationEventPublisher` interface. + +To receive the custom `ApplicationEvent`, you can create a class that implements`ApplicationListener` and register it as a Spring bean. The following example +shows such a class: + +Java + +``` +public class BlockedListNotifier implements ApplicationListener { + + private String notificationAddress; + + public void setNotificationAddress(String notificationAddress) { + this.notificationAddress = notificationAddress; + } + + public void onApplicationEvent(BlockedListEvent event) { + // notify appropriate parties via notificationAddress... + } +} +``` + +Kotlin + +``` +class BlockedListNotifier : ApplicationListener { + + lateinit var notificationAddres: String + + override fun onApplicationEvent(event: BlockedListEvent) { + // notify appropriate parties via notificationAddress... + } +} +``` + +Notice that `ApplicationListener` is generically parameterized with the type of your +custom event (`BlockedListEvent` in the preceding example). This means that the`onApplicationEvent()` method can remain type-safe, avoiding any need for downcasting. 
+You can register as many event listeners as you wish, but note that, by default, event +listeners receive events synchronously. This means that the `publishEvent()` method +blocks until all listeners have finished processing the event. One advantage of this +synchronous and single-threaded approach is that, when a listener receives an event, it +operates inside the transaction context of the publisher if a transaction context is +available. If another strategy for event publication becomes necessary, see the javadoc +for Spring’s[`ApplicationEventMulticaster`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/event/ApplicationEventMulticaster.html) interface +and [`SimpleApplicationEventMulticaster`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/event/SimpleApplicationEventMulticaster.html)implementation for configuration options. + +The following example shows the bean definitions used to register and configure each of +the classes above: + +``` + + + + [email protected] + [email protected] + [email protected] + + + + + + + +``` + +Putting it all together, when the `sendEmail()` method of the `emailService` bean is +called, if there are any email messages that should be blocked, a custom event of type`BlockedListEvent` is published. The `blockedListNotifier` bean is registered as an`ApplicationListener` and receives the `BlockedListEvent`, at which point it can +notify appropriate parties. + +| |Spring’s eventing mechanism is designed for simple communication between Spring beans
within the same application context. However, for more sophisticated enterprise
integration needs, the separately maintained [Spring Integration](https://projects.spring.io/spring-integration/) project provides
complete support for building lightweight, [pattern-oriented](https://www.enterpriseintegrationpatterns.com), event-driven
architectures that build upon the well-known Spring programming model.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Annotation-based Event Listeners + +You can register an event listener on any method of a managed bean by using the`@EventListener` annotation. The `BlockedListNotifier` can be rewritten as follows: + +Java + +``` +public class BlockedListNotifier { + + private String notificationAddress; + + public void setNotificationAddress(String notificationAddress) { + this.notificationAddress = notificationAddress; + } + + @EventListener + public void processBlockedListEvent(BlockedListEvent event) { + // notify appropriate parties via notificationAddress... + } +} +``` + +Kotlin + +``` +class BlockedListNotifier { + + lateinit var notificationAddress: String + + @EventListener + fun processBlockedListEvent(event: BlockedListEvent) { + // notify appropriate parties via notificationAddress... + } +} +``` + +The method signature once again declares the event type to which it listens, +but, this time, with a flexible name and without implementing a specific listener interface. +The event type can also be narrowed through generics as long as the actual event type +resolves your generic parameter in its implementation hierarchy. + +If your method should listen to several events or if you want to define it with no +parameter at all, the event types can also be specified on the annotation itself. 
The +following example shows how to do so: + +Java + +``` +@EventListener({ContextStartedEvent.class, ContextRefreshedEvent.class}) +public void handleContextStart() { + // ... +} +``` + +Kotlin + +``` +@EventListener(ContextStartedEvent::class, ContextRefreshedEvent::class) +fun handleContextStart() { + // ... +} +``` + +It is also possible to add additional runtime filtering by using the `condition` attribute +of the annotation that defines a [`SpEL` expression](#expressions), which should match +to actually invoke the method for a particular event. + +The following example shows how our notifier can be rewritten to be invoked only if the`content` attribute of the event is equal to `my-event`: + +Java + +``` +@EventListener(condition = "#blEvent.content == 'my-event'") +public void processBlockedListEvent(BlockedListEvent blEvent) { + // notify appropriate parties via notificationAddress... +} +``` + +Kotlin + +``` +@EventListener(condition = "#blEvent.content == 'my-event'") +fun processBlockedListEvent(blEvent: BlockedListEvent) { + // notify appropriate parties via notificationAddress... +} +``` + +Each `SpEL` expression evaluates against a dedicated context. The following table lists the +items made available to the context so that you can use them for conditional event processing: + +| Name | Location | Description | Example | +|---------------|------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------| +| Event | root object | The actual `ApplicationEvent`. | `#root.event` or `event` | +|Arguments array| root object | The arguments (as an object array) used to invoke the method. 
| `#root.args` or `args`; `args[0]` to access the first argument, etc. | +|*Argument name*|evaluation context|The name of any of the method arguments. If, for some reason, the names are not available
(for example, because there is no debug information in the compiled byte code), individual
arguments are also available using the `#a<#arg>` syntax where `<#arg>` stands for the
argument index (starting from 0).|`#blEvent` or `#a0` (you can also use `#p0` or `#p<#arg>` parameter notation as an alias)| + +Note that `#root.event` gives you access to the underlying event, even if your method +signature actually refers to an arbitrary object that was published. + +If you need to publish an event as the result of processing another event, you can change the +method signature to return the event that should be published, as the following example shows: + +Java + +``` +@EventListener +public ListUpdateEvent handleBlockedListEvent(BlockedListEvent event) { + // notify appropriate parties via notificationAddress and + // then publish a ListUpdateEvent... +} +``` + +Kotlin + +``` +@EventListener +fun handleBlockedListEvent(event: BlockedListEvent): ListUpdateEvent { + // notify appropriate parties via notificationAddress and + // then publish a ListUpdateEvent... +} +``` + +| |This feature is not supported for[asynchronous listeners](#context-functionality-events-async).| +|---|-----------------------------------------------------------------------------------------------| + +The `handleBlockedListEvent()` method publishes a new `ListUpdateEvent` for every`BlockedListEvent` that it handles. If you need to publish several events, you can return +a `Collection` or an array of events instead. + +##### Asynchronous Listeners + +If you want a particular listener to process events asynchronously, you can reuse the[regular `@Async` support](integration.html#scheduling-annotation-support-async). 
+The following example shows how to do so: + +Java + +``` +@EventListener +@Async +public void processBlockedListEvent(BlockedListEvent event) { + // BlockedListEvent is processed in a separate thread +} +``` + +Kotlin + +``` +@EventListener +@Async +fun processBlockedListEvent(event: BlockedListEvent) { + // BlockedListEvent is processed in a separate thread +} +``` + +Be aware of the following limitations when using asynchronous events: + +* If an asynchronous event listener throws an `Exception`, it is not propagated to the + caller. See[`AsyncUncaughtExceptionHandler`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/aop/interceptor/AsyncUncaughtExceptionHandler.html)for more details. + +* Asynchronous event listener methods cannot publish a subsequent event by returning a + value. If you need to publish another event as the result of the processing, inject an[`ApplicationEventPublisher`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/ApplicationEventPublisher.html)to publish the event manually. + +##### Ordering Listeners + +If you need one listener to be invoked before another one, you can add the `@Order`annotation to the method declaration, as the following example shows: + +Java + +``` +@EventListener +@Order(42) +public void processBlockedListEvent(BlockedListEvent event) { + // notify appropriate parties via notificationAddress... +} +``` + +Kotlin + +``` +@EventListener +@Order(42) +fun processBlockedListEvent(event: BlockedListEvent) { + // notify appropriate parties via notificationAddress... +} +``` + +##### Generic Events + +You can also use generics to further define the structure of your event. Consider using an`EntityCreatedEvent` where `T` is the type of the actual entity that got created. 
For example, you +can create the following listener definition to receive only `EntityCreatedEvent` for a`Person`: + +Java + +``` +@EventListener +public void onPersonCreated(EntityCreatedEvent event) { + // ... +} +``` + +Kotlin + +``` +@EventListener +fun onPersonCreated(event: EntityCreatedEvent) { + // ... +} +``` + +Due to type erasure, this works only if the event that is fired resolves the generic +parameters on which the event listener filters (that is, something like`class PersonCreatedEvent extends EntityCreatedEvent { …​ }`). + +In certain circumstances, this may become quite tedious if all events follow the same +structure (as should be the case for the event in the preceding example). In such a case, +you can implement `ResolvableTypeProvider` to guide the framework beyond what the runtime +environment provides. The following event shows how to do so: + +Java + +``` +public class EntityCreatedEvent extends ApplicationEvent implements ResolvableTypeProvider { + + public EntityCreatedEvent(T entity) { + super(entity); + } + + @Override + public ResolvableType getResolvableType() { + return ResolvableType.forClassWithGenerics(getClass(), ResolvableType.forInstance(getSource())); + } +} +``` + +Kotlin + +``` +class EntityCreatedEvent(entity: T) : ApplicationEvent(entity), ResolvableTypeProvider { + + override fun getResolvableType(): ResolvableType? { + return ResolvableType.forClassWithGenerics(javaClass, ResolvableType.forInstance(getSource())) + } +} +``` + +| |This works not only for `ApplicationEvent` but any arbitrary object that you send as
an event.| +|---|--------------------------------------------------------------------------------------------------| + +#### 1.15.3. Convenient Access to Low-level Resources + +For optimal usage and understanding of application contexts, you should familiarize +yourself with Spring’s `Resource` abstraction, as described in [Resources](#resources). + +An application context is a `ResourceLoader`, which can be used to load `Resource` objects. +A `Resource` is essentially a more feature rich version of the JDK `java.net.URL` class. +In fact, the implementations of the `Resource` wrap an instance of `java.net.URL`, where +appropriate. A `Resource` can obtain low-level resources from almost any location in a +transparent fashion, including from the classpath, a filesystem location, anywhere +describable with a standard URL, and some other variations. If the resource location +string is a simple path without any special prefixes, where those resources come from is +specific and appropriate to the actual application context type. + +You can configure a bean deployed into the application context to implement the special +callback interface, `ResourceLoaderAware`, to be automatically called back at +initialization time with the application context itself passed in as the `ResourceLoader`. +You can also expose properties of type `Resource`, to be used to access static resources. +They are injected into it like any other properties. You can specify those `Resource`properties as simple `String` paths and rely on automatic conversion from those text +strings to actual `Resource` objects when the bean is deployed. + +The location path or paths supplied to an `ApplicationContext` constructor are actually +resource strings and, in simple form, are treated appropriately according to the specific +context implementation. For example `ClassPathXmlApplicationContext` treats a simple +location path as a classpath location. 
You can also use location paths (resource strings) +with special prefixes to force loading of definitions from the classpath or a URL, +regardless of the actual context type. + +#### 1.15.4. Application Startup Tracking + +The `ApplicationContext` manages the lifecycle of Spring applications and provides a rich +programming model around components. As a result, complex applications can have equally +complex component graphs and startup phases. + +Tracking the application startup steps with specific metrics can help understand where +time is being spent during the startup phase, but it can also be used as a way to better +understand the context lifecycle as a whole. + +The `AbstractApplicationContext` (and its subclasses) is instrumented with an`ApplicationStartup`, which collects `StartupStep` data about various startup phases: + +* application context lifecycle (base packages scanning, config classes management) + +* beans lifecycle (instantiation, smart initialization, post processing) + +* application events processing + +Here is an example of instrumentation in the `AnnotationConfigApplicationContext`: + +Java + +``` +// create a startup step and start recording +StartupStep scanPackages = this.getApplicationStartup().start("spring.context.base-packages.scan"); +// add tagging information to the current step +scanPackages.tag("packages", () -> Arrays.toString(basePackages)); +// perform the actual phase we're instrumenting +this.scanner.scan(basePackages); +// end the current step +scanPackages.end(); +``` + +Kotlin + +``` +// create a startup step and start recording +val scanPackages = this.getApplicationStartup().start("spring.context.base-packages.scan") +// add tagging information to the current step +scanPackages.tag("packages", () -> Arrays.toString(basePackages)) +// perform the actual phase we're instrumenting +this.scanner.scan(basePackages) +// end the current step +scanPackages.end() +``` + +The application context is already instrumented with 
multiple steps. +Once recorded, these startup steps can be collected, displayed and analyzed with specific tools. +For a complete list of existing startup steps, you can check out the[dedicated appendix section](#application-startup-steps). + +The default `ApplicationStartup` implementation is a no-op variant, for minimal overhead. +This means no metrics will be collected during application startup by default. +Spring Framework ships with an implementation for tracking startup steps with Java Flight Recorder:`FlightRecorderApplicationStartup`. To use this variant, you must configure an instance of it +to the `ApplicationContext` as soon as it’s been created. + +Developers can also use the `ApplicationStartup` infrastructure if they’re providing their own`AbstractApplicationContext` subclass, or if they wish to collect more precise data. + +| |`ApplicationStartup` is meant to be only used during application startup and for
the core container; this is by no means a replacement for Java profilers or
metrics libraries like [Micrometer](https://micrometer.io).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To start collecting custom `StartupStep`, components can either get the `ApplicationStartup`instance from the application context directly, make their component implement `ApplicationStartupAware`, +or ask for the `ApplicationStartup` type on any injection point. + +| |Developers should not use the `"spring.*"` namespace when creating custom startup steps.
This namespace is reserved for internal Spring usage and is subject to change.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.15.5. Convenient ApplicationContext Instantiation for Web Applications + +You can create `ApplicationContext` instances declaratively by using, for example, a`ContextLoader`. Of course, you can also create `ApplicationContext` instances +programmatically by using one of the `ApplicationContext` implementations. + +You can register an `ApplicationContext` by using the `ContextLoaderListener`, as the +following example shows: + +``` + + contextConfigLocation + /WEB-INF/daoContext.xml /WEB-INF/applicationContext.xml + + + + org.springframework.web.context.ContextLoaderListener + +``` + +The listener inspects the `contextConfigLocation` parameter. If the parameter does not +exist, the listener uses `/WEB-INF/applicationContext.xml` as a default. When the +parameter does exist, the listener separates the `String` by using predefined +delimiters (comma, semicolon, and whitespace) and uses the values as locations where +application contexts are searched. Ant-style path patterns are supported as well. +Examples are `/WEB-INF/*Context.xml` (for all files with names that end with`Context.xml` and that reside in the `WEB-INF` directory) and `/WEB-INF/**/*Context.xml`(for all such files in any subdirectory of `WEB-INF`). + +#### 1.15.6. Deploying a Spring `ApplicationContext` as a Java EE RAR File + +It is possible to deploy a Spring `ApplicationContext` as a RAR file, encapsulating the +context and all of its required bean classes and library JARs in a Java EE RAR deployment +unit. This is the equivalent of bootstrapping a stand-alone `ApplicationContext` (only hosted +in Java EE environment) being able to access the Java EE servers facilities. 
RAR deployment +is a more natural alternative to a scenario of deploying a headless WAR file — in effect, +a WAR file without any HTTP entry points that is used only for bootstrapping a Spring`ApplicationContext` in a Java EE environment. + +RAR deployment is ideal for application contexts that do not need HTTP entry points but +rather consist only of message endpoints and scheduled jobs. Beans in such a context can +use application server resources such as the JTA transaction manager and JNDI-bound JDBC`DataSource` instances and JMS `ConnectionFactory` instances and can also register with +the platform’s JMX server — all through Spring’s standard transaction management and JNDI +and JMX support facilities. Application components can also interact with the application +server’s JCA `WorkManager` through Spring’s `TaskExecutor` abstraction. + +See the javadoc of the[`SpringContextResourceAdapter`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jca/context/SpringContextResourceAdapter.html)class for the configuration details involved in RAR deployment. + +For a simple deployment of a Spring ApplicationContext as a Java EE RAR file: + +1. Package + all application classes into a RAR file (which is a standard JAR file with a different + file extension). + +2. Add all required library JARs into the root of the RAR archive. + +3. Add a`META-INF/ra.xml` deployment descriptor (as shown in the [javadoc for `SpringContextResourceAdapter`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jca/context/SpringContextResourceAdapter.html)) + and the corresponding Spring XML bean definition file(s) (typically`META-INF/applicationContext.xml`). + +4. Drop the resulting RAR file into your + application server’s deployment directory. + +| |Such RAR deployment units are usually self-contained. They do not expose components
to the outside world, not even to other modules of the same application. Interaction with a
RAR-based `ApplicationContext` usually occurs through JMS destinations that it shares with
other modules. A RAR-based `ApplicationContext` may also, for example, schedule some jobs
or react to new files in the file system (or the like). If it needs to allow synchronous
access from the outside, it could (for example) export RMI endpoints, which may be used
by other application modules on the same machine.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.16. The `BeanFactory` + +The `BeanFactory` API provides the underlying basis for Spring’s IoC functionality. +Its specific contracts are mostly used in integration with other parts of Spring and +related third-party frameworks, and its `DefaultListableBeanFactory` implementation +is a key delegate within the higher-level `GenericApplicationContext` container. + +`BeanFactory` and related interfaces (such as `BeanFactoryAware`, `InitializingBean`,`DisposableBean`) are important integration points for other framework components. +By not requiring any annotations or even reflection, they allow for very efficient +interaction between the container and its components. Application-level beans may +use the same callback interfaces but typically prefer declarative dependency +injection instead, either through annotations or through programmatic configuration. + +Note that the core `BeanFactory` API level and its `DefaultListableBeanFactory`implementation do not make assumptions about the configuration format or any +component annotations to be used. All of these flavors come in through extensions +(such as `XmlBeanDefinitionReader` and `AutowiredAnnotationBeanPostProcessor`) and +operate on shared `BeanDefinition` objects as a core metadata representation. 
+This is the essence of what makes Spring’s container so flexible and extensible. + +#### 1.16.1. `BeanFactory` or `ApplicationContext`? + +This section explains the differences between the `BeanFactory` and`ApplicationContext` container levels and the implications on bootstrapping. + +You should use an `ApplicationContext` unless you have a good reason for not doing so, with`GenericApplicationContext` and its subclass `AnnotationConfigApplicationContext`as the common implementations for custom bootstrapping. These are the primary entry +points to Spring’s core container for all common purposes: loading of configuration +files, triggering a classpath scan, programmatically registering bean definitions +and annotated classes, and (as of 5.0) registering functional bean definitions. + +Because an `ApplicationContext` includes all the functionality of a `BeanFactory`, it is +generally recommended over a plain `BeanFactory`, except for scenarios where full +control over bean processing is needed. Within an `ApplicationContext` (such as the`GenericApplicationContext` implementation), several kinds of beans are detected +by convention (that is, by bean name or by bean type — in particular, post-processors), +while a plain `DefaultListableBeanFactory` is agnostic about any special beans. + +For many extended container features, such as annotation processing and AOP proxying, +the [`BeanPostProcessor` extension point](#beans-factory-extension-bpp) is essential. +If you use only a plain `DefaultListableBeanFactory`, such post-processors do not +get detected and activated by default. This situation could be confusing, because +nothing is actually wrong with your bean configuration. Rather, in such a scenario, +the container needs to be fully bootstrapped through additional setup. + +The following table lists features provided by the `BeanFactory` and`ApplicationContext` interfaces and implementations. 
+ +| Feature |`BeanFactory`|`ApplicationContext`| +|------------------------------------------------------------|-------------|--------------------| +| Bean instantiation/wiring | Yes | Yes | +| Integrated lifecycle management | No | Yes | +| Automatic `BeanPostProcessor` registration | No | Yes | +| Automatic `BeanFactoryPostProcessor` registration | No | Yes | +|Convenient `MessageSource` access (for internationalization)| No | Yes | +| Built-in `ApplicationEvent` publication mechanism | No | Yes | + +To explicitly register a bean post-processor with a `DefaultListableBeanFactory`, +you need to programmatically call `addBeanPostProcessor`, as the following example shows: + +Java + +``` +DefaultListableBeanFactory factory = new DefaultListableBeanFactory(); +// populate the factory with bean definitions + +// now register any needed BeanPostProcessor instances +factory.addBeanPostProcessor(new AutowiredAnnotationBeanPostProcessor()); +factory.addBeanPostProcessor(new MyBeanPostProcessor()); + +// now start using the factory +``` + +Kotlin + +``` +val factory = DefaultListableBeanFactory() +// populate the factory with bean definitions + +// now register any needed BeanPostProcessor instances +factory.addBeanPostProcessor(AutowiredAnnotationBeanPostProcessor()) +factory.addBeanPostProcessor(MyBeanPostProcessor()) + +// now start using the factory +``` + +To apply a `BeanFactoryPostProcessor` to a plain `DefaultListableBeanFactory`, +you need to call its `postProcessBeanFactory` method, as the following example shows: + +Java + +``` +DefaultListableBeanFactory factory = new DefaultListableBeanFactory(); +XmlBeanDefinitionReader reader = new XmlBeanDefinitionReader(factory); +reader.loadBeanDefinitions(new FileSystemResource("beans.xml")); + +// bring in some property values from a Properties file +PropertySourcesPlaceholderConfigurer cfg = new PropertySourcesPlaceholderConfigurer(); +cfg.setLocation(new FileSystemResource("jdbc.properties")); + +// now actually do 
the replacement +cfg.postProcessBeanFactory(factory); +``` + +Kotlin + +``` +val factory = DefaultListableBeanFactory() +val reader = XmlBeanDefinitionReader(factory) +reader.loadBeanDefinitions(FileSystemResource("beans.xml")) + +// bring in some property values from a Properties file +val cfg = PropertySourcesPlaceholderConfigurer() +cfg.setLocation(FileSystemResource("jdbc.properties")) + +// now actually do the replacement +cfg.postProcessBeanFactory(factory) +``` + +In both cases, the explicit registration steps are inconvenient, which is +why the various `ApplicationContext` variants are preferred over a plain`DefaultListableBeanFactory` in Spring-backed applications, especially when +relying on `BeanFactoryPostProcessor` and `BeanPostProcessor` instances for extended +container functionality in a typical enterprise setup. + +| |An `AnnotationConfigApplicationContext` has all common annotation post-processors
registered and may bring in additional processors underneath the
covers through configuration annotations, such as `@EnableTransactionManagement`.
At the abstraction level of Spring’s annotation-based configuration model,
the notion of bean post-processors becomes a mere internal container detail.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 2. Resources + +This chapter covers how Spring handles resources and how you can work with resources in +Spring. It includes the following topics: + +* [Introduction](#resources-introduction) + +* [The `Resource` Interface](#resources-resource) + +* [Built-in `Resource` Implementations](#resources-implementations) + +* [The `ResourceLoader` Interface](#resources-resourceloader) + +* [The `ResourcePatternResolver` Interface](#resources-resourcepatternresolver) + +* [The `ResourceLoaderAware` Interface](#resources-resourceloaderaware) + +* [Resources as Dependencies](#resources-as-dependencies) + +* [Application Contexts and Resource Paths](#resources-app-ctx) + +### 2.1. Introduction + +Java’s standard `java.net.URL` class and standard handlers for various URL prefixes, +unfortunately, are not quite adequate enough for all access to low-level resources. For +example, there is no standardized `URL` implementation that may be used to access a +resource that needs to be obtained from the classpath or relative to a`ServletContext`. While it is possible to register new handlers for specialized `URL`prefixes (similar to existing handlers for prefixes such as `http:`), this is generally +quite complicated, and the `URL` interface still lacks some desirable functionality, +such as a method to check for the existence of the resource being pointed to. + +### 2.2. 
The `Resource` Interface + +Spring’s `Resource` interface located in the `org.springframework.core.io.` package is +meant to be a more capable interface for abstracting access to low-level resources. The +following listing provides an overview of the `Resource` interface. See the[`Resource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/io/Resource.html) javadoc for further details. + +``` +public interface Resource extends InputStreamSource { + + boolean exists(); + + boolean isReadable(); + + boolean isOpen(); + + boolean isFile(); + + URL getURL() throws IOException; + + URI getURI() throws IOException; + + File getFile() throws IOException; + + ReadableByteChannel readableChannel() throws IOException; + + long contentLength() throws IOException; + + long lastModified() throws IOException; + + Resource createRelative(String relativePath) throws IOException; + + String getFilename(); + + String getDescription(); +} +``` + +As the definition of the `Resource` interface shows, it extends the `InputStreamSource`interface. The following listing shows the definition of the `InputStreamSource`interface: + +``` +public interface InputStreamSource { + + InputStream getInputStream() throws IOException; +} +``` + +Some of the most important methods from the `Resource` interface are: + +* `getInputStream()`: Locates and opens the resource, returning an `InputStream` for + reading from the resource. It is expected that each invocation returns a fresh`InputStream`. It is the responsibility of the caller to close the stream. + +* `exists()`: Returns a `boolean` indicating whether this resource actually exists in + physical form. + +* `isOpen()`: Returns a `boolean` indicating whether this resource represents a handle + with an open stream. If `true`, the `InputStream` cannot be read multiple times and + must be read once only and then closed to avoid resource leaks. 
Returns `false` for + all usual resource implementations, with the exception of `InputStreamResource`. + +* `getDescription()`: Returns a description for this resource, to be used for error + output when working with the resource. This is often the fully qualified file name or + the actual URL of the resource. + +Other methods let you obtain an actual `URL` or `File` object representing the +resource (if the underlying implementation is compatible and supports that +functionality). + +Some implementations of the `Resource` interface also implement the extended[`WritableResource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/io/WritableResource.html) interface +for a resource that supports writing to it. + +Spring itself uses the `Resource` abstraction extensively, as an argument type in +many method signatures when a resource is needed. Other methods in some Spring APIs +(such as the constructors to various `ApplicationContext` implementations) take a`String` which in unadorned or simple form is used to create a `Resource` appropriate to +that context implementation or, via special prefixes on the `String` path, let the +caller specify that a specific `Resource` implementation must be created and used. + +While the `Resource` interface is used a lot with Spring and by Spring, it is actually +very convenient to use as a general utility class by itself in your own code, for access +to resources, even when your code does not know or care about any other parts of Spring. +While this couples your code to Spring, it really only couples it to this small set of +utility classes, which serves as a more capable replacement for `URL` and can be +considered equivalent to any other library you would use for this purpose. + +| |The `Resource` abstraction does not replace functionality. It wraps it where
possible. For example, a `UrlResource` wraps a `URL` and uses the wrapped `URL` to do its
work.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.3. Built-in `Resource` Implementations + +Spring includes several built-in `Resource` implementations: + +* [`UrlResource`](#resources-implementations-urlresource) + +* [`ClassPathResource`](#resources-implementations-classpathresource) + +* [`FileSystemResource`](#resources-implementations-filesystemresource) + +* [`PathResource`](#resources-implementations-pathresource) + +* [`ServletContextResource`](#resources-implementations-servletcontextresource) + +* [`InputStreamResource`](#resources-implementations-inputstreamresource) + +* [`ByteArrayResource`](#resources-implementations-bytearrayresource) + +For a complete list of `Resource` implementations available in Spring, consult the +"All Known Implementing Classes" section of the[`Resource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/io/Resource.html) javadoc. + +#### 2.3.1. `UrlResource` + +`UrlResource` wraps a `java.net.URL` and can be used to access any object that is +normally accessible with a URL, such as files, an HTTPS target, an FTP target, and +others. All URLs have a standardized `String` representation, such that appropriate +standardized prefixes are used to indicate one URL type from another. This includes`file:` for accessing filesystem paths, `https:` for accessing resources through the +HTTPS protocol, `ftp:` for accessing resources through FTP, and others. + +A `UrlResource` is created by Java code by explicitly using the `UrlResource` constructor +but is often created implicitly when you call an API method that takes a `String`argument meant to represent a path. For the latter case, a JavaBeans `PropertyEditor`ultimately decides which type of `Resource` to create. 
If the path string contains a +well-known (to property editor, that is) prefix (such as `classpath:`), it creates an +appropriate specialized `Resource` for that prefix. However, if it does not recognize the +prefix, it assumes the string is a standard URL string and creates a `UrlResource`. + +#### 2.3.2. `ClassPathResource` + +This class represents a resource that should be obtained from the classpath. It uses +either the thread context class loader, a given class loader, or a given class for +loading resources. + +This `Resource` implementation supports resolution as a `java.io.File` if the class path +resource resides in the file system but not for classpath resources that reside in a +jar and have not been expanded (by the servlet engine or whatever the environment is) +to the filesystem. To address this, the various `Resource` implementations always support +resolution as a `java.net.URL`. + +A `ClassPathResource` is created by Java code by explicitly using the `ClassPathResource`constructor but is often created implicitly when you call an API method that takes a`String` argument meant to represent a path. For the latter case, a JavaBeans`PropertyEditor` recognizes the special prefix, `classpath:`, on the string path and +creates a `ClassPathResource` in that case. + +#### 2.3.3. `FileSystemResource` + +This is a `Resource` implementation for `java.io.File` handles. It also supports`java.nio.file.Path` handles, applying Spring’s standard String-based path +transformations but performing all operations via the `java.nio.file.Files` API. For pure`java.nio.path.Path` based support use a `PathResource` instead. `FileSystemResource`supports resolution as a `File` and as a `URL`. + +#### 2.3.4. `PathResource` + +This is a `Resource` implementation for `java.nio.file.Path` handles, performing all +operations and transformations via the `Path` API. It supports resolution as a `File` and +as a `URL` and also implements the extended `WritableResource` interface. 
`PathResource`is effectively a pure `java.nio.path.Path` based alternative to `FileSystemResource` with +different `createRelative` behavior. + +#### 2.3.5. `ServletContextResource` + +This is a `Resource` implementation for `ServletContext` resources that interprets +relative paths within the relevant web application’s root directory. + +It always supports stream access and URL access but allows `java.io.File` access only +when the web application archive is expanded and the resource is physically on the +filesystem. Whether or not it is expanded and on the filesystem or accessed +directly from the JAR or somewhere else like a database (which is conceivable) is actually +dependent on the Servlet container. + +#### 2.3.6. `InputStreamResource` + +An `InputStreamResource` is a `Resource` implementation for a given `InputStream`. It +should be used only if no specific `Resource` implementation is applicable. In +particular, prefer `ByteArrayResource` or any of the file-based `Resource`implementations where possible. + +In contrast to other `Resource` implementations, this is a descriptor for an +already-opened resource. Therefore, it returns `true` from `isOpen()`. Do not use it if +you need to keep the resource descriptor somewhere or if you need to read a stream +multiple times. + +#### 2.3.7. `ByteArrayResource` + +This is a `Resource` implementation for a given byte array. It creates a`ByteArrayInputStream` for the given byte array. + +It is useful for loading content from any given byte array without having to resort to a +single-use `InputStreamResource`. + +### 2.4. The `ResourceLoader` Interface + +The `ResourceLoader` interface is meant to be implemented by objects that can return +(that is, load) `Resource` instances. 
The following listing shows the `ResourceLoader`interface definition: + +``` +public interface ResourceLoader { + + Resource getResource(String location); + + ClassLoader getClassLoader(); +} +``` + +All application contexts implement the `ResourceLoader` interface. Therefore, all +application contexts may be used to obtain `Resource` instances. + +When you call `getResource()` on a specific application context, and the location path +specified doesn’t have a specific prefix, you get back a `Resource` type that is +appropriate to that particular application context. For example, assume the following +snippet of code was run against a `ClassPathXmlApplicationContext` instance: + +Java + +``` +Resource template = ctx.getResource("some/resource/path/myTemplate.txt"); +``` + +Kotlin + +``` +val template = ctx.getResource("some/resource/path/myTemplate.txt") +``` + +Against a `ClassPathXmlApplicationContext`, that code returns a `ClassPathResource`. If +the same method were run against a `FileSystemXmlApplicationContext` instance, it would +return a `FileSystemResource`. For a `WebApplicationContext`, it would return a`ServletContextResource`. It would similarly return appropriate objects for each context. + +As a result, you can load resources in a fashion appropriate to the particular application +context. + +On the other hand, you may also force `ClassPathResource` to be used, regardless of the +application context type, by specifying the special `classpath:` prefix, as the following +example shows: + +Java + +``` +Resource template = ctx.getResource("classpath:some/resource/path/myTemplate.txt"); +``` + +Kotlin + +``` +val template = ctx.getResource("classpath:some/resource/path/myTemplate.txt") +``` + +Similarly, you can force a `UrlResource` to be used by specifying any of the standard`java.net.URL` prefixes. 
The following examples use the `file` and `https` prefixes: + +Java + +``` +Resource template = ctx.getResource("file:///some/resource/path/myTemplate.txt"); +``` + +Kotlin + +``` +val template = ctx.getResource("file:///some/resource/path/myTemplate.txt") +``` + +Java + +``` +Resource template = ctx.getResource("https://myhost.com/resource/path/myTemplate.txt"); +``` + +Kotlin + +``` +val template = ctx.getResource("https://myhost.com/resource/path/myTemplate.txt") +``` + +The following table summarizes the strategy for converting `String` objects to `Resource`objects: + +| Prefix | Example | Explanation | +|----------|--------------------------------|----------------------------------------------------------------------------------------------------------------------| +|classpath:|`classpath:com/myapp/config.xml`| Loaded from the classpath. | +| file: | `file:///data/config.xml` |Loaded as a `URL` from the filesystem. See also [`FileSystemResource` Caveats](#resources-filesystemresource-caveats).| +| https: | `https://myserver/logo.png` | Loaded as a `URL`. | +| (none) | `/data/config.xml` | Depends on the underlying `ApplicationContext`. | + +### 2.5. The `ResourcePatternResolver` Interface + +The `ResourcePatternResolver` interface is an extension to the `ResourceLoader` interface +which defines a strategy for resolving a location pattern (for example, an Ant-style path +pattern) into `Resource` objects. + +``` +public interface ResourcePatternResolver extends ResourceLoader { + + String CLASSPATH_ALL_URL_PREFIX = "classpath*:"; + + Resource[] getResources(String locationPattern) throws IOException; +} +``` + +As can be seen above, this interface also defines a special `classpath*:` resource prefix +for all matching resources from the class path. Note that the resource location is +expected to be a path without placeholders in this case — for example,`classpath*:/config/beans.xml`. 
JAR files or different directories in the class path can +contain multiple files with the same path and the same name. See[Wildcards in Application Context Constructor Resource Paths](#resources-app-ctx-wildcards-in-resource-paths) and its subsections for further details +on wildcard support with the `classpath*:` resource prefix. + +A passed-in `ResourceLoader` (for example, one supplied via[`ResourceLoaderAware`](#resources-resourceloaderaware) semantics) can be checked whether +it implements this extended interface too. + +`PathMatchingResourcePatternResolver` is a standalone implementation that is usable +outside an `ApplicationContext` and is also used by `ResourceArrayPropertyEditor` for +populating `Resource[]` bean properties. `PathMatchingResourcePatternResolver` is able to +resolve a specified resource location path into one or more matching `Resource` objects. +The source path may be a simple path which has a one-to-one mapping to a target`Resource`, or alternatively may contain the special `classpath*:` prefix and/or internal +Ant-style regular expressions (matched using Spring’s`org.springframework.util.AntPathMatcher` utility). Both of the latter are effectively +wildcards. + +| |The default `ResourceLoader` in any standard `ApplicationContext` is in fact an instance
of `PathMatchingResourcePatternResolver` which implements the `ResourcePatternResolver` interface. The same is true for the `ApplicationContext` instance itself which also
implements the `ResourcePatternResolver` interface and delegates to the default`PathMatchingResourcePatternResolver`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.6. The `ResourceLoaderAware` Interface + +The `ResourceLoaderAware` interface is a special callback interface which identifies +components that expect to be provided a `ResourceLoader` reference. The following listing +shows the definition of the `ResourceLoaderAware` interface: + +``` +public interface ResourceLoaderAware { + + void setResourceLoader(ResourceLoader resourceLoader); +} +``` + +When a class implements `ResourceLoaderAware` and is deployed into an application context +(as a Spring-managed bean), it is recognized as `ResourceLoaderAware` by the application +context. The application context then invokes `setResourceLoader(ResourceLoader)`, +supplying itself as the argument (remember, all application contexts in Spring implement +the `ResourceLoader` interface). + +Since an `ApplicationContext` is a `ResourceLoader`, the bean could also implement the`ApplicationContextAware` interface and use the supplied application context directly to +load resources. However, in general, it is better to use the specialized `ResourceLoader`interface if that is all you need. The code would be coupled only to the resource loading +interface (which can be considered a utility interface) and not to the whole Spring`ApplicationContext` interface. + +In application components, you may also rely upon autowiring of the `ResourceLoader` as +an alternative to implementing the `ResourceLoaderAware` interface. 
The *traditional* `constructor` and `byType` autowiring modes (as described in [Autowiring Collaborators](#beans-factory-autowire)) +are capable of providing a `ResourceLoader` for either a constructor argument or a +setter method parameter, respectively. For more flexibility (including the ability to +autowire fields and multiple parameter methods), consider using the annotation-based +autowiring features. In that case, the `ResourceLoader` is autowired into a field, +constructor argument, or method parameter that expects the `ResourceLoader` type as long +as the field, constructor, or method in question carries the `@Autowired` annotation. +For more information, see [Using `@Autowired`](#beans-autowired-annotation). + +| |To load one or more `Resource` objects for a resource path that contains wildcards
or makes use of the special `classpath*:` resource prefix, consider having an instance of [`ResourcePatternResolver`](#resources-resourcepatternresolver) autowired into your
application components instead of `ResourceLoader`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.7. Resources as Dependencies + +If the bean itself is going to determine and supply the resource path through some sort +of dynamic process, it probably makes sense for the bean to use the `ResourceLoader` or`ResourcePatternResolver` interface to load resources. For example, consider the loading +of a template of some sort, where the specific resource that is needed depends on the +role of the user. If the resources are static, it makes sense to eliminate the use of the`ResourceLoader` interface (or `ResourcePatternResolver` interface) completely, have the +bean expose the `Resource` properties it needs, and expect them to be injected into it. + +What makes it trivial to then inject these properties is that all application contexts +register and use a special JavaBeans `PropertyEditor`, which can convert `String` paths +to `Resource` objects. For example, the following `MyBean` class has a `template`property of type `Resource`. + +Java + +``` +package example; + +public class MyBean { + + private Resource template; + + public setTemplate(Resource template) { + this.template = template; + } + + // ... +} +``` + +Kotlin + +``` +class MyBean(var template: Resource) +``` + +In an XML configuration file, the `template` property can be configured with a simple +string for that resource, as the following example shows: + +``` + + + +``` + +Note that the resource path has no prefix. 
Consequently, because the application context +itself is going to be used as the `ResourceLoader`, the resource is loaded through a`ClassPathResource`, a `FileSystemResource`, or a `ServletContextResource`, depending on +the exact type of the application context. + +If you need to force a specific `Resource` type to be used, you can use a prefix. The +following two examples show how to force a `ClassPathResource` and a `UrlResource` (the +latter being used to access a file in the filesystem): + +``` + +``` + +``` + +``` + +If the `MyBean` class is refactored for use with annotation-driven configuration, the +path to `myTemplate.txt` can be stored under a key named `template.path` — for example, +in a properties file made available to the Spring `Environment` (see[Environment Abstraction](#beans-environment)). The template path can then be referenced via the `@Value`annotation using a property placeholder (see [Using `@Value`](#beans-value-annotations)). Spring will +retrieve the value of the template path as a string, and a special `PropertyEditor` will +convert the string to a `Resource` object to be injected into the `MyBean` constructor. +The following example demonstrates how to achieve this. + +Java + +``` +@Component +public class MyBean { + + private final Resource template; + + public MyBean(@Value("${template.path}") Resource template) { + this.template = template; + } + + // ... +} +``` + +Kotlin + +``` +@Component +class MyBean(@Value("\${template.path}") private val template: Resource) +``` + +If we want to support multiple templates discovered under the same path in multiple +locations in the classpath — for example, in multiple jars in the classpath — we can +use the special `classpath*:` prefix and wildcarding to define a `templates.path` key as`classpath*:/config/templates/*.txt`. 
If we redefine the `MyBean` class as follows, +Spring will convert the template path pattern into an array of `Resource` objects that +can be injected into the `MyBean` constructor. + +Java + +``` +@Component +public class MyBean { + + private final Resource[] templates; + + public MyBean(@Value("${templates.path}") Resource[] templates) { + this.templates = templates; + } + + // ... +} +``` + +Kotlin + +``` +@Component +class MyBean(@Value("\${templates.path}") private val templates: Resource[]) +``` + +### 2.8. Application Contexts and Resource Paths + +This section covers how to create application contexts with resources, including shortcuts +that work with XML, how to use wildcards, and other details. + +#### 2.8.1. Constructing Application Contexts + +An application context constructor (for a specific application context type) generally +takes a string or array of strings as the location paths of the resources, such as +XML files that make up the definition of the context. + +When such a location path does not have a prefix, the specific `Resource` type built from +that path and used to load the bean definitions depends on and is appropriate to the +specific application context. For example, consider the following example, which creates a`ClassPathXmlApplicationContext`: + +Java + +``` +ApplicationContext ctx = new ClassPathXmlApplicationContext("conf/appContext.xml"); +``` + +Kotlin + +``` +val ctx = ClassPathXmlApplicationContext("conf/appContext.xml") +``` + +The bean definitions are loaded from the classpath, because a `ClassPathResource` is +used. However, consider the following example, which creates a `FileSystemXmlApplicationContext`: + +Java + +``` +ApplicationContext ctx = + new FileSystemXmlApplicationContext("conf/appContext.xml"); +``` + +Kotlin + +``` +val ctx = FileSystemXmlApplicationContext("conf/appContext.xml") +``` + +Now the bean definitions are loaded from a filesystem location (in this case, relative to +the current working directory). 
+ +Note that the use of the special `classpath` prefix or a standard URL prefix on the +location path overrides the default type of `Resource` created to load the bean +definitions. Consider the following example: + +Java + +``` +ApplicationContext ctx = + new FileSystemXmlApplicationContext("classpath:conf/appContext.xml"); +``` + +Kotlin + +``` +val ctx = FileSystemXmlApplicationContext("classpath:conf/appContext.xml") +``` + +Using `FileSystemXmlApplicationContext` loads the bean definitions from the classpath. +However, it is still a `FileSystemXmlApplicationContext`. If it is subsequently used as a`ResourceLoader`, any unprefixed paths are still treated as filesystem paths. + +##### Constructing `ClassPathXmlApplicationContext` Instances — Shortcuts + +The `ClassPathXmlApplicationContext` exposes a number of constructors to enable +convenient instantiation. The basic idea is that you can supply merely a string array +that contains only the filenames of the XML files themselves (without the leading path +information) and also supply a `Class`. The `ClassPathXmlApplicationContext` then derives +the path information from the supplied class. 
+ +Consider the following directory layout: + +``` +com/ + example/ + services.xml + repositories.xml + MessengerService.class +``` + +The following example shows how a `ClassPathXmlApplicationContext` instance composed of +the beans defined in files named `services.xml` and `repositories.xml` (which are on the +classpath) can be instantiated: + +Java + +``` +ApplicationContext ctx = new ClassPathXmlApplicationContext( + new String[] {"services.xml", "repositories.xml"}, MessengerService.class); +``` + +Kotlin + +``` +val ctx = ClassPathXmlApplicationContext(arrayOf("services.xml", "repositories.xml"), MessengerService::class.java) +``` + +See the [`ClassPathXmlApplicationContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/support/ClassPathXmlApplicationContext.html)javadoc for details on the various constructors. + +#### 2.8.2. Wildcards in Application Context Constructor Resource Paths #### + +The resource paths in application context constructor values may be simple paths (as +shown earlier), each of which has a one-to-one mapping to a target `Resource` or, +alternately, may contain the special `classpath*:` prefix or internal Ant-style patterns +(matched by using Spring’s `PathMatcher` utility). Both of the latter are effectively +wildcards. + +One use for this mechanism is when you need to do component-style application assembly. All +components can *publish* context definition fragments to a well-known location path, and, +when the final application context is created using the same path prefixed with`classpath*:`, all component fragments are automatically picked up. + +Note that this wildcarding is specific to the use of resource paths in application context +constructors (or when you use the `PathMatcher` utility class hierarchy directly) and is +resolved at construction time. It has nothing to do with the `Resource` type itself. 
+You cannot use the `classpath*:` prefix to construct an actual `Resource`, as +a resource points to just one resource at a time. + +##### Ant-style Patterns + +Path locations can contain Ant-style patterns, as the following example shows: + +``` +/WEB-INF/*-context.xml +com/mycompany/**/applicationContext.xml +file:C:/some/path/*-context.xml +classpath:com/mycompany/**/applicationContext.xml +``` + +When the path location contains an Ant-style pattern, the resolver follows a more complex +procedure to try to resolve the wildcard. It produces a `Resource` for the path up to the +last non-wildcard segment and obtains a URL from it. If this URL is not a `jar:` URL or +container-specific variant (such as `zip:` in WebLogic, `wsjar` in WebSphere, and so on), +a `java.io.File` is obtained from it and used to resolve the wildcard by traversing the +filesystem. In the case of a jar URL, the resolver either gets a`java.net.JarURLConnection` from it or manually parses the jar URL and then traverses the +contents of the jar file to resolve the wildcards. + +###### Implications on Portability + +If the specified path is already a `file` URL (either implicitly because the base`ResourceLoader` is a filesystem one or explicitly), wildcarding is guaranteed to +work in a completely portable fashion. + +If the specified path is a `classpath` location, the resolver must obtain the last +non-wildcard path segment URL by making a `Classloader.getResource()` call. Since this +is just a node of the path (not the file at the end), it is actually undefined (in the`ClassLoader` javadoc) exactly what sort of a URL is returned in this case. In practice, +it is always a `java.io.File` representing the directory (where the classpath resource +resolves to a filesystem location) or a jar URL of some sort (where the classpath resource +resolves to a jar location). Still, there is a portability concern on this operation. 
+ +If a jar URL is obtained for the last non-wildcard segment, the resolver must be able to +get a `java.net.JarURLConnection` from it or manually parse the jar URL, to be able to +walk the contents of the jar and resolve the wildcard. This does work in most environments +but fails in others, and we strongly recommend that the wildcard resolution of resources +coming from jars be thoroughly tested in your specific environment before you rely on it. + +##### The `classpath*:` Prefix + +When constructing an XML-based application context, a location string may use the +special `classpath*:` prefix, as the following example shows: + +Java + +``` +ApplicationContext ctx = + new ClassPathXmlApplicationContext("classpath*:conf/appContext.xml"); +``` + +Kotlin + +``` +val ctx = ClassPathXmlApplicationContext("classpath*:conf/appContext.xml") +``` + +This special prefix specifies that all classpath resources that match the given name +must be obtained (internally, this essentially happens through a call to`ClassLoader.getResources(…​)`) and then merged to form the final application +context definition. + +| |The wildcard classpath relies on the `getResources()` method of the underlying`ClassLoader`. As most application servers nowadays supply their own `ClassLoader`implementation, the behavior might differ, especially when dealing with jar files. A
simple test to check if `classpath*` works is to use the `ClassLoader` to load a file from
within a jar on the classpath: `getClass().getClassLoader().getResources("<someFileInsideTheJar>")`. Try this test with
files that have the same name but reside in two different locations — for example, files
with the same name and same path but in different jars on the classpath. In case an
inappropriate result is returned, check the application server documentation for settings
that might affect the `ClassLoader` behavior.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also combine the `classpath*:` prefix with a `PathMatcher` pattern in the +rest of the location path (for example, `classpath*:META-INF/*-beans.xml`). In this +case, the resolution strategy is fairly simple: A `ClassLoader.getResources()` call is +used on the last non-wildcard path segment to get all the matching resources in the +class loader hierarchy and then, off each resource, the same `PathMatcher` resolution +strategy described earlier is used for the wildcard subpath. + +##### Other Notes Relating to Wildcards + +Note that `classpath*:`, when combined with Ant-style patterns, only works +reliably with at least one root directory before the pattern starts, unless the actual +target files reside in the file system. This means that a pattern such as`classpath*:*.xml` might not retrieve files from the root of jar files but rather only +from the root of expanded directories. + +Spring’s ability to retrieve classpath entries originates from the JDK’s`ClassLoader.getResources()` method, which only returns file system locations for an +empty string (indicating potential roots to search). 
Spring evaluates`URLClassLoader` runtime configuration and the `java.class.path` manifest in jar files +as well, but this is not guaranteed to lead to portable behavior. + +| |The scanning of classpath packages requires the presence of corresponding directory
entries in the classpath. When you build JARs with Ant, do not activate the `files-only` switch of the JAR task. Also, classpath directories may not get exposed based on security
policies in some environments — for example, stand-alone applications on JDK 1.7.0\_45
and higher (which requires 'Trusted-Library' to be set up in your manifests. See [https://stackoverflow.com/questions/19394570/java-jre-7u45-breaks-classloader-getresources](https://stackoverflow.com/questions/19394570/java-jre-7u45-breaks-classloader-getresources)).

On JDK 9’s module path (Jigsaw), Spring’s classpath scanning generally works as expected.
Putting resources into a dedicated directory is highly recommendable here as well,
avoiding the aforementioned portability problems with searching the jar file root level.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Ant-style patterns with `classpath:` resources are not guaranteed to find matching +resources if the root package to search is available in multiple classpath locations. +Consider the following example of a resource location: + +``` +com/mycompany/package1/service-context.xml +``` + +Now consider an Ant-style path that someone might use to try to find that file: + +``` +classpath:com/mycompany/**/service-context.xml +``` + +Such a resource may exist in only one location in the classpath, but when a path such as +the preceding example is used to try to resolve it, the resolver works off the (first) +URL returned by `getResource("com/mycompany");`. If this base package node exists in +multiple `ClassLoader` locations, the desired resource may not exist in the first +location found. 
Therefore, in such cases you should prefer using `classpath*:` with the +same Ant-style pattern, which searches all classpath locations that contain the`com.mycompany` base package: `classpath*:com/mycompany/**/service-context.xml`. + +#### 2.8.3. `FileSystemResource` Caveats + +A `FileSystemResource` that is not attached to a `FileSystemApplicationContext` (that +is, when a `FileSystemApplicationContext` is not the actual `ResourceLoader`) treats +absolute and relative paths as you would expect. Relative paths are relative to the +current working directory, while absolute paths are relative to the root of the +filesystem. + +For backwards compatibility (historical) reasons however, this changes when the`FileSystemApplicationContext` is the `ResourceLoader`. The`FileSystemApplicationContext` forces all attached `FileSystemResource` instances +to treat all location paths as relative, whether they start with a leading slash or not. +In practice, this means the following examples are equivalent: + +Java + +``` +ApplicationContext ctx = + new FileSystemXmlApplicationContext("conf/context.xml"); +``` + +Kotlin + +``` +val ctx = FileSystemXmlApplicationContext("conf/context.xml") +``` + +Java + +``` +ApplicationContext ctx = + new FileSystemXmlApplicationContext("/conf/context.xml"); +``` + +Kotlin + +``` +val ctx = FileSystemXmlApplicationContext("/conf/context.xml") +``` + +The following examples are also equivalent (even though it would make sense for them to be different, as one +case is relative and the other absolute): + +Java + +``` +FileSystemXmlApplicationContext ctx = ...; +ctx.getResource("some/resource/path/myTemplate.txt"); +``` + +Kotlin + +``` +val ctx: FileSystemXmlApplicationContext = ... +ctx.getResource("some/resource/path/myTemplate.txt") +``` + +Java + +``` +FileSystemXmlApplicationContext ctx = ...; +ctx.getResource("/some/resource/path/myTemplate.txt"); +``` + +Kotlin + +``` +val ctx: FileSystemXmlApplicationContext = ... 
+ctx.getResource("/some/resource/path/myTemplate.txt") +``` + +In practice, if you need true absolute filesystem paths, you should avoid using +absolute paths with `FileSystemResource` or `FileSystemXmlApplicationContext` and +force the use of a `UrlResource` by using the `file:` URL prefix. The following examples +show how to do so: + +Java + +``` +// actual context type doesn't matter, the Resource will always be UrlResource +ctx.getResource("file:///some/resource/path/myTemplate.txt"); +``` + +Kotlin + +``` +// actual context type doesn't matter, the Resource will always be UrlResource +ctx.getResource("file:///some/resource/path/myTemplate.txt") +``` + +Java + +``` +// force this FileSystemXmlApplicationContext to load its definition via a UrlResource +ApplicationContext ctx = + new FileSystemXmlApplicationContext("file:///conf/context.xml"); +``` + +Kotlin + +``` +// force this FileSystemXmlApplicationContext to load its definition via a UrlResource +val ctx = FileSystemXmlApplicationContext("file:///conf/context.xml") +``` + +## 3. Validation, Data Binding, and Type Conversion# + +There are pros and cons for considering validation as business logic, and Spring offers +a design for validation (and data binding) that does not exclude either one of them. +Specifically, validation should not be tied to the web tier and should be easy to localize, +and it should be possible to plug in any available validator. Considering these concerns, +Spring provides a `Validator` contract that is both basic and eminently usable +in every layer of an application. + +Data binding is useful for letting user input be dynamically bound to the domain +model of an application (or whatever objects you use to process user input). Spring +provides the aptly named `DataBinder` to do exactly that. The `Validator` and the`DataBinder` make up the `validation` package, which is primarily used in but not +limited to the web layer. 
+ +The `BeanWrapper` is a fundamental concept in the Spring Framework and is used in a lot +of places. However, you probably do not need to use the `BeanWrapper`directly. Because this is reference documentation, however, we felt that some explanation +might be in order. We explain the `BeanWrapper` in this chapter, since, if you are +going to use it at all, you are most likely do so when trying to bind data to objects. + +Spring’s `DataBinder` and the lower-level `BeanWrapper` both use `PropertyEditorSupport`implementations to parse and format property values. The `PropertyEditor` and`PropertyEditorSupport` types are part of the JavaBeans specification and are also +explained in this chapter. Spring 3 introduced a `core.convert` package that provides a +general type conversion facility, as well as a higher-level “format” package for +formatting UI field values. You can use these packages as simpler alternatives to`PropertyEditorSupport` implementations. They are also discussed in this chapter. + +Spring supports Java Bean Validation through setup infrastructure and an adaptor to +Spring’s own `Validator` contract. Applications can enable Bean Validation once globally, +as described in [Java Bean Validation](#validation-beanvalidation), and use it exclusively for all validation +needs. In the web layer, applications can further register controller-local Spring`Validator` instances per `DataBinder`, as described in [Configuring a `DataBinder`](#validation-binder), which can +be useful for plugging in custom validation logic. + +### 3.1. Validation by Using Spring’s Validator Interface + +Spring features a `Validator` interface that you can use to validate objects. The`Validator` interface works by using an `Errors` object so that, while validating, +validators can report validation failures to the `Errors` object. 
+ +Consider the following example of a small data object: + +Java + +``` +public class Person { + + private String name; + private int age; + + // the usual getters and setters... +} +``` + +Kotlin + +``` +class Person(val name: String, val age: Int) +``` + +The next example provides validation behavior for the `Person` class by implementing the +following two methods of the `org.springframework.validation.Validator` interface: + +* `supports(Class)`: Can this `Validator` validate instances of the supplied `Class`? + +* `validate(Object, org.springframework.validation.Errors)`: Validates the given object + and, in case of validation errors, registers those with the given `Errors` object. + +Implementing a `Validator` is fairly straightforward, especially when you know of the`ValidationUtils` helper class that the Spring Framework also provides. The following +example implements `Validator` for `Person` instances: + +Java + +``` +public class PersonValidator implements Validator { + + /** + * This Validator validates only Person instances + */ + public boolean supports(Class clazz) { + return Person.class.equals(clazz); + } + + public void validate(Object obj, Errors e) { + ValidationUtils.rejectIfEmpty(e, "name", "name.empty"); + Person p = (Person) obj; + if (p.getAge() < 0) { + e.rejectValue("age", "negativevalue"); + } else if (p.getAge() > 110) { + e.rejectValue("age", "too.darn.old"); + } + } +} +``` + +Kotlin + +``` +class PersonValidator : Validator { + + /** + * This Validator validates only Person instances + */ + override fun supports(clazz: Class<*>): Boolean { + return Person::class.java == clazz + } + + override fun validate(obj: Any, e: Errors) { + ValidationUtils.rejectIfEmpty(e, "name", "name.empty") + val p = obj as Person + if (p.age < 0) { + e.rejectValue("age", "negativevalue") + } else if (p.age > 110) { + e.rejectValue("age", "too.darn.old") + } + } +} +``` + +The `static` `rejectIfEmpty(..)` method on the `ValidationUtils` class is used to 
+reject the `name` property if it is `null` or the empty string. Have a look at the[`ValidationUtils`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/validation/ValidationUtils.html) javadoc +to see what functionality it provides besides the example shown previously. + +While it is certainly possible to implement a single `Validator` class to validate each +of the nested objects in a rich object, it may be better to encapsulate the validation +logic for each nested class of object in its own `Validator` implementation. A simple +example of a “rich” object would be a `Customer` that is composed of two `String`properties (a first and a second name) and a complex `Address` object. `Address` objects +may be used independently of `Customer` objects, so a distinct `AddressValidator`has been implemented. If you want your `CustomerValidator` to reuse the logic contained +within the `AddressValidator` class without resorting to copy-and-paste, you can +dependency-inject or instantiate an `AddressValidator` within your `CustomerValidator`, +as the following example shows: + +Java + +``` +public class CustomerValidator implements Validator { + + private final Validator addressValidator; + + public CustomerValidator(Validator addressValidator) { + if (addressValidator == null) { + throw new IllegalArgumentException("The supplied [Validator] is " + + "required and must not be null."); + } + if (!addressValidator.supports(Address.class)) { + throw new IllegalArgumentException("The supplied [Validator] must " + + "support the validation of [Address] instances."); + } + this.addressValidator = addressValidator; + } + + /** + * This Validator validates Customer instances, and any subclasses of Customer too + */ + public boolean supports(Class clazz) { + return Customer.class.isAssignableFrom(clazz); + } + + public void validate(Object target, Errors errors) { + ValidationUtils.rejectIfEmptyOrWhitespace(errors, "firstName", "field.required"); + 
ValidationUtils.rejectIfEmptyOrWhitespace(errors, "surname", "field.required"); + Customer customer = (Customer) target; + try { + errors.pushNestedPath("address"); + ValidationUtils.invokeValidator(this.addressValidator, customer.getAddress(), errors); + } finally { + errors.popNestedPath(); + } + } +} +``` + +Kotlin + +``` +class CustomerValidator(private val addressValidator: Validator) : Validator { + + init { + if (addressValidator == null) { + throw IllegalArgumentException("The supplied [Validator] is required and must not be null.") + } + if (!addressValidator.supports(Address::class.java)) { + throw IllegalArgumentException("The supplied [Validator] must support the validation of [Address] instances.") + } + } + + /* + * This Validator validates Customer instances, and any subclasses of Customer too + */ + override fun supports(clazz: Class<>): Boolean { + return Customer::class.java.isAssignableFrom(clazz) + } + + override fun validate(target: Any, errors: Errors) { + ValidationUtils.rejectIfEmptyOrWhitespace(errors, "firstName", "field.required") + ValidationUtils.rejectIfEmptyOrWhitespace(errors, "surname", "field.required") + val customer = target as Customer + try { + errors.pushNestedPath("address") + ValidationUtils.invokeValidator(this.addressValidator, customer.address, errors) + } finally { + errors.popNestedPath() + } + } +} +``` + +Validation errors are reported to the `Errors` object passed to the validator. In the case +of Spring Web MVC, you can use the `` tag to inspect the error messages, but +you can also inspect the `Errors` object yourself. More information about the +methods it offers can be found in the [javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/validation/Errors.html). + +### 3.2. Resolving Codes to Error Messages + +We covered databinding and validation. This section covers outputting messages that correspond +to validation errors. 
In the example shown in the [preceding section](#validator), +we rejected the `name` and `age` fields. If we want to output the error messages by using a`MessageSource`, we can do so using the error code we provide when rejecting the field +('name' and 'age' in this case). When you call (either directly, or indirectly, by using, +for example, the `ValidationUtils` class) `rejectValue` or one of the other `reject` methods +from the `Errors` interface, the underlying implementation not only registers the code you +passed in but also registers a number of additional error codes. The `MessageCodesResolver`determines which error codes the `Errors` interface registers. By default, the`DefaultMessageCodesResolver` is used, which (for example) not only registers a message +with the code you gave but also registers messages that include the field name you passed +to the reject method. So, if you reject a field by using `rejectValue("age", "too.darn.old")`, +apart from the `too.darn.old` code, Spring also registers `too.darn.old.age` and`too.darn.old.age.int` (the first includes the field name and the second includes the type +of the field). This is done as a convenience to aid developers when targeting error messages. + +More information on the `MessageCodesResolver` and the default strategy can be found +in the javadoc of[`MessageCodesResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/validation/MessageCodesResolver.html) and[`DefaultMessageCodesResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/validation/DefaultMessageCodesResolver.html), +respectively. + +### 3.3. Bean Manipulation and the `BeanWrapper` + +The `org.springframework.beans` package adheres to the JavaBeans standard. 
+A JavaBean is a class with a default no-argument constructor and that follows +a naming convention where (for example) a property named `bingoMadness` would +have a setter method `setBingoMadness(..)` and a getter method `getBingoMadness()`. For +more information about JavaBeans and the specification, see[javabeans](https://docs.oracle.com/javase/8/docs/api/java/beans/package-summary.html). + +One quite important class in the beans package is the `BeanWrapper` interface and its +corresponding implementation (`BeanWrapperImpl`). As quoted from the javadoc, the`BeanWrapper` offers functionality to set and get property values (individually or in +bulk), get property descriptors, and query properties to determine if they are +readable or writable. Also, the `BeanWrapper` offers support for nested properties, +enabling the setting of properties on sub-properties to an unlimited depth. The`BeanWrapper` also supports the ability to add standard JavaBeans `PropertyChangeListeners`and `VetoableChangeListeners`, without the need for supporting code in the target class. +Last but not least, the `BeanWrapper` provides support for setting indexed properties. +The `BeanWrapper` usually is not used by application code directly but is used by the`DataBinder` and the `BeanFactory`. + +The way the `BeanWrapper` works is partly indicated by its name: it wraps a bean to +perform actions on that bean, such as setting and retrieving properties. + +#### 3.3.1. Setting and Getting Basic and Nested Properties + +Setting and getting properties is done through the `setPropertyValue` and`getPropertyValue` overloaded method variants of `BeanWrapper`. See their Javadoc for +details. 
The below table shows some examples of these conventions: + +| Expression | Explanation | +|----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `name` | Indicates the property `name` that corresponds to the `getName()` or `isName()`and `setName(..)` methods. | +| `account.name` |Indicates the nested property `name` of the property `account` that corresponds to
(for example) the `getAccount().setName()` or `getAccount().getName()` methods.| +| `account[2]` | Indicates the *third* element of the indexed property `account`. Indexed properties
can be of type `array`, `list`, or other naturally ordered collection. | +|`account[COMPANYNAME]`| Indicates the value of the map entry indexed by the `COMPANYNAME` key of the `account` `Map`property. | + +(This next section is not vitally important to you if you do not plan to work with +the `BeanWrapper` directly. If you use only the `DataBinder` and the `BeanFactory`and their default implementations, you should skip ahead to the[section on `PropertyEditors`](#beans-beans-conversion).) + +The following two example classes use the `BeanWrapper` to get and set +properties: + +Java + +``` +public class Company { + + private String name; + private Employee managingDirector; + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public Employee getManagingDirector() { + return this.managingDirector; + } + + public void setManagingDirector(Employee managingDirector) { + this.managingDirector = managingDirector; + } +} +``` + +Kotlin + +``` +class Company { + var name: String? = null + var managingDirector: Employee? = null +} +``` + +Java + +``` +public class Employee { + + private String name; + + private float salary; + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public float getSalary() { + return salary; + } + + public void setSalary(float salary) { + this.salary = salary; + } +} +``` + +Kotlin + +``` +class Employee { + var name: String? = null + var salary: Float? = null +} +``` + +The following code snippets show some examples of how to retrieve and manipulate some of +the properties of instantiated `Company`s and `Employee`s: + +Java + +``` +BeanWrapper company = new BeanWrapperImpl(new Company()); +// setting the company name.. +company.setPropertyValue("name", "Some Company Inc."); +// ... 
can also be done like this: +PropertyValue value = new PropertyValue("name", "Some Company Inc."); +company.setPropertyValue(value); + +// ok, let's create the director and tie it to the company: +BeanWrapper jim = new BeanWrapperImpl(new Employee()); +jim.setPropertyValue("name", "Jim Stravinsky"); +company.setPropertyValue("managingDirector", jim.getWrappedInstance()); + +// retrieving the salary of the managingDirector through the company +Float salary = (Float) company.getPropertyValue("managingDirector.salary"); +``` + +Kotlin + +``` +val company = BeanWrapperImpl(Company()) +// setting the company name.. +company.setPropertyValue("name", "Some Company Inc.") +// ... can also be done like this: +val value = PropertyValue("name", "Some Company Inc.") +company.setPropertyValue(value) + +// ok, let's create the director and tie it to the company: +val jim = BeanWrapperImpl(Employee()) +jim.setPropertyValue("name", "Jim Stravinsky") +company.setPropertyValue("managingDirector", jim.wrappedInstance) + +// retrieving the salary of the managingDirector through the company +val salary = company.getPropertyValue("managingDirector.salary") as Float? +``` + +#### 3.3.2. Built-in `PropertyEditor` Implementations + +Spring uses the concept of a `PropertyEditor` to effect the conversion between an`Object` and a `String`. It can be handy +to represent properties in a different way than the object itself. For example, a `Date`can be represented in a human readable way (as the `String`: `'2007-14-09'`), while +we can still convert the human readable form back to the original date (or, even +better, convert any date entered in a human readable form back to `Date` objects). This +behavior can be achieved by registering custom editors of type`java.beans.PropertyEditor`. Registering custom editors on a `BeanWrapper` or, +alternatively, in a specific IoC container (as mentioned in the previous chapter), gives it +the knowledge of how to convert properties to the desired type. 
For more about`PropertyEditor`, see [the javadoc of the `java.beans` package from Oracle](https://docs.oracle.com/javase/8/docs/api/java/beans/package-summary.html). + +A couple of examples where property editing is used in Spring: + +* Setting properties on beans is done by using `PropertyEditor` implementations. + When you use `String` as the value of a property of some bean that you declare + in an XML file, Spring (if the setter of the corresponding property has a `Class`parameter) uses `ClassEditor` to try to resolve the parameter to a `Class` object. + +* Parsing HTTP request parameters in Spring’s MVC framework is done by using all kinds + of `PropertyEditor` implementations that you can manually bind in all subclasses of the`CommandController`. + +Spring has a number of built-in `PropertyEditor` implementations to make life easy. +They are all located in the `org.springframework.beans.propertyeditors`package. Most, (but not all, as indicated in the following table) are, by default, registered by`BeanWrapperImpl`. Where the property editor is configurable in some fashion, you can +still register your own variant to override the default one. The following table describes +the various `PropertyEditor` implementations that Spring provides: + +| Class | Explanation | +|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`ByteArrayPropertyEditor`| Editor for byte arrays. Converts strings to their corresponding byte
representations. Registered by default by `BeanWrapperImpl`. | +| `ClassEditor` | Parses Strings that represent classes to actual classes and vice-versa. When a
class is not found, an `IllegalArgumentException` is thrown. By default, registered by `BeanWrapperImpl`. | +| `CustomBooleanEditor` | Customizable property editor for `Boolean` properties. By default, registered by `BeanWrapperImpl` but can be overridden by registering a custom instance of it as a
custom editor. | +|`CustomCollectionEditor` | Property editor for collections, converting any source `Collection` to a given target `Collection` type. | +| `CustomDateEditor` | Customizable property editor for `java.util.Date`, supporting a custom `DateFormat`. NOT
registered by default. Must be user-registered with the appropriate format as needed. | +| `CustomNumberEditor` | Customizable property editor for any `Number` subclass, such as `Integer`, `Long`, `Float`, or `Double`. By default, registered by `BeanWrapperImpl` but can be overridden by
registering a custom instance of it as a custom editor. | +| `FileEditor` | Resolves strings to `java.io.File` objects. By default, registered by `BeanWrapperImpl`. | +| `InputStreamEditor` | One-way property editor that can take a string and produce (through an
intermediate `ResourceEditor` and `Resource`) an `InputStream` so that `InputStream` properties may be directly set as strings. Note that the default usage does not close
the `InputStream` for you. By default, registered by `BeanWrapperImpl`.| +| `LocaleEditor` | Can resolve strings to `Locale` objects and vice-versa (the string format is `[language]_[country]_[variant]`, same as the `toString()` method of `Locale`). Also accepts spaces as separators, as an alternative to underscores.
By default, registered by `BeanWrapperImpl`. | +| `PatternEditor` | Can resolve strings to `java.util.regex.Pattern` objects and vice-versa. | +| `PropertiesEditor` | Can convert strings (formatted with the format defined in the javadoc of the `java.util.Properties` class) to `Properties` objects. By default, registered
by `BeanWrapperImpl`. | +| `StringTrimmerEditor` | Property editor that trims strings. Optionally allows transforming an empty string
into a `null` value. NOT registered by default — must be user-registered. | +| `URLEditor` | Can resolve a string representation of a URL to an actual `URL` object.
By default, registered by `BeanWrapperImpl`. | + +Spring uses the `java.beans.PropertyEditorManager` to set the search path for property +editors that might be needed. The search path also includes `sun.bean.editors`, which +includes `PropertyEditor` implementations for types such as `Font`, `Color`, and most of +the primitive types. Note also that the standard JavaBeans infrastructure +automatically discovers `PropertyEditor` classes (without you having to register them +explicitly) if they are in the same package as the class they handle and have the same +name as that class, with `Editor` appended. For example, one could have the following +class and package structure, which would be sufficient for the `SomethingEditor` class to be +recognized and used as the `PropertyEditor` for `Something`-typed properties. + +``` +com + chank + pop + Something + SomethingEditor // the PropertyEditor for the Something class +``` + +Note that you can also use the standard `BeanInfo` JavaBeans mechanism here as well +(described to some extent[here](https://docs.oracle.com/javase/tutorial/javabeans/advanced/customization.html)). 
The +following example uses the `BeanInfo` mechanism to explicitly register one or more`PropertyEditor` instances with the properties of an associated class: + +``` +com + chank + pop + Something + SomethingBeanInfo // the BeanInfo for the Something class +``` + +The following Java source code for the referenced `SomethingBeanInfo` class +associates a `CustomNumberEditor` with the `age` property of the `Something` class: + +Java + +``` +public class SomethingBeanInfo extends SimpleBeanInfo { + + public PropertyDescriptor[] getPropertyDescriptors() { + try { + final PropertyEditor numberPE = new CustomNumberEditor(Integer.class, true); + PropertyDescriptor ageDescriptor = new PropertyDescriptor("age", Something.class) { + @Override + public PropertyEditor createPropertyEditor(Object bean) { + return numberPE; + } + }; + return new PropertyDescriptor[] { ageDescriptor }; + } + catch (IntrospectionException ex) { + throw new Error(ex.toString()); + } + } +} +``` + +Kotlin + +``` +class SomethingBeanInfo : SimpleBeanInfo() { + + override fun getPropertyDescriptors(): Array { + try { + val numberPE = CustomNumberEditor(Int::class.java, true) + val ageDescriptor = object : PropertyDescriptor("age", Something::class.java) { + override fun createPropertyEditor(bean: Any): PropertyEditor { + return numberPE + } + } + return arrayOf(ageDescriptor) + } catch (ex: IntrospectionException) { + throw Error(ex.toString()) + } + + } +} +``` + +##### Registering Additional Custom `PropertyEditor` Implementations ##### + +When setting bean properties as string values, a Spring IoC container ultimately uses +standard JavaBeans `PropertyEditor` implementations to convert these strings to the complex type of the +property. Spring pre-registers a number of custom `PropertyEditor` implementations (for example, to +convert a class name expressed as a string into a `Class` object). 
Additionally, +Java’s standard JavaBeans `PropertyEditor` lookup mechanism lets a `PropertyEditor`for a class be named appropriately and placed in the same package as the class +for which it provides support, so that it can be found automatically. + +If there is a need to register other custom `PropertyEditors`, several mechanisms are +available. The most manual approach, which is not normally convenient or +recommended, is to use the `registerCustomEditor()` method of the`ConfigurableBeanFactory` interface, assuming you have a `BeanFactory` reference. +Another (slightly more convenient) mechanism is to use a special bean factory +post-processor called `CustomEditorConfigurer`. Although you can use bean factory post-processors +with `BeanFactory` implementations, the `CustomEditorConfigurer` has a +nested property setup, so we strongly recommend that you use it with the`ApplicationContext`, where you can deploy it in similar fashion to any other bean and +where it can be automatically detected and applied. + +Note that all bean factories and application contexts automatically use a number of +built-in property editors, through their use of a `BeanWrapper` to +handle property conversions. The standard property editors that the `BeanWrapper`registers are listed in the [previous section](#beans-beans-conversion). +Additionally, `ApplicationContext`s also override or add additional editors to handle +resource lookups in a manner appropriate to the specific application context type. + +Standard JavaBeans `PropertyEditor` instances are used to convert property values +expressed as strings to the actual complex type of the property. You can use`CustomEditorConfigurer`, a bean factory post-processor, to conveniently add +support for additional `PropertyEditor` instances to an `ApplicationContext`. 
+ +Consider the following example, which defines a user class called `ExoticType` and +another class called `DependsOnExoticType`, which needs `ExoticType` set as a property: + +Java + +``` +package example; + +public class ExoticType { + + private String name; + + public ExoticType(String name) { + this.name = name; + } +} + +public class DependsOnExoticType { + + private ExoticType type; + + public void setType(ExoticType type) { + this.type = type; + } +} +``` + +Kotlin + +``` +package example + +class ExoticType(val name: String) + +class DependsOnExoticType { + + var type: ExoticType? = null +} +``` + +When things are properly set up, we want to be able to assign the type property as a +string, which a `PropertyEditor` converts into an actual`ExoticType` instance. The following bean definition shows how to set up this relationship: + +``` + + + +``` + +The `PropertyEditor` implementation could look similar to the following: + +Java + +``` +// converts string representation to ExoticType object +package example; + +public class ExoticTypeEditor extends PropertyEditorSupport { + + public void setAsText(String text) { + setValue(new ExoticType(text.toUpperCase())); + } +} +``` + +Kotlin + +``` +// converts string representation to ExoticType object +package example + +import java.beans.PropertyEditorSupport + +class ExoticTypeEditor : PropertyEditorSupport() { + + override fun setAsText(text: String) { + value = ExoticType(text.toUpperCase()) + } +} +``` + +Finally, the following example shows how to use `CustomEditorConfigurer` to register the new `PropertyEditor` with the`ApplicationContext`, which will then be able to use it as needed: + +``` + + + + + + + +``` + +###### Using `PropertyEditorRegistrar` + +Another mechanism for registering property editors with the Spring container is to +create and use a `PropertyEditorRegistrar`. This interface is particularly useful when +you need to use the same set of property editors in several different situations. 
+You can write a corresponding registrar and reuse it in each case.`PropertyEditorRegistrar` instances work in conjunction with an interface called`PropertyEditorRegistry`, an interface that is implemented by the Spring `BeanWrapper`(and `DataBinder`). `PropertyEditorRegistrar` instances are particularly convenient +when used in conjunction with `CustomEditorConfigurer` (described[here](#beans-beans-conversion-customeditor-registration)), which exposes a property +called `setPropertyEditorRegistrars(..)`. `PropertyEditorRegistrar` instances added +to a `CustomEditorConfigurer` in this fashion can easily be shared with `DataBinder` and +Spring MVC controllers. Furthermore, it avoids the need for synchronization on custom +editors: A `PropertyEditorRegistrar` is expected to create fresh `PropertyEditor`instances for each bean creation attempt. + +The following example shows how to create your own `PropertyEditorRegistrar` implementation: + +Java + +``` +package com.foo.editors.spring; + +public final class CustomPropertyEditorRegistrar implements PropertyEditorRegistrar { + + public void registerCustomEditors(PropertyEditorRegistry registry) { + + // it is expected that new PropertyEditor instances are created + registry.registerCustomEditor(ExoticType.class, new ExoticTypeEditor()); + + // you could register as many custom property editors as are required here... + } +} +``` + +Kotlin + +``` +package com.foo.editors.spring + +import org.springframework.beans.PropertyEditorRegistrar +import org.springframework.beans.PropertyEditorRegistry + +class CustomPropertyEditorRegistrar : PropertyEditorRegistrar { + + override fun registerCustomEditors(registry: PropertyEditorRegistry) { + + // it is expected that new PropertyEditor instances are created + registry.registerCustomEditor(ExoticType::class.java, ExoticTypeEditor()) + + // you could register as many custom property editors as are required here... 
+ } +} +``` + +See also the `org.springframework.beans.support.ResourceEditorRegistrar` for an example`PropertyEditorRegistrar` implementation. Notice how in its implementation of the`registerCustomEditors(..)` method, it creates new instances of each property editor. + +The next example shows how to configure a `CustomEditorConfigurer` and inject an instance of our`CustomPropertyEditorRegistrar` into it: + +``` + + + + + + + + + +``` + +Finally (and in a bit of a departure from the focus of this chapter for those of you +using [Spring’s MVC web framework](web.html#mvc)), using `PropertyEditorRegistrars` in +conjunction with data-binding `Controllers` (such as `SimpleFormController`) can be very +convenient. The following example uses a `PropertyEditorRegistrar` in the +implementation of an `initBinder(..)` method: + +Java + +``` +public final class RegisterUserController extends SimpleFormController { + + private final PropertyEditorRegistrar customPropertyEditorRegistrar; + + public RegisterUserController(PropertyEditorRegistrar propertyEditorRegistrar) { + this.customPropertyEditorRegistrar = propertyEditorRegistrar; + } + + protected void initBinder(HttpServletRequest request, + ServletRequestDataBinder binder) throws Exception { + this.customPropertyEditorRegistrar.registerCustomEditors(binder); + } + + // other methods to do with registering a User +} +``` + +Kotlin + +``` +class RegisterUserController( + private val customPropertyEditorRegistrar: PropertyEditorRegistrar) : SimpleFormController() { + + protected fun initBinder(request: HttpServletRequest, + binder: ServletRequestDataBinder) { + this.customPropertyEditorRegistrar.registerCustomEditors(binder) + } + + // other methods to do with registering a User +} +``` + +This style of `PropertyEditor` registration can lead to concise code (the implementation +of `initBinder(..)` is only one line long) and lets common `PropertyEditor`registration code be encapsulated in a class and then shared amongst as 
many`Controllers` as needed. + +### 3.4. Spring Type Conversion + +Spring 3 introduced a `core.convert` package that provides a general type conversion +system. The system defines an SPI to implement type conversion logic and an API +to perform type conversions at runtime. Within a Spring container, you can use this system +as an alternative to `PropertyEditor` implementations to convert externalized bean property value +strings to the required property types. You can also use the public API anywhere in your +application where type conversion is needed. + +#### 3.4.1. Converter SPI + +The SPI to implement type conversion logic is simple and strongly typed, as the following +interface definition shows: + +``` +package org.springframework.core.convert.converter; + +public interface Converter { + + T convert(S source); +} +``` + +To create your own converter, implement the `Converter` interface and parameterize `S`as the type you are converting from and `T` as the type you are converting to. You can also transparently apply such a +converter if a collection or array of `S` needs to be +converted to an array or collection of `T`, provided that a delegating array or collection +converter has been registered as well (which `DefaultConversionService` does by default). + +For each call to `convert(S)`, the source argument is guaranteed to not be null. Your`Converter` may throw any unchecked exception if conversion fails. Specifically, it should throw an`IllegalArgumentException` to report an invalid source value. +Take care to ensure that your `Converter` implementation is thread-safe. + +Several converter implementations are provided in the `core.convert.support` package as +a convenience. These include converters from strings to numbers and other common types. 
+The following listing shows the `StringToInteger` class, which is a typical `Converter` implementation: + +``` +package org.springframework.core.convert.support; + +final class StringToInteger implements Converter { + + public Integer convert(String source) { + return Integer.valueOf(source); + } +} +``` + +#### 3.4.2. Using `ConverterFactory` + +When you need to centralize the conversion logic for an entire class hierarchy +(for example, when converting from `String` to `Enum` objects), you can implement`ConverterFactory`, as the following example shows: + +``` +package org.springframework.core.convert.converter; + +public interface ConverterFactory { + + Converter getConverter(Class targetType); +} +``` + +Parameterize S to be the type you are converting from and R to be the base type defining +the *range* of classes you can convert to. Then implement `getConverter(Class)`, +where T is a subclass of R. + +Consider the `StringToEnumConverterFactory` as an example: + +``` +package org.springframework.core.convert.support; + +final class StringToEnumConverterFactory implements ConverterFactory { + + public Converter getConverter(Class targetType) { + return new StringToEnumConverter(targetType); + } + + private final class StringToEnumConverter implements Converter { + + private Class enumType; + + public StringToEnumConverter(Class enumType) { + this.enumType = enumType; + } + + public T convert(String source) { + return (T) Enum.valueOf(this.enumType, source.trim()); + } + } +} +``` + +#### 3.4.3. Using `GenericConverter` + +When you require a sophisticated `Converter` implementation, consider using the`GenericConverter` interface. With a more flexible but less strongly typed signature +than `Converter`, a `GenericConverter` supports converting between multiple source and +target types. In addition, a `GenericConverter` makes available source and target field +context that you can use when you implement your conversion logic. 
Such context lets a +type conversion be driven by a field annotation or by generic information declared on a +field signature. The following listing shows the interface definition of `GenericConverter`: + +``` +package org.springframework.core.convert.converter; + +public interface GenericConverter { + + public Set getConvertibleTypes(); + + Object convert(Object source, TypeDescriptor sourceType, TypeDescriptor targetType); +} +``` + +To implement a `GenericConverter`, have `getConvertibleTypes()` return the supported +source→target type pairs. Then implement `convert(Object, TypeDescriptor, +TypeDescriptor)` to contain your conversion logic. The source `TypeDescriptor` provides +access to the source field that holds the value being converted. The target `TypeDescriptor`provides access to the target field where the converted value is to be set. + +A good example of a `GenericConverter` is a converter that converts between a Java array +and a collection. Such an `ArrayToCollectionConverter` introspects the field that declares +the target collection type to resolve the collection’s element type. This lets each +element in the source array be converted to the collection element type before the +collection is set on the target field. + +| |Because `GenericConverter` is a more complex SPI interface, you should use
it only when you need it. Favor `Converter` or `ConverterFactory` for basic type
conversion needs.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Using `ConditionalGenericConverter` + +Sometimes, you want a `Converter` to run only if a specific condition holds true. For +example, you might want to run a `Converter` only if a specific annotation is present +on the target field, or you might want to run a `Converter` only if a specific method +(such as a `static valueOf` method) is defined on the target class.`ConditionalGenericConverter` is the union of the `GenericConverter` and`ConditionalConverter` interfaces that lets you define such custom matching criteria: + +``` +public interface ConditionalConverter { + + boolean matches(TypeDescriptor sourceType, TypeDescriptor targetType); +} + +public interface ConditionalGenericConverter extends GenericConverter, ConditionalConverter { +} +``` + +A good example of a `ConditionalGenericConverter` is an `IdToEntityConverter` that converts +between a persistent entity identifier and an entity reference. Such an `IdToEntityConverter`might match only if the target entity type declares a static finder method (for example,`findAccount(Long)`). You might perform such a finder method check in the implementation of`matches(TypeDescriptor, TypeDescriptor)`. + +#### 3.4.4. The `ConversionService` API + +`ConversionService` defines a unified API for executing type conversion logic at +runtime. 
Converters are often run behind the following facade interface: + +``` +package org.springframework.core.convert; + +public interface ConversionService { + + boolean canConvert(Class sourceType, Class targetType); + + T convert(Object source, Class targetType); + + boolean canConvert(TypeDescriptor sourceType, TypeDescriptor targetType); + + Object convert(Object source, TypeDescriptor sourceType, TypeDescriptor targetType); +} +``` + +Most `ConversionService` implementations also implement `ConverterRegistry`, which +provides an SPI for registering converters. Internally, a `ConversionService`implementation delegates to its registered converters to carry out type conversion logic. + +A robust `ConversionService` implementation is provided in the `core.convert.support`package. `GenericConversionService` is the general-purpose implementation suitable for +use in most environments. `ConversionServiceFactory` provides a convenient factory for +creating common `ConversionService` configurations. + +#### 3.4.5. Configuring a `ConversionService` + +A `ConversionService` is a stateless object designed to be instantiated at application +startup and then shared between multiple threads. In a Spring application, you typically +configure a `ConversionService` instance for each Spring container (or `ApplicationContext`). +Spring picks up that `ConversionService` and uses it whenever a type +conversion needs to be performed by the framework. You can also inject this`ConversionService` into any of your beans and invoke it directly. + +| |If no `ConversionService` is registered with Spring, the original `PropertyEditor`-based
system is used.| +|---|------------------------------------------------------------------------------------------------------------| + +To register a default `ConversionService` with Spring, add the following bean definition +with an `id` of `conversionService`: + +``` + +``` + +A default `ConversionService` can convert between strings, numbers, enums, collections, +maps, and other common types. To supplement or override the default converters with your +own custom converters, set the `converters` property. Property values can implement +any of the `Converter`, `ConverterFactory`, or `GenericConverter` interfaces. + +``` + + + + + + + +``` + +It is also common to use a `ConversionService` within a Spring MVC application. See[Conversion and Formatting](web.html#mvc-config-conversion) in the Spring MVC chapter. + +In certain situations, you may wish to apply formatting during conversion. See[The `FormatterRegistry` SPI](#format-FormatterRegistry-SPI) for details on using `FormattingConversionServiceFactoryBean`. + +#### 3.4.6. Using a `ConversionService` Programmatically + +To work with a `ConversionService` instance programmatically, you can inject a reference to +it like you would for any other bean. The following example shows how to do so: + +Java + +``` +@Service +public class MyService { + + public MyService(ConversionService conversionService) { + this.conversionService = conversionService; + } + + public void doIt() { + this.conversionService.convert(...) + } +} +``` + +Kotlin + +``` +@Service +class MyService(private val conversionService: ConversionService) { + + fun doIt() { + conversionService.convert(...) + } +} +``` + +For most use cases, you can use the `convert` method that specifies the `targetType`, but it +does not work with more complex types, such as a collection of a parameterized element. 
+For example, if you want to convert a `List` of `Integer` to a `List` of `String` programmatically, +you need to provide a formal definition of the source and target types. + +Fortunately, `TypeDescriptor` provides various options to make doing so straightforward, +as the following example shows: + +Java + +``` +DefaultConversionService cs = new DefaultConversionService(); + +List input = ... +cs.convert(input, + TypeDescriptor.forObject(input), // List type descriptor + TypeDescriptor.collection(List.class, TypeDescriptor.valueOf(String.class))); +``` + +Kotlin + +``` +val cs = DefaultConversionService() + +val input: List = ... +cs.convert(input, + TypeDescriptor.forObject(input), // List type descriptor + TypeDescriptor.collection(List::class.java, TypeDescriptor.valueOf(String::class.java))) +``` + +Note that `DefaultConversionService` automatically registers converters that are +appropriate for most environments. This includes collection converters, scalar +converters, and basic `Object`-to-`String` converters. You can register the same converters +with any `ConverterRegistry` by using the static `addDefaultConverters`method on the `DefaultConversionService` class. + +Converters for value types are reused for arrays and collections, so there is +no need to create a specific converter to convert from a `Collection` of `S` to a`Collection` of `T`, assuming that standard collection handling is appropriate. + +### 3.5. Spring Field Formatting + +As discussed in the previous section, [`core.convert`](#core-convert) is a +general-purpose type conversion system. It provides a unified `ConversionService` API as +well as a strongly typed `Converter` SPI for implementing conversion logic from one type +to another. A Spring container uses this system to bind bean property values. In +addition, both the Spring Expression Language (SpEL) and `DataBinder` use this system to +bind field values. 
For example, when SpEL needs to coerce a `Short` to a `Long` to +complete an `expression.setValue(Object bean, Object value)` attempt, the `core.convert`system performs the coercion. + +Now consider the type conversion requirements of a typical client environment, such as a +web or desktop application. In such environments, you typically convert from `String`to support the client postback process, as well as back to `String` to support the +view rendering process. In addition, you often need to localize `String` values. The more +general `core.convert` `Converter` SPI does not address such formatting requirements +directly. To directly address them, Spring 3 introduced a convenient `Formatter` SPI that +provides a simple and robust alternative to `PropertyEditor` implementations for client environments. + +In general, you can use the `Converter` SPI when you need to implement general-purpose type +conversion logic — for example, for converting between a `java.util.Date` and a `Long`. +You can use the `Formatter` SPI when you work in a client environment (such as a web +application) and need to parse and print localized field values. The `ConversionService`provides a unified type conversion API for both SPIs. + +#### 3.5.1. The `Formatter` SPI + +The `Formatter` SPI to implement field formatting logic is simple and strongly typed. The +following listing shows the `Formatter` interface definition: + +``` +package org.springframework.format; + +public interface Formatter extends Printer, Parser { +} +``` + +`Formatter` extends from the `Printer` and `Parser` building-block interfaces. 
The +following listing shows the definitions of those two interfaces: + +``` +public interface Printer { + + String print(T fieldValue, Locale locale); +} +``` + +``` +import java.text.ParseException; + +public interface Parser { + + T parse(String clientValue, Locale locale) throws ParseException; +} +``` + +To create your own `Formatter`, implement the `Formatter` interface shown earlier. +Parameterize `T` to be the type of object you wish to format — for example,`java.util.Date`. Implement the `print()` operation to print an instance of `T` for +display in the client locale. Implement the `parse()` operation to parse an instance of`T` from the formatted representation returned from the client locale. Your `Formatter`should throw a `ParseException` or an `IllegalArgumentException` if a parse attempt fails. Take +care to ensure that your `Formatter` implementation is thread-safe. + +The `format` subpackages provide several `Formatter` implementations as a convenience. +The `number` package provides `NumberStyleFormatter`, `CurrencyStyleFormatter`, and`PercentStyleFormatter` to format `Number` objects that use a `java.text.NumberFormat`. +The `datetime` package provides a `DateFormatter` to format `java.util.Date` objects with +a `java.text.DateFormat`. 
+ +The following `DateFormatter` is an example `Formatter` implementation: + +Java + +``` +package org.springframework.format.datetime; + +public final class DateFormatter implements Formatter { + + private String pattern; + + public DateFormatter(String pattern) { + this.pattern = pattern; + } + + public String print(Date date, Locale locale) { + if (date == null) { + return ""; + } + return getDateFormat(locale).format(date); + } + + public Date parse(String formatted, Locale locale) throws ParseException { + if (formatted.length() == 0) { + return null; + } + return getDateFormat(locale).parse(formatted); + } + + protected DateFormat getDateFormat(Locale locale) { + DateFormat dateFormat = new SimpleDateFormat(this.pattern, locale); + dateFormat.setLenient(false); + return dateFormat; + } +} +``` + +Kotlin + +``` +class DateFormatter(private val pattern: String) : Formatter { + + override fun print(date: Date, locale: Locale) + = getDateFormat(locale).format(date) + + @Throws(ParseException::class) + override fun parse(formatted: String, locale: Locale) + = getDateFormat(locale).parse(formatted) + + protected fun getDateFormat(locale: Locale): DateFormat { + val dateFormat = SimpleDateFormat(this.pattern, locale) + dateFormat.isLenient = false + return dateFormat + } +} +``` + +The Spring team welcomes community-driven `Formatter` contributions. See[GitHub Issues](https://github.com/spring-projects/spring-framework/issues) to contribute. + +#### 3.5.2. Annotation-driven Formatting + +Field formatting can be configured by field type or annotation. To bind +an annotation to a `Formatter`, implement `AnnotationFormatterFactory`. The following +listing shows the definition of the `AnnotationFormatterFactory` interface: + +``` +package org.springframework.format; + +public interface AnnotationFormatterFactory
{ + + Set> getFieldTypes(); + + Printer getPrinter(A annotation, Class fieldType); + + Parser getParser(A annotation, Class fieldType); +} +``` + +To create an implementation: + +1. Parameterize A to be the field `annotationType` with which you wish to associate + formatting logic — for example `org.springframework.format.annotation.DateTimeFormat`. + +2. Have `getFieldTypes()` return the types of fields on which the annotation can be used. + +3. Have `getPrinter()` return a `Printer` to print the value of an annotated field. + +4. Have `getParser()` return a `Parser` to parse a `clientValue` for an annotated field. + +The following example `AnnotationFormatterFactory` implementation binds the `@NumberFormat`annotation to a formatter to let a number style or pattern be +specified: + +Java + +``` +public final class NumberFormatAnnotationFormatterFactory + implements AnnotationFormatterFactory { + + public Set> getFieldTypes() { + return new HashSet>(asList(new Class[] { + Short.class, Integer.class, Long.class, Float.class, + Double.class, BigDecimal.class, BigInteger.class })); + } + + public Printer getPrinter(NumberFormat annotation, Class fieldType) { + return configureFormatterFrom(annotation, fieldType); + } + + public Parser getParser(NumberFormat annotation, Class fieldType) { + return configureFormatterFrom(annotation, fieldType); + } + + private Formatter configureFormatterFrom(NumberFormat annotation, Class fieldType) { + if (!annotation.pattern().isEmpty()) { + return new NumberStyleFormatter(annotation.pattern()); + } else { + Style style = annotation.style(); + if (style == Style.PERCENT) { + return new PercentStyleFormatter(); + } else if (style == Style.CURRENCY) { + return new CurrencyStyleFormatter(); + } else { + return new NumberStyleFormatter(); + } + } + } +} +``` + +Kotlin + +``` +class NumberFormatAnnotationFormatterFactory : AnnotationFormatterFactory { + + override fun getFieldTypes(): Set> { + return setOf(Short::class.java, 
Int::class.java, Long::class.java, Float::class.java, Double::class.java, BigDecimal::class.java, BigInteger::class.java) + } + + override fun getPrinter(annotation: NumberFormat, fieldType: Class<*>): Printer { + return configureFormatterFrom(annotation, fieldType) + } + + override fun getParser(annotation: NumberFormat, fieldType: Class<*>): Parser { + return configureFormatterFrom(annotation, fieldType) + } + + private fun configureFormatterFrom(annotation: NumberFormat, fieldType: Class<*>): Formatter { + return if (annotation.pattern.isNotEmpty()) { + NumberStyleFormatter(annotation.pattern) + } else { + val style = annotation.style + when { + style === NumberFormat.Style.PERCENT -> PercentStyleFormatter() + style === NumberFormat.Style.CURRENCY -> CurrencyStyleFormatter() + else -> NumberStyleFormatter() + } + } + } +} +``` + +To trigger formatting, you can annotate fields with @NumberFormat, as the following +example shows: + +Java + +``` +public class MyModel { + + @NumberFormat(style=Style.CURRENCY) + private BigDecimal decimal; +} +``` + +Kotlin + +``` +class MyModel( + @field:NumberFormat(style = Style.CURRENCY) private val decimal: BigDecimal +) +``` + +##### Format Annotation API + +A portable format annotation API exists in the `org.springframework.format.annotation`package. You can use `@NumberFormat` to format `Number` fields such as `Double` and`Long`, and `@DateTimeFormat` to format `java.util.Date`, `java.util.Calendar`, `Long`(for millisecond timestamps) as well as JSR-310 `java.time`. + +The following example uses `@DateTimeFormat` to format a `java.util.Date` as an ISO Date +(yyyy-MM-dd): + +Java + +``` +public class MyModel { + + @DateTimeFormat(iso=ISO.DATE) + private Date date; +} +``` + +Kotlin + +``` +class MyModel( + @DateTimeFormat(iso=ISO.DATE) private val date: Date +) +``` + +#### 3.5.3. 
The `FormatterRegistry` SPI + +The `FormatterRegistry` is an SPI for registering formatters and converters.`FormattingConversionService` is an implementation of `FormatterRegistry` suitable for +most environments. You can programmatically or declaratively configure this variant +as a Spring bean, e.g. by using `FormattingConversionServiceFactoryBean`. Because this +implementation also implements `ConversionService`, you can directly configure it +for use with Spring’s `DataBinder` and the Spring Expression Language (SpEL). + +The following listing shows the `FormatterRegistry` SPI: + +``` +package org.springframework.format; + +public interface FormatterRegistry extends ConverterRegistry { + + void addPrinter(Printer printer); + + void addParser(Parser parser); + + void addFormatter(Formatter formatter); + + void addFormatterForFieldType(Class fieldType, Formatter formatter); + + void addFormatterForFieldType(Class fieldType, Printer printer, Parser parser); + + void addFormatterForFieldAnnotation(AnnotationFormatterFactory annotationFormatterFactory); +} +``` + +As shown in the preceding listing, you can register formatters by field type or by annotation. + +The `FormatterRegistry` SPI lets you configure formatting rules centrally, instead of +duplicating such configuration across your controllers. For example, you might want to +enforce that all date fields are formatted a certain way or that fields with a specific +annotation are formatted in a certain way. With a shared `FormatterRegistry`, you define +these rules once, and they are applied whenever formatting is needed. + +#### 3.5.4. The `FormatterRegistrar` SPI + +`FormatterRegistrar` is an SPI for registering formatters and converters through the +FormatterRegistry. 
The following listing shows its interface definition: + +``` +package org.springframework.format; + +public interface FormatterRegistrar { + + void registerFormatters(FormatterRegistry registry); +} +``` + +A `FormatterRegistrar` is useful when registering multiple related converters and +formatters for a given formatting category, such as date formatting. It can also be +useful where declarative registration is insufficient — for example, when a formatter +needs to be indexed under a specific field type different from its own `<T>` or when +registering a `Printer`/`Parser` pair. The next section provides more information on +converter and formatter registration. + +#### 3.5.5. Configuring Formatting in Spring MVC + +See [Conversion and Formatting](web.html#mvc-config-conversion) in the Spring MVC chapter. + +### 3.6. Configuring a Global Date and Time Format + +By default, date and time fields not annotated with `@DateTimeFormat` are converted from +strings by using the `DateFormat.SHORT` style. If you prefer, you can change this by +defining your own global format. + +To do that, ensure that Spring does not register default formatters. 
Instead, register +formatters manually with the help of: + +* `org.springframework.format.datetime.standard.DateTimeFormatterRegistrar` + +* `org.springframework.format.datetime.DateFormatterRegistrar` + +For example, the following Java configuration registers a global `yyyyMMdd` format: + +Java + +``` +@Configuration +public class AppConfig { + + @Bean + public FormattingConversionService conversionService() { + + // Use the DefaultFormattingConversionService but do not register defaults + DefaultFormattingConversionService conversionService = new DefaultFormattingConversionService(false); + + // Ensure @NumberFormat is still supported + conversionService.addFormatterForFieldAnnotation(new NumberFormatAnnotationFormatterFactory()); + + // Register JSR-310 date conversion with a specific global format + DateTimeFormatterRegistrar dateTimeRegistrar = new DateTimeFormatterRegistrar(); + dateTimeRegistrar.setDateFormatter(DateTimeFormatter.ofPattern("yyyyMMdd")); + dateTimeRegistrar.registerFormatters(conversionService); + + // Register date conversion with a specific global format + DateFormatterRegistrar registrar = new DateFormatterRegistrar(); + registrar.setFormatter(new DateFormatter("yyyyMMdd")); + registrar.registerFormatters(conversionService); + + return conversionService; + } +} +``` + +Kotlin + +``` +@Configuration +class AppConfig { + + @Bean + fun conversionService(): FormattingConversionService { + // Use the DefaultFormattingConversionService but do not register defaults + return DefaultFormattingConversionService(false).apply { + + // Ensure @NumberFormat is still supported + addFormatterForFieldAnnotation(NumberFormatAnnotationFormatterFactory()) + + // Register JSR-310 date conversion with a specific global format + val dateTimeRegistrar = DateTimeFormatterRegistrar() + dateTimeRegistrar.setDateFormatter(DateTimeFormatter.ofPattern("yyyyMMdd")) + dateTimeRegistrar.registerFormatters(this) + + // Register date conversion with a specific global format + val registrar = DateFormatterRegistrar() + 
registrar.setFormatter(DateFormatter("yyyyMMdd")) + registrar.registerFormatters(this) + } + } +} +``` + +If you prefer XML-based configuration, you can use a`FormattingConversionServiceFactoryBean`. The following example shows how to do so: + +``` + + + + + + + + + + + + + + + + + + + +
+
+``` + +Note there are extra considerations when configuring date and time formats in web +applications. Please see[WebMVC Conversion and Formatting](web.html#mvc-config-conversion) or[WebFlux Conversion and Formatting](web-reactive.html#webflux-config-conversion). + +### 3.7. Java Bean Validation + +The Spring Framework provides support for the[Java Bean Validation](https://beanvalidation.org/) API. + +#### 3.7.1. Overview of Bean Validation + +Bean Validation provides a common way of validation through constraint declaration and +metadata for Java applications. To use it, you annotate domain model properties with +declarative validation constraints which are then enforced by the runtime. There are +built-in constraints, and you can also define your own custom constraints. + +Consider the following example, which shows a simple `PersonForm` model with two properties: + +Java + +``` +public class PersonForm { + private String name; + private int age; +} +``` + +Kotlin + +``` +class PersonForm( + private val name: String, + private val age: Int +) +``` + +Bean Validation lets you declare constraints as the following example shows: + +Java + +``` +public class PersonForm { + + @NotNull + @Size(max=64) + private String name; + + @Min(0) + private int age; +} +``` + +Kotlin + +``` +class PersonForm( + @get:NotNull @get:Size(max=64) + private val name: String, + @get:Min(0) + private val age: Int +) +``` + +A Bean Validation validator then validates instances of this class based on the declared +constraints. See [Bean Validation](https://beanvalidation.org/) for general information about +the API. See the [Hibernate Validator](https://hibernate.org/validator/) documentation for +specific constraints. To learn how to set up a bean validation provider as a Spring +bean, keep reading. + +#### 3.7.2. Configuring a Bean Validation Provider + +Spring provides full support for the Bean Validation API including the bootstrapping of a +Bean Validation provider as a Spring bean. 
This lets you inject a`javax.validation.ValidatorFactory` or `javax.validation.Validator` wherever validation is +needed in your application. + +You can use the `LocalValidatorFactoryBean` to configure a default Validator as a Spring +bean, as the following example shows: + +Java + +``` +import org.springframework.validation.beanvalidation.LocalValidatorFactoryBean; + +@Configuration +public class AppConfig { + + @Bean + public LocalValidatorFactoryBean validator() { + return new LocalValidatorFactoryBean(); + } +} +``` + +XML + +``` + +``` + +The basic configuration in the preceding example triggers bean validation to initialize by +using its default bootstrap mechanism. A Bean Validation provider, such as the Hibernate +Validator, is expected to be present in the classpath and is automatically detected. + +##### Injecting a Validator + +`LocalValidatorFactoryBean` implements both `javax.validation.ValidatorFactory` and`javax.validation.Validator`, as well as Spring’s `org.springframework.validation.Validator`. +You can inject a reference to either of these interfaces into beans that need to invoke +validation logic. 
+ +You can inject a reference to `javax.validation.Validator` if you prefer to work with the Bean +Validation API directly, as the following example shows: + +Java + +``` +import javax.validation.Validator; + +@Service +public class MyService { + + @Autowired + private Validator validator; +} +``` + +Kotlin + +``` +import javax.validation.Validator; + +@Service +class MyService(@Autowired private val validator: Validator) +``` + +You can inject a reference to `org.springframework.validation.Validator` if your bean +requires the Spring Validation API, as the following example shows: + +Java + +``` +import org.springframework.validation.Validator; + +@Service +public class MyService { + + @Autowired + private Validator validator; +} +``` + +Kotlin + +``` +import org.springframework.validation.Validator + +@Service +class MyService(@Autowired private val validator: Validator) +``` + +##### Configuring Custom Constraints + +Each bean validation constraint consists of two parts: + +* A `@Constraint` annotation that declares the constraint and its configurable properties. + +* An implementation of the `javax.validation.ConstraintValidator` interface that implements + the constraint’s behavior. + +To associate a declaration with an implementation, each `@Constraint` annotation +references a corresponding `ConstraintValidator` implementation class. At runtime, a`ConstraintValidatorFactory` instantiates the referenced implementation when the +constraint annotation is encountered in your domain model. + +By default, the `LocalValidatorFactoryBean` configures a `SpringConstraintValidatorFactory`that uses Spring to create `ConstraintValidator` instances. This lets your custom`ConstraintValidators` benefit from dependency injection like any other Spring bean. 
 + +The following example shows a custom `@Constraint` declaration followed by an associated`ConstraintValidator` implementation that uses Spring for dependency injection: + +Java + +``` +@Target({ElementType.METHOD, ElementType.FIELD}) +@Retention(RetentionPolicy.RUNTIME) +@Constraint(validatedBy=MyConstraintValidator.class) +public @interface MyConstraint { +} +``` + +Kotlin + +``` +@Target(AnnotationTarget.FUNCTION, AnnotationTarget.FIELD) +@Retention(AnnotationRetention.RUNTIME) +@Constraint(validatedBy = MyConstraintValidator::class) +annotation class MyConstraint +``` + +Java + +``` +import javax.validation.ConstraintValidator; + +public class MyConstraintValidator implements ConstraintValidator { + + @Autowired + private Foo aDependency; + + // ... +} +``` + +Kotlin + +``` +import javax.validation.ConstraintValidator + +class MyConstraintValidator(private val aDependency: Foo) : ConstraintValidator { + + // ... +} +``` + +As the preceding example shows, a `ConstraintValidator` implementation can have its dependencies`@Autowired` as any other Spring bean. + +##### Spring-driven Method Validation + +You can integrate the method validation feature supported by Bean Validation 1.1 (and, as +a custom extension, also by Hibernate Validator 4.3) into a Spring context through a`MethodValidationPostProcessor` bean definition: + +Java + +``` +import org.springframework.validation.beanvalidation.MethodValidationPostProcessor; + +@Configuration +public class AppConfig { + + @Bean + public MethodValidationPostProcessor validationPostProcessor() { + return new MethodValidationPostProcessor(); + } +} +``` + +XML + +``` + +``` + +To be eligible for Spring-driven method validation, all target classes need to be annotated +with Spring’s `@Validated` annotation, which can optionally also declare the validation +groups to use. 
See [`MethodValidationPostProcessor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/validation/beanvalidation/MethodValidationPostProcessor.html) for setup details with the Hibernate Validator and Bean Validation 1.1 providers. + +| |Method validation relies on [AOP Proxies](#aop-introduction-proxies) around the
target classes, either JDK dynamic proxies for methods on interfaces or CGLIB proxies.
There are certain limitations with the use of proxies, some of which are described in [Understanding AOP Proxies](#aop-understanding-aop-proxies). In addition, remember
to always use methods and accessors on proxied classes; direct field access will not work.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Additional Configuration Options + +The default `LocalValidatorFactoryBean` configuration suffices for most +cases. There are a number of configuration options for various Bean Validation +constructs, from message interpolation to traversal resolution. See the[`LocalValidatorFactoryBean`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/validation/beanvalidation/LocalValidatorFactoryBean.html)javadoc for more information on these options. + +#### 3.7.3. Configuring a `DataBinder` + +Since Spring 3, you can configure a `DataBinder` instance with a `Validator`. Once +configured, you can invoke the `Validator` by calling `binder.validate()`. Any validation`Errors` are automatically added to the binder’s `BindingResult`. 
+ +The following example shows how to use a `DataBinder` programmatically to invoke validation +logic after binding to a target object: + +Java + +``` +Foo target = new Foo(); +DataBinder binder = new DataBinder(target); +binder.setValidator(new FooValidator()); + +// bind to the target object +binder.bind(propertyValues); + +// validate the target object +binder.validate(); + +// get BindingResult that includes any validation errors +BindingResult results = binder.getBindingResult(); +``` + +Kotlin + +``` +val target = Foo() +val binder = DataBinder(target) +binder.validator = FooValidator() + +// bind to the target object +binder.bind(propertyValues) + +// validate the target object +binder.validate() + +// get BindingResult that includes any validation errors +val results = binder.bindingResult +``` + +You can also configure a `DataBinder` with multiple `Validator` instances through`dataBinder.addValidators` and `dataBinder.replaceValidators`. This is useful when +combining globally configured bean validation with a Spring `Validator` configured +locally on a DataBinder instance. See[Spring MVC Validation Configuration](web.html#mvc-config-validation). + +#### 3.7.4. Spring MVC 3 Validation + +See [Validation](web.html#mvc-config-validation) in the Spring MVC chapter. + +## 4. Spring Expression Language (SpEL) + +The Spring Expression Language (“SpEL” for short) is a powerful expression language that +supports querying and manipulating an object graph at runtime. The language syntax is +similar to Unified EL but offers additional features, most notably method invocation and +basic string templating functionality. + +While there are several other Java expression languages available — OGNL, MVEL, and JBoss +EL, to name a few — the Spring Expression Language was created to provide the Spring +community with a single well supported expression language that can be used across all +the products in the Spring portfolio. 
Its language features are driven by the +requirements of the projects in the Spring portfolio, including tooling requirements +for code completion support within the [Spring Tools for Eclipse](https://spring.io/tools). +That said, SpEL is based on a technology-agnostic API that lets other expression language +implementations be integrated, should the need arise. + +While SpEL serves as the foundation for expression evaluation within the Spring +portfolio, it is not directly tied to Spring and can be used independently. To +be self contained, many of the examples in this chapter use SpEL as if it were an +independent expression language. This requires creating a few bootstrapping +infrastructure classes, such as the parser. Most Spring users need not deal with +this infrastructure and can, instead, author only expression strings for evaluation. +An example of this typical use is the integration of SpEL into creating XML or +annotation-based bean definitions, as shown in[Expression support for defining bean definitions](#expressions-beandef). + +This chapter covers the features of the expression language, its API, and its language +syntax. In several places, `Inventor` and `Society` classes are used as the target +objects for expression evaluation. These class declarations and the data used to +populate them are listed at the end of the chapter. + +The expression language supports the following functionality: + +* Literal expressions + +* Boolean and relational operators + +* Regular expressions + +* Class expressions + +* Accessing properties, arrays, lists, and maps + +* Method invocation + +* Relational operators + +* Assignment + +* Calling constructors + +* Bean references + +* Array construction + +* Inline lists + +* Inline maps + +* Ternary operator + +* Variables + +* User-defined functions + +* Collection projection + +* Collection selection + +* Templated expressions + +### 4.1. 
Evaluation + +This section introduces the simple use of SpEL interfaces and its expression language. +The complete language reference can be found in[Language Reference](#expressions-language-ref). + +The following code introduces the SpEL API to evaluate the literal string expression,`Hello World`. + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); +Expression exp = parser.parseExpression("'Hello World'"); (1) +String message = (String) exp.getValue(); +``` + +|**1**|The value of the message variable is `'Hello World'`.| +|-----|-----------------------------------------------------| + +Kotlin + +``` +val parser = SpelExpressionParser() +val exp = parser.parseExpression("'Hello World'") (1) +val message = exp.value as String +``` + +|**1**|The value of the message variable is `'Hello World'`.| +|-----|-----------------------------------------------------| + +The SpEL classes and interfaces you are most likely to use are located in the`org.springframework.expression` package and its sub-packages, such as `spel.support`. + +The `ExpressionParser` interface is responsible for parsing an expression string. In +the preceding example, the expression string is a string literal denoted by the surrounding single +quotation marks. The `Expression` interface is responsible for evaluating the previously defined +expression string. Two exceptions that can be thrown, `ParseException` and`EvaluationException`, when calling `parser.parseExpression` and `exp.getValue`, +respectively. + +SpEL supports a wide range of features, such as calling methods, accessing properties, +and calling constructors. 
+ +In the following example of method invocation, we call the `concat` method on the string literal: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); +Expression exp = parser.parseExpression("'Hello World'.concat('!')"); (1) +String message = (String) exp.getValue(); +``` + +|**1**|The value of `message` is now 'Hello World!'.| +|-----|---------------------------------------------| + +Kotlin + +``` +val parser = SpelExpressionParser() +val exp = parser.parseExpression("'Hello World'.concat('!')") (1) +val message = exp.value as String +``` + +|**1**|The value of `message` is now 'Hello World!'.| +|-----|---------------------------------------------| + +The following example of calling a JavaBean property calls the `String` property `Bytes`: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); + +// invokes 'getBytes()' +Expression exp = parser.parseExpression("'Hello World'.bytes"); (1) +byte[] bytes = (byte[]) exp.getValue(); +``` + +|**1**|This line converts the literal to a byte array.| +|-----|-----------------------------------------------| + +Kotlin + +``` +val parser = SpelExpressionParser() + +// invokes 'getBytes()' +val exp = parser.parseExpression("'Hello World'.bytes") (1) +val bytes = exp.value as ByteArray +``` + +|**1**|This line converts the literal to a byte array.| +|-----|-----------------------------------------------| + +SpEL also supports nested properties by using the standard dot notation (such as`prop1.prop2.prop3`) and also the corresponding setting of property values. +Public fields may also be accessed. 
+ +The following example shows how to use dot notation to get the length of a literal: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); + +// invokes 'getBytes().length' +Expression exp = parser.parseExpression("'Hello World'.bytes.length"); (1) +int length = (Integer) exp.getValue(); +``` + +|**1**|`'Hello World'.bytes.length` gives the length of the literal.| +|-----|-------------------------------------------------------------| + +Kotlin + +``` +val parser = SpelExpressionParser() + +// invokes 'getBytes().length' +val exp = parser.parseExpression("'Hello World'.bytes.length") (1) +val length = exp.value as Int +``` + +|**1**|`'Hello World'.bytes.length` gives the length of the literal.| +|-----|-------------------------------------------------------------| + +The String’s constructor can be called instead of using a string literal, as the following +example shows: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); +Expression exp = parser.parseExpression("new String('hello world').toUpperCase()"); (1) +String message = exp.getValue(String.class); +``` + +|**1**|Construct a new `String` from the literal and make it be upper case.| +|-----|--------------------------------------------------------------------| + +Kotlin + +``` +val parser = SpelExpressionParser() +val exp = parser.parseExpression("new String('hello world').toUpperCase()") (1) +val message = exp.getValue(String::class.java) +``` + +|**1**|Construct a new `String` from the literal and make it be upper case.| +|-----|--------------------------------------------------------------------| + +Note the use of the generic method: `public T getValue(Class desiredResultType)`. +Using this method removes the need to cast the value of the expression to the desired +result type. An `EvaluationException` is thrown if the value cannot be cast to the +type `T` or converted by using the registered type converter. 
+ +The more common usage of SpEL is to provide an expression string that is evaluated +against a specific object instance (called the root object). The following example shows +how to retrieve the `name` property from an instance of the `Inventor` class or +create a boolean condition: + +Java + +``` +// Create and set a calendar +GregorianCalendar c = new GregorianCalendar(); +c.set(1856, 7, 9); + +// The constructor arguments are name, birthday, and nationality. +Inventor tesla = new Inventor("Nikola Tesla", c.getTime(), "Serbian"); + +ExpressionParser parser = new SpelExpressionParser(); + +Expression exp = parser.parseExpression("name"); // Parse name as an expression +String name = (String) exp.getValue(tesla); +// name == "Nikola Tesla" + +exp = parser.parseExpression("name == 'Nikola Tesla'"); +boolean result = exp.getValue(tesla, Boolean.class); +// result == true +``` + +Kotlin + +``` +// Create and set a calendar +val c = GregorianCalendar() +c.set(1856, 7, 9) + +// The constructor arguments are name, birthday, and nationality. +val tesla = Inventor("Nikola Tesla", c.time, "Serbian") + +val parser = SpelExpressionParser() + +var exp = parser.parseExpression("name") // Parse name as an expression +val name = exp.getValue(tesla) as String +// name == "Nikola Tesla" + +exp = parser.parseExpression("name == 'Nikola Tesla'") +val result = exp.getValue(tesla, Boolean::class.java) +// result == true +``` + +#### 4.1.1. Understanding `EvaluationContext` + +The `EvaluationContext` interface is used when evaluating an expression to resolve +properties, methods, or fields and to help perform type conversion. Spring provides two +implementations. + +* `SimpleEvaluationContext`: Exposes a subset of essential SpEL language features and + configuration options, for categories of expressions that do not require the full extent + of the SpEL language syntax and should be meaningfully restricted. 
Examples include but + are not limited to data binding expressions and property-based filters. + +* `StandardEvaluationContext`: Exposes the full set of SpEL language features and + configuration options. You can use it to specify a default root object and to configure + every available evaluation-related strategy. + +`SimpleEvaluationContext` is designed to support only a subset of the SpEL language syntax. +It excludes Java type references, constructors, and bean references. It also requires +you to explicitly choose the level of support for properties and methods in expressions. +By default, the `create()` static factory method enables only read access to properties. +You can also obtain a builder to configure the exact level of support needed, targeting +one or some combination of the following: + +* Custom `PropertyAccessor` only (no reflection) + +* Data binding properties for read-only access + +* Data binding properties for read and write + +##### Type Conversion + +By default, SpEL uses the conversion service available in Spring core +(`org.springframework.core.convert.ConversionService`). This conversion service comes +with many built-in converters for common conversions but is also fully extensible so that +you can add custom conversions between types. Additionally, it is +generics-aware. This means that, when you work with generic types in +expressions, SpEL attempts conversions to maintain type correctness for any objects +it encounters. + +What does this mean in practice? Suppose assignment, using `setValue()`, is being used +to set a `List` property. The type of the property is actually `List`. SpEL +recognizes that the elements of the list need to be converted to `Boolean` before +being placed in it. 
The following example shows how to do so: + +Java + +``` +class Simple { + public List booleanList = new ArrayList(); +} + +Simple simple = new Simple(); +simple.booleanList.add(true); + +EvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build(); + +// "false" is passed in here as a String. SpEL and the conversion service +// will recognize that it needs to be a Boolean and convert it accordingly. +parser.parseExpression("booleanList[0]").setValue(context, simple, "false"); + +// b is false +Boolean b = simple.booleanList.get(0); +``` + +Kotlin + +``` +class Simple { + var booleanList: MutableList = ArrayList() +} + +val simple = Simple() +simple.booleanList.add(true) + +val context = SimpleEvaluationContext.forReadOnlyDataBinding().build() + +// "false" is passed in here as a String. SpEL and the conversion service +// will recognize that it needs to be a Boolean and convert it accordingly. +parser.parseExpression("booleanList[0]").setValue(context, simple, "false") + +// b is false +val b = simple.booleanList[0] +``` + +#### 4.1.2. Parser Configuration + +It is possible to configure the SpEL expression parser by using a parser configuration +object (`org.springframework.expression.spel.SpelParserConfiguration`). The configuration +object controls the behavior of some of the expression components. For example, if you +index into an array or collection and the element at the specified index is `null`, SpEL +can automatically create the element. This is useful when using expressions made up of a +chain of property references. If you index into an array or list and specify an index +that is beyond the end of the current size of the array or list, SpEL can automatically +grow the array or list to accommodate that index. In order to add an element at the +specified index, SpEL will try to create the element using the element type’s default +constructor before setting the specified value. 
If the element type does not have a +default constructor, `null` will be added to the array or list. If there is no built-in +or custom converter that knows how to set the value, `null` will remain in the array or +list at the specified index. The following example demonstrates how to automatically grow +the list: + +Java + +``` +class Demo { + public List list; +} + +// Turn on: +// - auto null reference initialization +// - auto collection growing +SpelParserConfiguration config = new SpelParserConfiguration(true, true); + +ExpressionParser parser = new SpelExpressionParser(config); + +Expression expression = parser.parseExpression("list[3]"); + +Demo demo = new Demo(); + +Object o = expression.getValue(demo); + +// demo.list will now be a real collection of 4 entries +// Each entry is a new empty String +``` + +Kotlin + +``` +class Demo { + var list: List? = null +} + +// Turn on: +// - auto null reference initialization +// - auto collection growing +val config = SpelParserConfiguration(true, true) + +val parser = SpelExpressionParser(config) + +val expression = parser.parseExpression("list[3]") + +val demo = Demo() + +val o = expression.getValue(demo) + +// demo.list will now be a real collection of 4 entries +// Each entry is a new empty String +``` + +#### 4.1.3. SpEL Compilation + +Spring Framework 4.1 includes a basic expression compiler. Expressions are usually +interpreted, which provides a lot of dynamic flexibility during evaluation but +does not provide optimum performance. For occasional expression usage, +this is fine, but, when used by other components such as Spring Integration, +performance can be very important, and there is no real need for the dynamism. + +The SpEL compiler is intended to address this need. During evaluation, the compiler +generates a Java class that embodies the expression behavior at runtime and uses that +class to achieve much faster expression evaluation. 
Due to the lack of typing around +expressions, the compiler uses information gathered during the interpreted evaluations +of an expression when performing compilation. For example, it does not know the type +of a property reference purely from the expression, but during the first interpreted +evaluation, it finds out what it is. Of course, basing compilation on such derived +information can cause trouble later if the types of the various expression elements +change over time. For this reason, compilation is best suited to expressions whose +type information is not going to change on repeated evaluations. + +Consider the following basic expression: + +``` +someArray[0].someProperty.someOtherProperty < 0.1 +``` + +Because the preceding expression involves array access, some property de-referencing, +and numeric operations, the performance gain can be very noticeable. In an example +micro benchmark run of 50000 iterations, it took 75ms to evaluate by using the +interpreter and only 3ms using the compiled version of the expression. + +##### Compiler Configuration + +The compiler is not turned on by default, but you can turn it on in either of two +different ways. You can turn it on by using the parser configuration process +([discussed earlier](#expressions-parser-configuration)) or by using a Spring property +when SpEL usage is embedded inside another component. This section discusses both of +these options. + +The compiler can operate in one of three modes, which are captured in the`org.springframework.expression.spel.SpelCompilerMode` enum. The modes are as follows: + +* `OFF` (default): The compiler is switched off. + +* `IMMEDIATE`: In immediate mode, the expressions are compiled as soon as possible. This + is typically after the first interpreted evaluation. If the compiled expression fails + (typically due to a type changing, as described earlier), the caller of the expression + evaluation receives an exception. 
+ +* `MIXED`: In mixed mode, the expressions silently switch between interpreted and compiled + mode over time. After some number of interpreted runs, they switch to compiled + form and, if something goes wrong with the compiled form (such as a type changing, as + described earlier), the expression automatically switches back to interpreted form + again. Sometime later, it may generate another compiled form and switch to it. Basically, + the exception that the user gets in `IMMEDIATE` mode is instead handled internally. + +`IMMEDIATE` mode exists because `MIXED` mode could cause issues for expressions that +have side effects. If a compiled expression blows up after partially succeeding, it +may have already done something that has affected the state of the system. If this +has happened, the caller may not want it to silently re-run in interpreted mode, +since part of the expression may be running twice. + +After selecting a mode, use the `SpelParserConfiguration` to configure the parser. The +following example shows how to do so: + +Java + +``` +SpelParserConfiguration config = new SpelParserConfiguration(SpelCompilerMode.IMMEDIATE, + this.getClass().getClassLoader()); + +SpelExpressionParser parser = new SpelExpressionParser(config); + +Expression expr = parser.parseExpression("payload"); + +MyMessage message = new MyMessage(); + +Object payload = expr.getValue(message); +``` + +Kotlin + +``` +val config = SpelParserConfiguration(SpelCompilerMode.IMMEDIATE, + this.javaClass.classLoader) + +val parser = SpelExpressionParser(config) + +val expr = parser.parseExpression("payload") + +val message = MyMessage() + +val payload = expr.getValue(message) +``` + +When you specify the compiler mode, you can also specify a classloader (passing null is allowed). +Compiled expressions are defined in a child classloader created under any that is supplied. 
+It is important to ensure that, if a classloader is specified, it can see all the types involved in +the expression evaluation process. If you do not specify a classloader, a default classloader is used +(typically the context classloader for the thread that is running during expression evaluation). + +The second way to configure the compiler is for use when SpEL is embedded inside some +other component and it may not be possible to configure it through a configuration +object. In these cases, it is possible to set the `spring.expression.compiler.mode`property via a JVM system property (or via the[`SpringProperties`](appendix.html#appendix-spring-properties) mechanism) to one of the`SpelCompilerMode` enum values (`off`, `immediate`, or `mixed`). + +##### Compiler Limitations + +Since Spring Framework 4.1, the basic compilation framework is in place. However, the framework +does not yet support compiling every kind of expression. The initial focus has been on the +common expressions that are likely to be used in performance-critical contexts. The following +kinds of expression cannot be compiled at the moment: + +* Expressions involving assignment + +* Expressions relying on the conversion service + +* Expressions using custom resolvers or accessors + +* Expressions using selection or projection + +More types of expressions will be compilable in the future. + +### 4.2. Expressions in Bean Definitions + +You can use SpEL expressions with XML-based or annotation-based configuration metadata for +defining `BeanDefinition` instances. In both cases, the syntax to define the expression is of the +form `#{ }`. + +#### 4.2.1. XML Configuration + +A property or constructor argument value can be set by using expressions, as the following +example shows: + +``` + + + + + +``` + +All beans in the application context are available as predefined variables with their +common bean name. 
This includes standard context beans such as `environment` (of type`org.springframework.core.env.Environment`) as well as `systemProperties` and`systemEnvironment` (of type `Map`) for access to the runtime environment. + +The following example shows access to the `systemProperties` bean as a SpEL variable: + +``` + + + + + +``` + +Note that you do not have to prefix the predefined variable with the `#` symbol here. + +You can also refer to other bean properties by name, as the following example shows: + +``` + + + + + + + + + + + +``` + +#### 4.2.2. Annotation Configuration + +To specify a default value, you can place the `@Value` annotation on fields, methods, +and method or constructor parameters. + +The following example sets the default value of a field: + +Java + +``` +public class FieldValueTestBean { + + @Value("#{ systemProperties['user.region'] }") + private String defaultLocale; + + public void setDefaultLocale(String defaultLocale) { + this.defaultLocale = defaultLocale; + } + + public String getDefaultLocale() { + return this.defaultLocale; + } +} +``` + +Kotlin + +``` +class FieldValueTestBean { + + @Value("#{ systemProperties['user.region'] }") + var defaultLocale: String? = null +} +``` + +The following example shows the equivalent but on a property setter method: + +Java + +``` +public class PropertyValueTestBean { + + private String defaultLocale; + + @Value("#{ systemProperties['user.region'] }") + public void setDefaultLocale(String defaultLocale) { + this.defaultLocale = defaultLocale; + } + + public String getDefaultLocale() { + return this.defaultLocale; + } +} +``` + +Kotlin + +``` +class PropertyValueTestBean { + + @Value("#{ systemProperties['user.region'] }") + var defaultLocale: String? 
= null +} +``` + +Autowired methods and constructors can also use the `@Value` annotation, as the following +examples show: + +Java + +``` +public class SimpleMovieLister { + + private MovieFinder movieFinder; + private String defaultLocale; + + @Autowired + public void configure(MovieFinder movieFinder, + @Value("#{ systemProperties['user.region'] }") String defaultLocale) { + this.movieFinder = movieFinder; + this.defaultLocale = defaultLocale; + } + + // ... +} +``` + +Kotlin + +``` +class SimpleMovieLister { + + private lateinit var movieFinder: MovieFinder + private lateinit var defaultLocale: String + + @Autowired + fun configure(movieFinder: MovieFinder, + @Value("#{ systemProperties['user.region'] }") defaultLocale: String) { + this.movieFinder = movieFinder + this.defaultLocale = defaultLocale + } + + // ... +} +``` + +Java + +``` +public class MovieRecommender { + + private String defaultLocale; + + private CustomerPreferenceDao customerPreferenceDao; + + public MovieRecommender(CustomerPreferenceDao customerPreferenceDao, + @Value("#{systemProperties['user.country']}") String defaultLocale) { + this.customerPreferenceDao = customerPreferenceDao; + this.defaultLocale = defaultLocale; + } + + // ... +} +``` + +Kotlin + +``` +class MovieRecommender(private val customerPreferenceDao: CustomerPreferenceDao, + @Value("#{systemProperties['user.country']}") private val defaultLocale: String) { + // ... +} +``` + +### 4.3. Language Reference + +This section describes how the Spring Expression Language works. 
It covers the following +topics: + +* [Literal Expressions](#expressions-ref-literal) + +* [Properties, Arrays, Lists, Maps, and Indexers](#expressions-properties-arrays) + +* [Inline Lists](#expressions-inline-lists) + +* [Inline Maps](#expressions-inline-maps) + +* [Array Construction](#expressions-array-construction) + +* [Methods](#expressions-methods) + +* [Operators](#expressions-operators) + +* [Types](#expressions-types) + +* [Constructors](#expressions-constructors) + +* [Variables](#expressions-ref-variables) + +* [Functions](#expressions-ref-functions) + +* [Bean References](#expressions-bean-references) + +* [Ternary Operator (If-Then-Else)](#expressions-operator-ternary) + +* [The Elvis Operator](#expressions-operator-elvis) + +* [Safe Navigation Operator](#expressions-operator-safe-navigation) + +#### 4.3.1. Literal Expressions + +The types of literal expressions supported are strings, numeric values (int, real, hex), +boolean, and null. Strings are delimited by single quotation marks. To put a single quotation mark itself +in a string, use two single quotation mark characters. + +The following listing shows simple usage of literals. Typically, they are not used +in isolation like this but, rather, as part of a more complex expression — for example, +using a literal on one side of a logical comparison operator. 
+ +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); + +// evals to "Hello World" +String helloWorld = (String) parser.parseExpression("'Hello World'").getValue(); + +double avogadrosNumber = (Double) parser.parseExpression("6.0221415E+23").getValue(); + +// evals to 2147483647 +int maxValue = (Integer) parser.parseExpression("0x7FFFFFFF").getValue(); + +boolean trueValue = (Boolean) parser.parseExpression("true").getValue(); + +Object nullValue = parser.parseExpression("null").getValue(); +``` + +Kotlin + +``` +val parser = SpelExpressionParser() + +// evals to "Hello World" +val helloWorld = parser.parseExpression("'Hello World'").value as String + +val avogadrosNumber = parser.parseExpression("6.0221415E+23").value as Double + +// evals to 2147483647 +val maxValue = parser.parseExpression("0x7FFFFFFF").value as Int + +val trueValue = parser.parseExpression("true").value as Boolean + +val nullValue = parser.parseExpression("null").value +``` + +Numbers support the use of the negative sign, exponential notation, and decimal points. +By default, real numbers are parsed by using `Double.parseDouble()`. + +#### 4.3.2. Properties, Arrays, Lists, Maps, and Indexers + +Navigating with property references is easy. To do so, use a period to indicate a nested +property value. The instances of the `Inventor` class, `pupin` and `tesla`, were +populated with data listed in the [Classes used in the +examples](#expressions-example-classes) section. 
To navigate "down" the object graph and get Tesla’s year of birth and +Pupin’s city of birth, we use the following expressions: + +Java + +``` +// evals to 1856 +int year = (Integer) parser.parseExpression("birthdate.year + 1900").getValue(context); + +String city = (String) parser.parseExpression("placeOfBirth.city").getValue(context); +``` + +Kotlin + +``` +// evals to 1856 +val year = parser.parseExpression("birthdate.year + 1900").getValue(context) as Int + +val city = parser.parseExpression("placeOfBirth.city").getValue(context) as String +``` + +| |Case insensitivity is allowed for the first letter of property names. Thus, the
expressions in the above example may be written as `Birthdate.Year + 1900` and `PlaceOfBirth.City`, respectively. In addition, properties may optionally be accessed via
method invocations — for example, `getPlaceOfBirth().getCity()` instead of`placeOfBirth.city`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The contents of arrays and lists are obtained by using square bracket notation, as the +following example shows: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); +EvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build(); + +// Inventions Array + +// evaluates to "Induction motor" +String invention = parser.parseExpression("inventions[3]").getValue( + context, tesla, String.class); + +// Members List + +// evaluates to "Nikola Tesla" +String name = parser.parseExpression("members[0].name").getValue( + context, ieee, String.class); + +// List and Array navigation +// evaluates to "Wireless communication" +String invention = parser.parseExpression("members[0].inventions[6]").getValue( + context, ieee, String.class); +``` + +Kotlin + +``` +val parser = SpelExpressionParser() +val context = SimpleEvaluationContext.forReadOnlyDataBinding().build() + +// Inventions Array + +// evaluates to "Induction motor" +val invention = parser.parseExpression("inventions[3]").getValue( + context, tesla, String::class.java) + +// Members List + +// evaluates to "Nikola Tesla" +val name = parser.parseExpression("members[0].name").getValue( + context, ieee, String::class.java) + +// List and Array navigation +// evaluates to "Wireless communication" +val invention = parser.parseExpression("members[0].inventions[6]").getValue( + context, ieee, String::class.java) +``` + +The contents of maps are obtained by specifying the literal key value within the +brackets. 
In the following example, because keys for the `officers` map are strings, we can specify +string literals: + +Java + +``` +// Officer's Dictionary + +Inventor pupin = parser.parseExpression("officers['president']").getValue( + societyContext, Inventor.class); + +// evaluates to "Idvor" +String city = parser.parseExpression("officers['president'].placeOfBirth.city").getValue( + societyContext, String.class); + +// setting values +parser.parseExpression("officers['advisors'][0].placeOfBirth.country").setValue( + societyContext, "Croatia"); +``` + +Kotlin + +``` +// Officer's Dictionary + +val pupin = parser.parseExpression("officers['president']").getValue( + societyContext, Inventor::class.java) + +// evaluates to "Idvor" +val city = parser.parseExpression("officers['president'].placeOfBirth.city").getValue( + societyContext, String::class.java) + +// setting values +parser.parseExpression("officers['advisors'][0].placeOfBirth.country").setValue( + societyContext, "Croatia") +``` + +#### 4.3.3. Inline Lists + +You can directly express lists in an expression by using `{}` notation. + +Java + +``` +// evaluates to a Java list containing the four numbers +List numbers = (List) parser.parseExpression("{1,2,3,4}").getValue(context); + +List listOfLists = (List) parser.parseExpression("{{'a','b'},{'x','y'}}").getValue(context); +``` + +Kotlin + +``` +// evaluates to a Java list containing the four numbers +val numbers = parser.parseExpression("{1,2,3,4}").getValue(context) as List<*> + +val listOfLists = parser.parseExpression("{{'a','b'},{'x','y'}}").getValue(context) as List<*> +``` + +`{}` by itself means an empty list. For performance reasons, if the list is itself +entirely composed of fixed literals, a constant list is created to represent the +expression (rather than building a new list on each evaluation). + +#### 4.3.4. Inline Maps + +You can also directly express maps in an expression by using `{key:value}` notation. 
The +following example shows how to do so: + +Java + +``` +// evaluates to a Java map containing the two entries +Map inventorInfo = (Map) parser.parseExpression("{name:'Nikola',dob:'10-July-1856'}").getValue(context); + +Map mapOfMaps = (Map) parser.parseExpression("{name:{first:'Nikola',last:'Tesla'},dob:{day:10,month:'July',year:1856}}").getValue(context); +``` + +Kotlin + +``` +// evaluates to a Java map containing the two entries +val inventorInfo = parser.parseExpression("{name:'Nikola',dob:'10-July-1856'}").getValue(context) as Map<*, *> + +val mapOfMaps = parser.parseExpression("{name:{first:'Nikola',last:'Tesla'},dob:{day:10,month:'July',year:1856}}").getValue(context) as Map<*, *> +``` + +`{:}` by itself means an empty map. For performance reasons, if the map is itself +composed of fixed literals or other nested constant structures (lists or maps), a +constant map is created to represent the expression (rather than building a new map on +each evaluation). Quoting of the map keys is optional (unless the key contains a period +(`.`)). The examples above do not use quoted keys. + +#### 4.3.5. Array Construction + +You can build arrays by using the familiar Java syntax, optionally supplying an initializer +to have the array populated at construction time. 
The following example shows how to do so: + +Java + +``` +int[] numbers1 = (int[]) parser.parseExpression("new int[4]").getValue(context); + +// Array with initializer +int[] numbers2 = (int[]) parser.parseExpression("new int[]{1,2,3}").getValue(context); + +// Multi dimensional array +int[][] numbers3 = (int[][]) parser.parseExpression("new int[4][5]").getValue(context); +``` + +Kotlin + +``` +val numbers1 = parser.parseExpression("new int[4]").getValue(context) as IntArray + +// Array with initializer +val numbers2 = parser.parseExpression("new int[]{1,2,3}").getValue(context) as IntArray + +// Multi dimensional array +val numbers3 = parser.parseExpression("new int[4][5]").getValue(context) as Array +``` + +You cannot currently supply an initializer when you construct a multi-dimensional array. + +#### 4.3.6. Methods + +You can invoke methods by using typical Java programming syntax. You can also invoke methods +on literals. Variable arguments are also supported. The following examples show how to +invoke methods: + +Java + +``` +// string literal, evaluates to "bc" +String bc = parser.parseExpression("'abc'.substring(1, 3)").getValue(String.class); + +// evaluates to true +boolean isMember = parser.parseExpression("isMember('Mihajlo Pupin')").getValue( + societyContext, Boolean.class); +``` + +Kotlin + +``` +// string literal, evaluates to "bc" +val bc = parser.parseExpression("'abc'.substring(1, 3)").getValue(String::class.java) + +// evaluates to true +val isMember = parser.parseExpression("isMember('Mihajlo Pupin')").getValue( + societyContext, Boolean::class.java) +``` + +#### 4.3.7. 
Operators + +The Spring Expression Language supports the following kinds of operators: + +* [Relational Operators](#expressions-operators-relational) + +* [Logical Operators](#expressions-operators-logical) + +* [Mathematical Operators](#expressions-operators-mathematical) + +* [The Assignment Operator](#expressions-assignment) + +##### Relational Operators + +The relational operators (equal, not equal, less than, less than or equal, greater than, +and greater than or equal) are supported by using standard operator notation. The +following listing shows a few examples of operators: + +Java + +``` +// evaluates to true +boolean trueValue = parser.parseExpression("2 == 2").getValue(Boolean.class); + +// evaluates to false +boolean falseValue = parser.parseExpression("2 < -5.0").getValue(Boolean.class); + +// evaluates to true +boolean trueValue = parser.parseExpression("'black' < 'block'").getValue(Boolean.class); +``` + +Kotlin + +``` +// evaluates to true +val trueValue = parser.parseExpression("2 == 2").getValue(Boolean::class.java) + +// evaluates to false +val falseValue = parser.parseExpression("2 < -5.0").getValue(Boolean::class.java) + +// evaluates to true +val trueValue = parser.parseExpression("'black' < 'block'").getValue(Boolean::class.java) +``` + +| |Greater-than and less-than comparisons against `null` follow a simple rule: `null` is treated as
nothing (that is NOT as zero). As a consequence, any other value is always greater
than `null` (`X > null` is always `true`) and no other value is ever less than nothing
(`X < null` is always `false`).

If you prefer numeric comparisons instead, avoid number-based `null` comparisons
in favor of comparisons against zero (for example, `X > 0` or `X < 0`).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In addition to the standard relational operators, SpEL supports the `instanceof` and regular +expression-based `matches` operator. The following listing shows examples of both: + +Java + +``` +// evaluates to false +boolean falseValue = parser.parseExpression( + "'xyz' instanceof T(Integer)").getValue(Boolean.class); + +// evaluates to true +boolean trueValue = parser.parseExpression( + "'5.00' matches '^-?\\d+(\\.\\d{2})?$'").getValue(Boolean.class); + +// evaluates to false +boolean falseValue = parser.parseExpression( + "'5.0067' matches '^-?\\d+(\\.\\d{2})?$'").getValue(Boolean.class); +``` + +Kotlin + +``` +// evaluates to false +val falseValue = parser.parseExpression( + "'xyz' instanceof T(Integer)").getValue(Boolean::class.java) + +// evaluates to true +val trueValue = parser.parseExpression( + "'5.00' matches '^-?\\d+(\\.\\d{2})?$'").getValue(Boolean::class.java) + +// evaluates to false +val falseValue = parser.parseExpression( + "'5.0067' matches '^-?\\d+(\\.\\d{2})?$'").getValue(Boolean::class.java) +``` + +| |Be careful with primitive types, as they are immediately boxed up to their
wrapper types. For example, `1 instanceof T(int)` evaluates to `false`, while`1 instanceof T(Integer)` evaluates to `true`, as expected.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Each symbolic operator can also be specified as a purely alphabetic equivalent. This +avoids problems where the symbols used have special meaning for the document type in +which the expression is embedded (such as in an XML document). The textual equivalents are: + +* `lt` (`<`) + +* `gt` (`>`) + +* `le` (`<=`) + +* `ge` (`>=`) + +* `eq` (`==`) + +* `ne` (`!=`) + +* `div` (`/`) + +* `mod` (`%`) + +* `not` (`!`). + +All of the textual operators are case-insensitive. + +##### Logical Operators + +SpEL supports the following logical operators: + +* `and` (`&&`) + +* `or` (`||`) + +* `not` (`!`) + +The following example shows how to use the logical operators: + +Java + +``` +// -- AND -- + +// evaluates to false +boolean falseValue = parser.parseExpression("true and false").getValue(Boolean.class); + +// evaluates to true +String expression = "isMember('Nikola Tesla') and isMember('Mihajlo Pupin')"; +boolean trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean.class); + +// -- OR -- + +// evaluates to true +boolean trueValue = parser.parseExpression("true or false").getValue(Boolean.class); + +// evaluates to true +String expression = "isMember('Nikola Tesla') or isMember('Albert Einstein')"; +boolean trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean.class); + +// -- NOT -- + +// evaluates to false +boolean falseValue = parser.parseExpression("!true").getValue(Boolean.class); + +// -- AND and NOT -- +String expression = "isMember('Nikola Tesla') and !isMember('Mihajlo Pupin')"; +boolean falseValue = 
parser.parseExpression(expression).getValue(societyContext, Boolean.class); +``` + +Kotlin + +``` +// -- AND -- + +// evaluates to false +val falseValue = parser.parseExpression("true and false").getValue(Boolean::class.java) + +// evaluates to true +val expression = "isMember('Nikola Tesla') and isMember('Mihajlo Pupin')" +val trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean::class.java) + +// -- OR -- + +// evaluates to true +val trueValue = parser.parseExpression("true or false").getValue(Boolean::class.java) + +// evaluates to true +val expression = "isMember('Nikola Tesla') or isMember('Albert Einstein')" +val trueValue = parser.parseExpression(expression).getValue(societyContext, Boolean::class.java) + +// -- NOT -- + +// evaluates to false +val falseValue = parser.parseExpression("!true").getValue(Boolean::class.java) + +// -- AND and NOT -- +val expression = "isMember('Nikola Tesla') and !isMember('Mihajlo Pupin')" +val falseValue = parser.parseExpression(expression).getValue(societyContext, Boolean::class.java) +``` + +##### Mathematical Operators + +You can use the addition operator (`+`) on both numbers and strings. You can use the +subtraction (`-`), multiplication (`*`), and division (`/`) operators only on numbers. +You can also use the modulus (`%`) and exponential power (`^`) operators on numbers. +Standard operator precedence is enforced. 
The following example shows the mathematical +operators in use: + +Java + +``` +// Addition +int two = parser.parseExpression("1 + 1").getValue(Integer.class); // 2 + +String testString = parser.parseExpression( + "'test' + ' ' + 'string'").getValue(String.class); // 'test string' + +// Subtraction +int four = parser.parseExpression("1 - -3").getValue(Integer.class); // 4 + +double d = parser.parseExpression("1000.00 - 1e4").getValue(Double.class); // -9000 + +// Multiplication +int six = parser.parseExpression("-2 * -3").getValue(Integer.class); // 6 + +double twentyFour = parser.parseExpression("2.0 * 3e0 * 4").getValue(Double.class); // 24.0 + +// Division +int minusTwo = parser.parseExpression("6 / -3").getValue(Integer.class); // -2 + +double one = parser.parseExpression("8.0 / 4e0 / 2").getValue(Double.class); // 1.0 + +// Modulus +int three = parser.parseExpression("7 % 4").getValue(Integer.class); // 3 + +int one = parser.parseExpression("8 / 5 % 2").getValue(Integer.class); // 1 + +// Operator precedence +int minusTwentyOne = parser.parseExpression("1+2-3*8").getValue(Integer.class); // -21 +``` + +Kotlin + +``` +// Addition +val two = parser.parseExpression("1 + 1").getValue(Int::class.java) // 2 + +val testString = parser.parseExpression( + "'test' + ' ' + 'string'").getValue(String::class.java) // 'test string' + +// Subtraction +val four = parser.parseExpression("1 - -3").getValue(Int::class.java) // 4 + +val d = parser.parseExpression("1000.00 - 1e4").getValue(Double::class.java) // -9000 + +// Multiplication +val six = parser.parseExpression("-2 * -3").getValue(Int::class.java) // 6 + +val twentyFour = parser.parseExpression("2.0 * 3e0 * 4").getValue(Double::class.java) // 24.0 + +// Division +val minusTwo = parser.parseExpression("6 / -3").getValue(Int::class.java) // -2 + +val one = parser.parseExpression("8.0 / 4e0 / 2").getValue(Double::class.java) // 1.0 + +// Modulus +val three = parser.parseExpression("7 % 4").getValue(Int::class.java) // 3 + 
+val one = parser.parseExpression("8 / 5 % 2").getValue(Int::class.java) // 1 + +// Operator precedence +val minusTwentyOne = parser.parseExpression("1+2-3*8").getValue(Int::class.java) // -21 +``` + +##### The Assignment Operator + +To set a property, use the assignment operator (`=`). This is typically done within a +call to `setValue` but can also be done inside a call to `getValue`. The following +listing shows both ways to use the assignment operator: + +Java + +``` +Inventor inventor = new Inventor(); +EvaluationContext context = SimpleEvaluationContext.forReadWriteDataBinding().build(); + +parser.parseExpression("name").setValue(context, inventor, "Aleksandar Seovic"); + +// alternatively +String aleks = parser.parseExpression( + "name = 'Aleksandar Seovic'").getValue(context, inventor, String.class); +``` + +Kotlin + +``` +val inventor = Inventor() +val context = SimpleEvaluationContext.forReadWriteDataBinding().build() + +parser.parseExpression("name").setValue(context, inventor, "Aleksandar Seovic") + +// alternatively +val aleks = parser.parseExpression( + "name = 'Aleksandar Seovic'").getValue(context, inventor, String::class.java) +``` + +#### 4.3.8. Types + +You can use the special `T` operator to specify an instance of `java.lang.Class` (the +type). Static methods are invoked by using this operator as well. The`StandardEvaluationContext` uses a `TypeLocator` to find types, and the`StandardTypeLocator` (which can be replaced) is built with an understanding of the`java.lang` package. This means that `T()` references to types within the `java.lang`package do not need to be fully qualified, but all other type references must be. 
The +following example shows how to use the `T` operator: + +Java + +``` +Class dateClass = parser.parseExpression("T(java.util.Date)").getValue(Class.class); + +Class stringClass = parser.parseExpression("T(String)").getValue(Class.class); + +boolean trueValue = parser.parseExpression( + "T(java.math.RoundingMode).CEILING < T(java.math.RoundingMode).FLOOR") + .getValue(Boolean.class); +``` + +Kotlin + +``` +val dateClass = parser.parseExpression("T(java.util.Date)").getValue(Class::class.java) + +val stringClass = parser.parseExpression("T(String)").getValue(Class::class.java) + +val trueValue = parser.parseExpression( + "T(java.math.RoundingMode).CEILING < T(java.math.RoundingMode).FLOOR") + .getValue(Boolean::class.java) +``` + +#### 4.3.9. Constructors + +You can invoke constructors by using the `new` operator. You should use the fully +qualified class name for all types except those located in the `java.lang` package +(`Integer`, `Float`, `String`, and so on). The following example shows how to use the`new` operator to invoke constructors: + +Java + +``` +Inventor einstein = p.parseExpression( + "new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German')") + .getValue(Inventor.class); + +// create new Inventor instance within the add() method of List +p.parseExpression( + "Members.add(new org.spring.samples.spel.inventor.Inventor( + 'Albert Einstein', 'German'))").getValue(societyContext); +``` + +Kotlin + +``` +val einstein = p.parseExpression( + "new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German')") + .getValue(Inventor::class.java) + +// create new Inventor instance within the add() method of List +p.parseExpression( + "Members.add(new org.spring.samples.spel.inventor.Inventor('Albert Einstein', 'German'))") + .getValue(societyContext) +``` + +#### 4.3.10. Variables + +You can reference variables in the expression by using the `#variableName` syntax. 
Variables +are set by using the `setVariable` method on `EvaluationContext` implementations. + +| |Valid variable names must be composed of one or more of the following supported
characters.

* letters: `A` to `Z` and `a` to `z`

* digits: `0` to `9`

* underscore: `_`

* dollar sign: `$`| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to use variables. + +Java + +``` +Inventor tesla = new Inventor("Nikola Tesla", "Serbian"); + +EvaluationContext context = SimpleEvaluationContext.forReadWriteDataBinding().build(); +context.setVariable("newName", "Mike Tesla"); + +parser.parseExpression("name = #newName").getValue(context, tesla); +System.out.println(tesla.getName()) // "Mike Tesla" +``` + +Kotlin + +``` +val tesla = Inventor("Nikola Tesla", "Serbian") + +val context = SimpleEvaluationContext.forReadWriteDataBinding().build() +context.setVariable("newName", "Mike Tesla") + +parser.parseExpression("name = #newName").getValue(context, tesla) +println(tesla.name) // "Mike Tesla" +``` + +##### The `#this` and `#root` Variables + +The `#this` variable is always defined and refers to the current evaluation object +(against which unqualified references are resolved). The `#root` variable is always +defined and refers to the root context object. Although `#this` may vary as components of +an expression are evaluated, `#root` always refers to the root. 
The following examples +show how to use the `#this` and `#root` variables: + +Java + +``` +// create an array of integers +List primes = new ArrayList(); +primes.addAll(Arrays.asList(2,3,5,7,11,13,17)); + +// create parser and set variable 'primes' as the array of integers +ExpressionParser parser = new SpelExpressionParser(); +EvaluationContext context = SimpleEvaluationContext.forReadOnlyDataAccess(); +context.setVariable("primes", primes); + +// all prime numbers > 10 from the list (using selection ?{...}) +// evaluates to [11, 13, 17] +List primesGreaterThanTen = (List) parser.parseExpression( + "#primes.?[#this>10]").getValue(context); +``` + +Kotlin + +``` +// create an array of integers +val primes = ArrayList() +primes.addAll(listOf(2, 3, 5, 7, 11, 13, 17)) + +// create parser and set variable 'primes' as the array of integers +val parser = SpelExpressionParser() +val context = SimpleEvaluationContext.forReadOnlyDataAccess() +context.setVariable("primes", primes) + +// all prime numbers > 10 from the list (using selection ?{...}) +// evaluates to [11, 13, 17] +val primesGreaterThanTen = parser.parseExpression( + "#primes.?[#this>10]").getValue(context) as List +``` + +#### 4.3.11. Functions + +You can extend SpEL by registering user-defined functions that can be called within the +expression string. The function is registered through the `EvaluationContext`. The +following example shows how to register a user-defined function: + +Java + +``` +Method method = ...; + +EvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build(); +context.setVariable("myFunction", method); +``` + +Kotlin + +``` +val method: Method = ... 
+ +val context = SimpleEvaluationContext.forReadOnlyDataBinding().build() +context.setVariable("myFunction", method) +``` + +For example, consider the following utility method that reverses a string: + +Java + +``` +public abstract class StringUtils { + + public static String reverseString(String input) { + StringBuilder backwards = new StringBuilder(input.length()); + for (int i = 0; i < input.length(); i++) { + backwards.append(input.charAt(input.length() - 1 - i)); + } + return backwards.toString(); + } +} +``` + +Kotlin + +``` +fun reverseString(input: String): String { + val backwards = StringBuilder(input.length) + for (i in 0 until input.length) { + backwards.append(input[input.length - 1 - i]) + } + return backwards.toString() +} +``` + +You can then register and use the preceding method, as the following example shows: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); + +EvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build(); +context.setVariable("reverseString", + StringUtils.class.getDeclaredMethod("reverseString", String.class)); + +String helloWorldReversed = parser.parseExpression( + "#reverseString('hello')").getValue(context, String.class); +``` + +Kotlin + +``` +val parser = SpelExpressionParser() + +val context = SimpleEvaluationContext.forReadOnlyDataBinding().build() +context.setVariable("reverseString", ::reverseString::javaMethod) + +val helloWorldReversed = parser.parseExpression( + "#reverseString('hello')").getValue(context, String::class.java) +``` + +#### 4.3.12. Bean References + +If the evaluation context has been configured with a bean resolver, you can +look up beans from an expression by using the `@` symbol. 
The following example shows how +to do so: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); +StandardEvaluationContext context = new StandardEvaluationContext(); +context.setBeanResolver(new MyBeanResolver()); + +// This will end up calling resolve(context,"something") on MyBeanResolver during evaluation +Object bean = parser.parseExpression("@something").getValue(context); +``` + +Kotlin + +``` +val parser = SpelExpressionParser() +val context = StandardEvaluationContext() +context.setBeanResolver(MyBeanResolver()) + +// This will end up calling resolve(context,"something") on MyBeanResolver during evaluation +val bean = parser.parseExpression("@something").getValue(context) +``` + +To access a factory bean itself, you should instead prefix the bean name with an `&` symbol. +The following example shows how to do so: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); +StandardEvaluationContext context = new StandardEvaluationContext(); +context.setBeanResolver(new MyBeanResolver()); + +// This will end up calling resolve(context,"&foo") on MyBeanResolver during evaluation +Object bean = parser.parseExpression("&foo").getValue(context); +``` + +Kotlin + +``` +val parser = SpelExpressionParser() +val context = StandardEvaluationContext() +context.setBeanResolver(MyBeanResolver()) + +// This will end up calling resolve(context,"&foo") on MyBeanResolver during evaluation +val bean = parser.parseExpression("&foo").getValue(context) +``` + +#### 4.3.13. Ternary Operator (If-Then-Else) + +You can use the ternary operator for performing if-then-else conditional logic inside +the expression. The following listing shows a minimal example: + +Java + +``` +String falseString = parser.parseExpression( + "false ? 'trueExp' : 'falseExp'").getValue(String.class); +``` + +Kotlin + +``` +val falseString = parser.parseExpression( + "false ? 
'trueExp' : 'falseExp'").getValue(String::class.java) +``` + +In this case, the boolean `false` results in returning the string value `'falseExp'`. A more +realistic example follows: + +Java + +``` +parser.parseExpression("name").setValue(societyContext, "IEEE"); +societyContext.setVariable("queryName", "Nikola Tesla"); + +expression = "isMember(#queryName)? #queryName + ' is a member of the ' " + + "+ Name + ' Society' : #queryName + ' is not a member of the ' + Name + ' Society'"; + +String queryResultString = parser.parseExpression(expression) + .getValue(societyContext, String.class); +// queryResultString = "Nikola Tesla is a member of the IEEE Society" +``` + +Kotlin + +``` +parser.parseExpression("name").setValue(societyContext, "IEEE") +societyContext.setVariable("queryName", "Nikola Tesla") + +expression = "isMember(#queryName)? #queryName + ' is a member of the ' " + "+ Name + ' Society' : #queryName + ' is not a member of the ' + Name + ' Society'" + +val queryResultString = parser.parseExpression(expression) + .getValue(societyContext, String::class.java) +// queryResultString = "Nikola Tesla is a member of the IEEE Society" +``` + +See the next section on the Elvis operator for an even shorter syntax for the +ternary operator. + +#### 4.3.14. The Elvis Operator + +The Elvis operator is a shortening of the ternary operator syntax and is used in the[Groovy](http://www.groovy-lang.org/operators.html#_elvis_operator) language. +With the ternary operator syntax, you usually have to repeat a variable twice, as the +following example shows: + +``` +String name = "Elvis Presley"; +String displayName = (name != null ? name : "Unknown"); +``` + +Instead, you can use the Elvis operator (named for the resemblance to Elvis' hair style). 
+The following example shows how to use the Elvis operator: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); + +String name = parser.parseExpression("name?:'Unknown'").getValue(new Inventor(), String.class); +System.out.println(name); // 'Unknown' +``` + +Kotlin + +``` +val parser = SpelExpressionParser() + +val name = parser.parseExpression("name?:'Unknown'").getValue(Inventor(), String::class.java) +println(name) // 'Unknown' +``` + +The following listing shows a more complex example: + +Java + +``` +ExpressionParser parser = new SpelExpressionParser(); +EvaluationContext context = SimpleEvaluationContext.forReadOnlyDataBinding().build(); + +Inventor tesla = new Inventor("Nikola Tesla", "Serbian"); +String name = parser.parseExpression("name?:'Elvis Presley'").getValue(context, tesla, String.class); +System.out.println(name); // Nikola Tesla + +tesla.setName(null); +name = parser.parseExpression("name?:'Elvis Presley'").getValue(context, tesla, String.class); +System.out.println(name); // Elvis Presley +``` + +Kotlin + +``` +val parser = SpelExpressionParser() +val context = SimpleEvaluationContext.forReadOnlyDataBinding().build() + +val tesla = Inventor("Nikola Tesla", "Serbian") +var name = parser.parseExpression("name?:'Elvis Presley'").getValue(context, tesla, String::class.java) +println(name) // Nikola Tesla + +tesla.setName(null) +name = parser.parseExpression("name?:'Elvis Presley'").getValue(context, tesla, String::class.java) +println(name) // Elvis Presley +``` + +| |You can use the Elvis operator to apply default values in expressions. The following
example shows how to use the Elvis operator in a `@Value` expression:

```
@Value("#{systemProperties['pop3.port'] ?: 25}")
```

This will inject the value of the system property `pop3.port` if it is defined, or 25 if it is not.|
+``` + +Kotlin + +``` +val parser = SpelExpressionParser() +val context = SimpleEvaluationContext.forReadOnlyDataBinding().build() + +val tesla = Inventor("Nikola Tesla", "Serbian") +tesla.setPlaceOfBirth(PlaceOfBirth("Smiljan")) + +var city = parser.parseExpression("placeOfBirth?.city").getValue(context, tesla, String::class.java) +println(city) // Smiljan + +tesla.setPlaceOfBirth(null) +city = parser.parseExpression("placeOfBirth?.city").getValue(context, tesla, String::class.java) +println(city) // null - does not throw NullPointerException!!! +``` + +#### 4.3.16. Collection Selection + +Selection is a powerful expression language feature that lets you transform a +source collection into another collection by selecting from its entries. + +Selection uses a syntax of `.?[selectionExpression]`. It filters the collection and +returns a new collection that contains a subset of the original elements. For example, +selection lets us easily get a list of Serbian inventors, as the following example shows: + +Java + +``` +List list = (List) parser.parseExpression( + "members.?[nationality == 'Serbian']").getValue(societyContext); +``` + +Kotlin + +``` +val list = parser.parseExpression( + "members.?[nationality == 'Serbian']").getValue(societyContext) as List +``` + +Selection is supported for arrays and anything that implements `java.lang.Iterable` or`java.util.Map`. For a list or array, the selection criteria is evaluated against each +individual element. Against a map, the selection criteria is evaluated against each map +entry (objects of the Java type `Map.Entry`). Each map entry has its `key` and `value`accessible as properties for use in the selection. 
+ +The following expression returns a new map that consists of those elements of the +original map where the entry’s value is less than 27: + +Java + +``` +Map newMap = parser.parseExpression("map.?[value<27]").getValue(); +``` + +Kotlin + +``` +val newMap = parser.parseExpression("map.?[value<27]").getValue() +``` + +In addition to returning all the selected elements, you can retrieve only the first or +the last element. To obtain the first element matching the selection, the syntax is`.^[selectionExpression]`. To obtain the last matching selection, the syntax is`.$[selectionExpression]`. + +#### 4.3.17. Collection Projection + +Projection lets a collection drive the evaluation of a sub-expression, and the result is +a new collection. The syntax for projection is `.![projectionExpression]`. For example, +suppose we have a list of inventors but want the list of cities where they were born. +Effectively, we want to evaluate 'placeOfBirth.city' for every entry in the inventor +list. The following example uses projection to do so: + +Java + +``` +// returns ['Smiljan', 'Idvor' ] +List placesOfBirth = (List)parser.parseExpression("members.![placeOfBirth.city]"); +``` + +Kotlin + +``` +// returns ['Smiljan', 'Idvor' ] +val placesOfBirth = parser.parseExpression("members.![placeOfBirth.city]") as List<*> +``` + +Projection is supported for arrays and anything that implements `java.lang.Iterable` or`java.util.Map`. When using a map to drive projection, the projection expression is +evaluated against each entry in the map (represented as a Java `Map.Entry`). The result +of a projection across a map is a list that consists of the evaluation of the projection +expression against each map entry. + +#### 4.3.18. Expression templating + +Expression templates allow mixing literal text with one or more evaluation blocks. +Each evaluation block is delimited with prefix and suffix characters that you can +define. 
A common choice is to use `#{ }` as the delimiters, as the following example +shows: + +Java + +``` +String randomPhrase = parser.parseExpression( + "random number is #{T(java.lang.Math).random()}", + new TemplateParserContext()).getValue(String.class); + +// evaluates to "random number is 0.7038186818312008" +``` + +Kotlin + +``` +val randomPhrase = parser.parseExpression( + "random number is #{T(java.lang.Math).random()}", + TemplateParserContext()).getValue(String::class.java) + +// evaluates to "random number is 0.7038186818312008" +``` + +The string is evaluated by concatenating the literal text `'random number is '` with the +result of evaluating the expression inside the `#{ }` delimiter (in this case, the result +of calling that `random()` method). The second argument to the `parseExpression()` method +is of the type `ParserContext`. The `ParserContext` interface is used to influence how +the expression is parsed in order to support the expression templating functionality. +The definition of `TemplateParserContext` follows: + +Java + +``` +public class TemplateParserContext implements ParserContext { + + public String getExpressionPrefix() { + return "#{"; + } + + public String getExpressionSuffix() { + return "}"; + } + + public boolean isTemplate() { + return true; + } +} +``` + +Kotlin + +``` +class TemplateParserContext : ParserContext { + + override fun getExpressionPrefix(): String { + return "#{" + } + + override fun getExpressionSuffix(): String { + return "}" + } + + override fun isTemplate(): Boolean { + return true + } +} +``` + +### 4.4. Classes Used in the Examples + +This section lists the classes used in the examples throughout this chapter. 
+ +Inventor.Java + +``` +package org.spring.samples.spel.inventor; + +import java.util.Date; +import java.util.GregorianCalendar; + +public class Inventor { + + private String name; + private String nationality; + private String[] inventions; + private Date birthdate; + private PlaceOfBirth placeOfBirth; + + public Inventor(String name, String nationality) { + GregorianCalendar c= new GregorianCalendar(); + this.name = name; + this.nationality = nationality; + this.birthdate = c.getTime(); + } + + public Inventor(String name, Date birthdate, String nationality) { + this.name = name; + this.nationality = nationality; + this.birthdate = birthdate; + } + + public Inventor() { + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getNationality() { + return nationality; + } + + public void setNationality(String nationality) { + this.nationality = nationality; + } + + public Date getBirthdate() { + return birthdate; + } + + public void setBirthdate(Date birthdate) { + this.birthdate = birthdate; + } + + public PlaceOfBirth getPlaceOfBirth() { + return placeOfBirth; + } + + public void setPlaceOfBirth(PlaceOfBirth placeOfBirth) { + this.placeOfBirth = placeOfBirth; + } + + public void setInventions(String[] inventions) { + this.inventions = inventions; + } + + public String[] getInventions() { + return inventions; + } +} +``` + +Inventor.kt + +``` +class Inventor( + var name: String, + var nationality: String, + var inventions: Array? = null, + var birthdate: Date = GregorianCalendar().time, + var placeOfBirth: PlaceOfBirth? 
= null) +``` + +PlaceOfBirth.java + +``` +package org.spring.samples.spel.inventor; + +public class PlaceOfBirth { + + private String city; + private String country; + + public PlaceOfBirth(String city) { + this.city=city; + } + + public PlaceOfBirth(String city, String country) { + this(city); + this.country = country; + } + + public String getCity() { + return city; + } + + public void setCity(String s) { + this.city = s; + } + + public String getCountry() { + return country; + } + + public void setCountry(String country) { + this.country = country; + } +} +``` + +PlaceOfBirth.kt + +``` +class PlaceOfBirth(var city: String, var country: String? = null) { +``` + +Society.java + +``` +package org.spring.samples.spel.inventor; + +import java.util.*; + +public class Society { + + private String name; + + public static String Advisors = "advisors"; + public static String President = "president"; + + private List members = new ArrayList(); + private Map officers = new HashMap(); + + public List getMembers() { + return members; + } + + public Map getOfficers() { + return officers; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public boolean isMember(String name) { + for (Inventor inventor : members) { + if (inventor.getName().equals(name)) { + return true; + } + } + return false; + } +} +``` + +Society.kt + +``` +package org.spring.samples.spel.inventor + +import java.util.* + +class Society { + + val Advisors = "advisors" + val President = "president" + + var name: String? = null + + val members = ArrayList() + val officers = mapOf() + + fun isMember(name: String): Boolean { + for (inventor in members) { + if (inventor.name == name) { + return true + } + } + return false + } +} +``` + +## 5. Aspect Oriented Programming with Spring + +Aspect-oriented Programming (AOP) complements Object-oriented Programming (OOP) by +providing another way of thinking about program structure. 
The key unit of modularity +in OOP is the class, whereas in AOP the unit of modularity is the aspect. Aspects +enable the modularization of concerns (such as transaction management) that cut across +multiple types and objects. (Such concerns are often termed “crosscutting” concerns +in AOP literature.) + +One of the key components of Spring is the AOP framework. While the Spring IoC +container does not depend on AOP (meaning you do not need to use AOP if you don’t want +to), AOP complements Spring IoC to provide a very capable middleware solution. + +Spring AOP with AspectJ pointcuts + +Spring provides simple and powerful ways of writing custom aspects by using either a[schema-based approach](#aop-schema) or the [@AspectJ annotation style](#aop-ataspectj). +Both of these styles offer fully typed advice and use of the AspectJ pointcut language +while still using Spring AOP for weaving. + +This chapter discusses the schema- and @AspectJ-based AOP support. +The lower-level AOP support is discussed in [the following chapter](#aop-api). + +AOP is used in the Spring Framework to: + +* Provide declarative enterprise services. The most important such service is[declarative transaction management](data-access.html#transaction-declarative). + +* Let users implement custom aspects, complementing their use of OOP with AOP. + +| |If you are interested only in generic declarative services or other pre-packaged
declarative middleware services such as pooling, you do not need to work directly with
Spring AOP, and can skip most of this chapter.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 5.1. AOP Concepts + +Let us begin by defining some central AOP concepts and terminology. These terms are not +Spring-specific. Unfortunately, AOP terminology is not particularly intuitive. +However, it would be even more confusing if Spring used its own terminology. + +* Aspect: A modularization of a concern that cuts across multiple classes. + Transaction management is a good example of a crosscutting concern in enterprise Java + applications. In Spring AOP, aspects are implemented by using regular classes + (the [schema-based approach](#aop-schema)) or regular classes annotated with the`@Aspect` annotation (the [@AspectJ style](#aop-ataspectj)). + +* Join point: A point during the execution of a program, such as the execution of a + method or the handling of an exception. In Spring AOP, a join point always + represents a method execution. + +* Advice: Action taken by an aspect at a particular join point. Different types of + advice include “around”, “before” and “after” advice. (Advice types are discussed + later.) Many AOP frameworks, including Spring, model an advice as an interceptor and + maintain a chain of interceptors around the join point. + +* Pointcut: A predicate that matches join points. Advice is associated with a + pointcut expression and runs at any join point matched by the pointcut (for example, + the execution of a method with a certain name). The concept of join points as matched + by pointcut expressions is central to AOP, and Spring uses the AspectJ pointcut + expression language by default. + +* Introduction: Declaring additional methods or fields on behalf of a type. 
Spring + AOP lets you introduce new interfaces (and a corresponding implementation) to any + advised object. For example, you could use an introduction to make a bean implement an`IsModified` interface, to simplify caching. (An introduction is known as an + inter-type declaration in the AspectJ community.) + +* Target object: An object being advised by one or more aspects. Also referred to as + the “advised object”. Since Spring AOP is implemented by using runtime proxies, this + object is always a proxied object. + +* AOP proxy: An object created by the AOP framework in order to implement the aspect + contracts (advise method executions and so on). In the Spring Framework, an AOP proxy + is a JDK dynamic proxy or a CGLIB proxy. + +* Weaving: linking aspects with other application types or objects to create an + advised object. This can be done at compile time (using the AspectJ compiler, for + example), load time, or at runtime. Spring AOP, like other pure Java AOP frameworks, + performs weaving at runtime. + +Spring AOP includes the following types of advice: + +* Before advice: Advice that runs before a join point but that does not have + the ability to prevent execution flow proceeding to the join point (unless it throws + an exception). + +* After returning advice: Advice to be run after a join point completes + normally (for example, if a method returns without throwing an exception). + +* After throwing advice: Advice to be run if a method exits by throwing an + exception. + +* After (finally) advice: Advice to be run regardless of the means by which a + join point exits (normal or exceptional return). + +* Around advice: Advice that surrounds a join point such as a method invocation. + This is the most powerful kind of advice. Around advice can perform custom behavior + before and after the method invocation. 
It is also responsible for choosing whether to + proceed to the join point or to shortcut the advised method execution by returning its + own return value or throwing an exception. + +Around advice is the most general kind of advice. Since Spring AOP, like AspectJ, +provides a full range of advice types, we recommend that you use the least powerful +advice type that can implement the required behavior. For example, if you need only to +update a cache with the return value of a method, you are better off implementing an +after returning advice than an around advice, although an around advice can accomplish +the same thing. Using the most specific advice type provides a simpler programming model +with less potential for errors. For example, you do not need to invoke the `proceed()`method on the `JoinPoint` used for around advice, and, hence, you cannot fail to invoke it. + +All advice parameters are statically typed so that you work with advice parameters of +the appropriate type (e.g. the type of the return value from a method execution) rather +than `Object` arrays. + +The concept of join points matched by pointcuts is the key to AOP, which distinguishes +it from older technologies offering only interception. Pointcuts enable advice to be +targeted independently of the object-oriented hierarchy. For example, you can apply an +around advice providing declarative transaction management to a set of methods that span +multiple objects (such as all business operations in the service layer). + +### 5.2. Spring AOP Capabilities and Goals + +Spring AOP is implemented in pure Java. There is no need for a special compilation +process. Spring AOP does not need to control the class loader hierarchy and is thus +suitable for use in a servlet container or application server. + +Spring AOP currently supports only method execution join points (advising the execution +of methods on Spring beans). 
Field interception is not implemented, although support for +field interception could be added without breaking the core Spring AOP APIs. If you need +to advise field access and update join points, consider a language such as AspectJ. + +Spring AOP’s approach to AOP differs from that of most other AOP frameworks. The aim is +not to provide the most complete AOP implementation (although Spring AOP is quite +capable). Rather, the aim is to provide a close integration between AOP implementation and +Spring IoC, to help solve common problems in enterprise applications. + +Thus, for example, the Spring Framework’s AOP functionality is normally used in +conjunction with the Spring IoC container. Aspects are configured by using normal bean +definition syntax (although this allows powerful “auto-proxying” capabilities). This is a +crucial difference from other AOP implementations. You cannot do some things +easily or efficiently with Spring AOP, such as advise very fine-grained objects (typically, +domain objects). AspectJ is the best choice in such cases. However, our +experience is that Spring AOP provides an excellent solution to most problems in +enterprise Java applications that are amenable to AOP. + +Spring AOP never strives to compete with AspectJ to provide a comprehensive AOP +solution. We believe that both proxy-based frameworks such as Spring AOP and full-blown +frameworks such as AspectJ are valuable and that they are complementary, rather than in +competition. Spring seamlessly integrates Spring AOP and IoC with AspectJ, to enable +all uses of AOP within a consistent Spring-based application +architecture. This integration does not affect the Spring AOP API or the AOP Alliance +API. Spring AOP remains backward-compatible. See [the following chapter](#aop-api)for a discussion of the Spring AOP APIs. + +| |One of the central tenets of the Spring Framework is that of non-invasiveness. This
is the idea that you should not be forced to introduce framework-specific classes and
interfaces into your business or domain model. However, in some places, the Spring Framework
does give you the option to introduce Spring Framework-specific dependencies into your
codebase. The rationale for giving you such options is that, in certain scenarios, it
might be just plain easier to read or code some specific piece of functionality in such
a way. However, the Spring Framework (almost) always offers you the choice: You have the
freedom to make an informed decision as to which option best suits your particular use
case or scenario.

One such choice that is relevant to this chapter is that of which AOP framework (and
which AOP style) to choose. You have the choice of AspectJ, Spring AOP, or both. You
also have the choice of either the @AspectJ annotation-style approach or the Spring XML
configuration-style approach. The fact that this chapter chooses to introduce the
@AspectJ-style approach first should not be taken as an indication that the Spring team
favors the @AspectJ annotation-style approach over the Spring XML configuration-style.

See [Choosing which AOP Declaration Style to Use](#aop-choosing) for a more complete discussion of the “whys and wherefores” of
each style.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 5.3. AOP Proxies + +Spring AOP defaults to using standard JDK dynamic proxies for AOP proxies. This +enables any interface (or set of interfaces) to be proxied. + +Spring AOP can also use CGLIB proxies. This is necessary to proxy classes rather than +interfaces. By default, CGLIB is used if a business object does not implement an +interface. As it is good practice to program to interfaces rather than classes, business +classes normally implement one or more business interfaces. 
It is possible to[force the use of CGLIB](#aop-proxying), in those (hopefully rare) cases where you +need to advise a method that is not declared on an interface or where you need to +pass a proxied object to a method as a concrete type. + +It is important to grasp the fact that Spring AOP is proxy-based. See[Understanding AOP Proxies](#aop-understanding-aop-proxies) for a thorough examination of exactly what this +implementation detail actually means. + +### 5.4. @AspectJ support + +@AspectJ refers to a style of declaring aspects as regular Java classes annotated with +annotations. The @AspectJ style was introduced by the[AspectJ project](https://www.eclipse.org/aspectj) as part of the AspectJ 5 release. Spring +interprets the same annotations as AspectJ 5, using a library supplied by AspectJ +for pointcut parsing and matching. The AOP runtime is still pure Spring AOP, though, and +there is no dependency on the AspectJ compiler or weaver. + +| |Using the AspectJ compiler and weaver enables use of the full AspectJ language and
is discussed in [Using AspectJ with Spring Applications](#aop-using-aspectj).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.4.1. Enabling @AspectJ Support + +To use @AspectJ aspects in a Spring configuration, you need to enable Spring support for +configuring Spring AOP based on @AspectJ aspects and auto-proxying beans based on +whether or not they are advised by those aspects. By auto-proxying, we mean that, if Spring +determines that a bean is advised by one or more aspects, it automatically generates +a proxy for that bean to intercept method invocations and ensures that advice is run +as needed. + +The @AspectJ support can be enabled with XML- or Java-style configuration. In either +case, you also need to ensure that AspectJ’s `aspectjweaver.jar` library is on the +classpath of your application (version 1.8 or later). This library is available in the`lib` directory of an AspectJ distribution or from the Maven Central repository. + +##### Enabling @AspectJ Support with Java Configuration + +To enable @AspectJ support with Java `@Configuration`, add the `@EnableAspectJAutoProxy`annotation, as the following example shows: + +Java + +``` +@Configuration +@EnableAspectJAutoProxy +public class AppConfig { + +} +``` + +Kotlin + +``` +@Configuration +@EnableAspectJAutoProxy +class AppConfig +``` + +##### Enabling @AspectJ Support with XML Configuration + +To enable @AspectJ support with XML-based configuration, use the `aop:aspectj-autoproxy`element, as the following example shows: + +``` + +``` + +This assumes that you use schema support as described in[XML Schema-based configuration](#xsd-schemas). +See [the AOP schema](#xsd-schemas-aop) for how to +import the tags in the `aop` namespace. + +#### 5.4.2. 
Declaring an Aspect + +With @AspectJ support enabled, any bean defined in your application context with a +class that is an @AspectJ aspect (has the `@Aspect` annotation) is automatically +detected by Spring and used to configure Spring AOP. The next two examples show the +minimal definition required for a not-very-useful aspect. + +The first of the two example shows a regular bean definition in the application +context that points to a bean class that has the `@Aspect` annotation: + +``` + + + +``` + +The second of the two examples shows the `NotVeryUsefulAspect` class definition, +which is annotated with the `org.aspectj.lang.annotation.Aspect` annotation; + +Java + +``` +package org.xyz; +import org.aspectj.lang.annotation.Aspect; + +@Aspect +public class NotVeryUsefulAspect { + +} +``` + +Kotlin + +``` +package org.xyz + +import org.aspectj.lang.annotation.Aspect; + +@Aspect +class NotVeryUsefulAspect +``` + +Aspects (classes annotated with `@Aspect`) can have methods and fields, the same as any +other class. They can also contain pointcut, advice, and introduction (inter-type) +declarations. + +| |Autodetecting aspects through component scanning

You can register aspect classes as regular beans in your Spring XML configuration,
via `@Bean` methods in `@Configuration` classes, or have Spring autodetect them through
classpath scanning — the same as any other Spring-managed bean. However, note that the `@Aspect` annotation is not sufficient for autodetection in the classpath. For that
purpose, you need to add a separate `@Component` annotation (or, alternatively, a custom
stereotype annotation that qualifies, as per the rules of Spring’s component scanner).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Advising aspects with other aspects?

In Spring AOP, aspects themselves cannot be the targets of advice from other
aspects. The `@Aspect` annotation on a class marks it as an aspect and, hence, excludes
it from auto-proxying.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.4.3. Declaring a Pointcut + +Pointcuts determine join points of interest and thus enable us to control +when advice runs. Spring AOP only supports method execution join points for Spring +beans, so you can think of a pointcut as matching the execution of methods on Spring +beans. A pointcut declaration has two parts: a signature comprising a name and any +parameters and a pointcut expression that determines exactly which method +executions we are interested in. In the @AspectJ annotation-style of AOP, a pointcut +signature is provided by a regular method definition, and the pointcut expression is +indicated by using the `@Pointcut` annotation (the method serving as the pointcut signature +must have a `void` return type). + +An example may help make this distinction between a pointcut signature and a pointcut +expression clear. The following example defines a pointcut named `anyOldTransfer` that +matches the execution of any method named `transfer`: + +Java + +``` +@Pointcut("execution(* transfer(..))") // the pointcut expression +private void anyOldTransfer() {} // the pointcut signature +``` + +Kotlin + +``` +@Pointcut("execution(* transfer(..))") // the pointcut expression +private fun anyOldTransfer() {} // the pointcut signature +``` + +The pointcut expression that forms the value of the `@Pointcut` annotation is a regular +AspectJ pointcut expression. 
For a full discussion of AspectJ’s pointcut language, see +the [AspectJ +Programming Guide](https://www.eclipse.org/aspectj/doc/released/progguide/index.html) (and, for extensions, the[AspectJ 5 +Developer’s Notebook](https://www.eclipse.org/aspectj/doc/released/adk15notebook/index.html)) or one of the books on AspectJ (such as *Eclipse AspectJ*, by Colyer +et al., or *AspectJ in Action*, by Ramnivas Laddad). + +##### Supported Pointcut Designators + +Spring AOP supports the following AspectJ pointcut designators (PCD) for use in pointcut +expressions: + +* `execution`: For matching method execution join points. This is the primary + pointcut designator to use when working with Spring AOP. + +* `within`: Limits matching to join points within certain types (the execution + of a method declared within a matching type when using Spring AOP). + +* `this`: Limits matching to join points (the execution of methods when using Spring + AOP) where the bean reference (Spring AOP proxy) is an instance of the given type. + +* `target`: Limits matching to join points (the execution of methods when using + Spring AOP) where the target object (application object being proxied) is an instance + of the given type. + +* `args`: Limits matching to join points (the execution of methods when using Spring + AOP) where the arguments are instances of the given types. + +* `@target`: Limits matching to join points (the execution of methods when using + Spring AOP) where the class of the executing object has an annotation of the given type. + +* `@args`: Limits matching to join points (the execution of methods when using Spring + AOP) where the runtime type of the actual arguments passed have annotations of the + given types. + +* `@within`: Limits matching to join points within types that have the given + annotation (the execution of methods declared in types with the given annotation when + using Spring AOP). 
+ +* `@annotation`: Limits matching to join points where the subject of the join point + (the method being run in Spring AOP) has the given annotation. + +Other pointcut types + +The full AspectJ pointcut language supports additional pointcut designators that are not +supported in Spring: `call`, `get`, `set`, `preinitialization`,`staticinitialization`, `initialization`, `handler`, `adviceexecution`, `withincode`, `cflow`,`cflowbelow`, `if`, `@this`, and `@withincode`. Use of these pointcut designators in pointcut +expressions interpreted by Spring AOP results in an `IllegalArgumentException` being +thrown. + +The set of pointcut designators supported by Spring AOP may be extended in future +releases to support more of the AspectJ pointcut designators. + +Because Spring AOP limits matching to only method execution join points, the preceding discussion +of the pointcut designators gives a narrower definition than you can find in the +AspectJ programming guide. In addition, AspectJ itself has type-based semantics and, at +an execution join point, both `this` and `target` refer to the same object: the +object executing the method. Spring AOP is a proxy-based system and differentiates +between the proxy object itself (which is bound to `this`) and the target object behind the +proxy (which is bound to `target`). + +| |Due to the proxy-based nature of Spring’s AOP framework, calls within the target object
are, by definition, not intercepted. For JDK proxies, only public interface method
calls on the proxy can be intercepted. With CGLIB, public and protected method calls on
the proxy are intercepted (and even package-visible methods, if necessary). However,
common interactions through proxies should always be designed through public signatures.

Note that pointcut definitions are generally matched against any intercepted method.
If a pointcut is strictly meant to be public-only, even in a CGLIB proxy scenario with
potential non-public interactions through proxies, it needs to be defined accordingly.

If your interception needs include method calls or even constructors within the target
class, consider the use of Spring-driven [native AspectJ weaving](#aop-aj-ltw) instead
of Spring’s proxy-based AOP framework. This constitutes a different mode of AOP usage
with different characteristics, so be sure to make yourself familiar with weaving
before making a decision.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring AOP also supports an additional PCD named `bean`. This PCD lets you limit +the matching of join points to a particular named Spring bean or to a set of named +Spring beans (when using wildcards). The `bean` PCD has the following form: + +Java + +``` +bean(idOrNameOfBean) +``` + +Kotlin + +``` +bean(idOrNameOfBean) +``` + +The `idOrNameOfBean` token can be the name of any Spring bean. Limited wildcard +support that uses the `*` character is provided, so, if you establish some naming +conventions for your Spring beans, you can write a `bean` PCD expression +to select them. As is the case with other pointcut designators, the `bean` PCD can +be used with the `&&` (and), `||` (or), and `!` (negation) operators, too. + +| |The `bean` PCD is supported only in Spring AOP and not in
native AspectJ weaving. It is a Spring-specific extension to the standard PCDs that
AspectJ defines and is, therefore, not available for aspects declared in the `@Aspect` model.

The `bean` PCD operates at the instance level (building on the Spring bean name
concept) rather than at the type level only (to which weaving-based AOP is limited).
Instance-based pointcut designators are a special capability of Spring’s
proxy-based AOP framework and its close integration with the Spring bean factory, where
it is natural and straightforward to identify specific beans by name.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Combining Pointcut Expressions + +You can combine pointcut expressions by using `&&,` `||` and `!`. You can also refer to +pointcut expressions by name. The following example shows three pointcut expressions: + +Java + +``` +@Pointcut("execution(public * *(..))") +private void anyPublicOperation() {} (1) + +@Pointcut("within(com.xyz.myapp.trading..*)") +private void inTrading() {} (2) + +@Pointcut("anyPublicOperation() && inTrading()") +private void tradingOperation() {} (3) +``` + +|**1**|`anyPublicOperation` matches if a method execution join point represents the execution
of any public method.| +|-----|----------------------------------------------------------------------------------------------------------------| +|**2**| `inTrading` matches if a method execution is in the trading module. | +|**3**| `tradingOperation` matches if a method execution represents any public method in the
trading module. | + +Kotlin + +``` +@Pointcut("execution(public * *(..))") +private fun anyPublicOperation() {} (1) + +@Pointcut("within(com.xyz.myapp.trading..*)") +private fun inTrading() {} (2) + +@Pointcut("anyPublicOperation() && inTrading()") +private fun tradingOperation() {} (3) +``` + +|**1**|`anyPublicOperation` matches if a method execution join point represents the execution
of any public method.| +|-----|----------------------------------------------------------------------------------------------------------------| +|**2**| `inTrading` matches if a method execution is in the trading module. | +|**3**| `tradingOperation` matches if a method execution represents any public method in the
trading module. | + +It is a best practice to build more complex pointcut expressions out of smaller named +components, as shown earlier. When referring to pointcuts by name, normal Java visibility +rules apply (you can see private pointcuts in the same type, protected pointcuts in the +hierarchy, public pointcuts anywhere, and so on). Visibility does not affect pointcut +matching. + +##### Sharing Common Pointcut Definitions + +When working with enterprise applications, developers often want to refer to modules of +the application and particular sets of operations from within several aspects. We +recommend defining a `CommonPointcuts` aspect that captures common pointcut expressions +for this purpose. Such an aspect typically resembles the following example: + +Java + +``` +package com.xyz.myapp; + +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.Pointcut; + +@Aspect +public class CommonPointcuts { + + /** + * A join point is in the web layer if the method is defined + * in a type in the com.xyz.myapp.web package or any sub-package + * under that. + */ + @Pointcut("within(com.xyz.myapp.web..*)") + public void inWebLayer() {} + + /** + * A join point is in the service layer if the method is defined + * in a type in the com.xyz.myapp.service package or any sub-package + * under that. + */ + @Pointcut("within(com.xyz.myapp.service..*)") + public void inServiceLayer() {} + + /** + * A join point is in the data access layer if the method is defined + * in a type in the com.xyz.myapp.dao package or any sub-package + * under that. + */ + @Pointcut("within(com.xyz.myapp.dao..*)") + public void inDataAccessLayer() {} + + /** + * A business service is the execution of any method defined on a service + * interface. This definition assumes that interfaces are placed in the + * "service" package, and that implementation types are in sub-packages. 
+ * + * If you group service interfaces by functional area (for example, + * in packages com.xyz.myapp.abc.service and com.xyz.myapp.def.service) then + * the pointcut expression "execution(* com.xyz.myapp..service.*.*(..))" + * could be used instead. + * + * Alternatively, you can write the expression using the 'bean' + * PCD, like so "bean(*Service)". (This assumes that you have + * named your Spring service beans in a consistent fashion.) + */ + @Pointcut("execution(* com.xyz.myapp..service.*.*(..))") + public void businessService() {} + + /** + * A data access operation is the execution of any method defined on a + * dao interface. This definition assumes that interfaces are placed in the + * "dao" package, and that implementation types are in sub-packages. + */ + @Pointcut("execution(* com.xyz.myapp.dao.*.*(..))") + public void dataAccessOperation() {} + +} +``` + +Kotlin + +``` +package com.xyz.myapp + +import org.aspectj.lang.annotation.Aspect +import org.aspectj.lang.annotation.Pointcut + +@Aspect +class CommonPointcuts { + + /** + * A join point is in the web layer if the method is defined + * in a type in the com.xyz.myapp.web package or any sub-package + * under that. + */ + @Pointcut("within(com.xyz.myapp.web..*)") + fun inWebLayer() { + } + + /** + * A join point is in the service layer if the method is defined + * in a type in the com.xyz.myapp.service package or any sub-package + * under that. + */ + @Pointcut("within(com.xyz.myapp.service..*)") + fun inServiceLayer() { + } + + /** + * A join point is in the data access layer if the method is defined + * in a type in the com.xyz.myapp.dao package or any sub-package + * under that. + */ + @Pointcut("within(com.xyz.myapp.dao..*)") + fun inDataAccessLayer() { + } + + /** + * A business service is the execution of any method defined on a service + * interface. This definition assumes that interfaces are placed in the + * "service" package, and that implementation types are in sub-packages. 
+ * + * If you group service interfaces by functional area (for example, + * in packages com.xyz.myapp.abc.service and com.xyz.myapp.def.service) then + * the pointcut expression "execution(* com.xyz.myapp..service.*.*(..))" + * could be used instead. + * + * Alternatively, you can write the expression using the 'bean' + * PCD, like so "bean(*Service)". (This assumes that you have + * named your Spring service beans in a consistent fashion.) + */ + @Pointcut("execution(* com.xyz.myapp..service.*.*(..))") + fun businessService() { + } + + /** + * A data access operation is the execution of any method defined on a + * dao interface. This definition assumes that interfaces are placed in the + * "dao" package, and that implementation types are in sub-packages. + */ + @Pointcut("execution(* com.xyz.myapp.dao.*.*(..))") + fun dataAccessOperation() { + } + +} +``` + +You can refer to the pointcuts defined in such an aspect anywhere you need a +pointcut expression. For example, to make the service layer transactional, you could +write the following: + +``` + + + + + + + + + +``` + +The `` and `` elements are discussed in [Schema-based AOP Support](#aop-schema). The +transaction elements are discussed in [Transaction Management](data-access.html#transaction). + +##### Examples + +Spring AOP users are likely to use the `execution` pointcut designator the most often. +The format of an execution expression follows: + +``` + execution(modifiers-pattern? ret-type-pattern declaring-type-pattern?name-pattern(param-pattern) + throws-pattern?) +``` + +All parts except the returning type pattern (`ret-type-pattern` in the preceding snippet), +the name pattern, and the parameters pattern are optional. The returning type pattern determines +what the return type of the method must be in order for a join point to be matched.`*` is most frequently used as the returning type pattern. It matches any return +type. 
A fully-qualified type name matches only when the method returns the given +type. The name pattern matches the method name. You can use the `*` wildcard as all or +part of a name pattern. If you specify a declaring type pattern, +include a trailing `.` to join it to the name pattern component. +The parameters pattern is slightly more complex: `()` matches a +method that takes no parameters, whereas `(..)` matches any number (zero or more) of parameters. +The `(*)` pattern matches a method that takes one parameter of any type.`(*,String)` matches a method that takes two parameters. The first can be of any type, while the +second must be a `String`. Consult the[Language +Semantics](https://www.eclipse.org/aspectj/doc/released/progguide/semantics-pointcuts.html) section of the AspectJ Programming Guide for more information. + +The following examples show some common pointcut expressions: + +* The execution of any public method: + + ``` + execution(public * *(..)) + ``` + +* The execution of any method with a name that begins with `set`: + + ``` + execution(* set*(..)) + ``` + +* The execution of any method defined by the `AccountService` interface: + + ``` + execution(* com.xyz.service.AccountService.*(..)) + ``` + +* The execution of any method defined in the `service` package: + + ``` + execution(* com.xyz.service.*.*(..)) + ``` + +* The execution of any method defined in the service package or one of its sub-packages: + + ``` + execution(* com.xyz.service..*.*(..)) + ``` + +* Any join point (method execution only in Spring AOP) within the service package: + + ``` + within(com.xyz.service.*) + ``` + +* Any join point (method execution only in Spring AOP) within the service package or one of its + sub-packages: + + ``` + within(com.xyz.service..*) + ``` + +* Any join point (method execution only in Spring AOP) where the proxy implements the`AccountService` interface: + + ``` + this(com.xyz.service.AccountService) + ``` + + | |`this` is more commonly used in a binding 
form. See the section on [Declaring Advice](#aop-advice)for how to make the proxy object available in the advice body.| + |---|----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +* Any join point (method execution only in Spring AOP) where the target object + implements the `AccountService` interface: + + ``` + target(com.xyz.service.AccountService) + ``` + + | |`target` is more commonly used in a binding form. See the [Declaring Advice](#aop-advice) section
for how to make the target object available in the advice body.| + |---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +* Any join point (method execution only in Spring AOP) that takes a single parameter + and where the argument passed at runtime is `Serializable`: + + ``` + args(java.io.Serializable) + ``` + + | |`args` is more commonly used in a binding form. See the [Declaring Advice](#aop-advice) section
for how to make the method arguments available in the advice body.| + |---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + + Note that the pointcut given in this example is different from `execution(* + *(java.io.Serializable))`. The args version matches if the argument passed at runtime is`Serializable`, and the execution version matches if the method signature declares a single + parameter of type `Serializable`. + +* Any join point (method execution only in Spring AOP) where the target object has a`@Transactional` annotation: + + ``` + @target(org.springframework.transaction.annotation.Transactional) + ``` + + | |You can also use `@target` in a binding form. See the [Declaring Advice](#aop-advice) section for
how to make the annotation object available in the advice body.| + |---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +* Any join point (method execution only in Spring AOP) where the declared type of the + target object has an `@Transactional` annotation: + + ``` + @within(org.springframework.transaction.annotation.Transactional) + ``` + + | |You can also use `@within` in a binding form. See the [Declaring Advice](#aop-advice) section for
how to make the annotation object available in the advice body.| + |---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +* Any join point (method execution only in Spring AOP) where the executing method has an`@Transactional` annotation: + + ``` + @annotation(org.springframework.transaction.annotation.Transactional) + ``` + + | |You can also use `@annotation` in a binding form. See the [Declaring Advice](#aop-advice) section
for how to make the annotation object available in the advice body.| + |---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +* Any join point (method execution only in Spring AOP) which takes a single parameter, + and where the runtime type of the argument passed has the `@Classified` annotation: + + ``` + @args(com.xyz.security.Classified) + ``` + + | |You can also use `@args` in a binding form. See the [Declaring Advice](#aop-advice) section
how to make the annotation object(s) available in the advice body.| + |---|------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +* Any join point (method execution only in Spring AOP) on a Spring bean named`tradeService`: + + ``` + bean(tradeService) + ``` + +* Any join point (method execution only in Spring AOP) on Spring beans having names that + match the wildcard expression `*Service`: + + ``` + bean(*Service) + ``` + +##### Writing Good Pointcuts + +During compilation, AspectJ processes pointcuts in order to optimize matching +performance. Examining code and determining if each join point matches (statically or +dynamically) a given pointcut is a costly process. (A dynamic match means the match +cannot be fully determined from static analysis and that a test is placed in the code to +determine if there is an actual match when the code is running). On first encountering a +pointcut declaration, AspectJ rewrites it into an optimal form for the matching +process. What does this mean? Basically, pointcuts are rewritten in DNF (Disjunctive +Normal Form) and the components of the pointcut are sorted such that those components +that are cheaper to evaluate are checked first. This means you do not have to worry +about understanding the performance of various pointcut designators and may supply them +in any order in a pointcut declaration. + +However, AspectJ can work only with what it is told. For optimal performance of +matching, you should think about what they are trying to achieve and narrow the search +space for matches as much as possible in the definition. The existing designators +naturally fall into one of three groups: kinded, scoping, and contextual: + +* Kinded designators select a particular kind of join point:`execution`, `get`, `set`, `call`, and `handler`. 
+ +* Scoping designators select a group of join points of interest + (probably of many kinds): `within` and `withincode` + +* Contextual designators match (and optionally bind) based on context:`this`, `target`, and `@annotation` + +A well written pointcut should include at least the first two types (kinded and +scoping). You can include the contextual designators to match based on +join point context or bind that context for use in the advice. Supplying only a +kinded designator or only a contextual designator works but could affect weaving +performance (time and memory used), due to extra processing and analysis. Scoping +designators are very fast to match, and using them means AspectJ can very quickly +dismiss groups of join points that should not be further processed. A good +pointcut should always include one if possible. + +#### 5.4.4. Declaring Advice + +Advice is associated with a pointcut expression and runs before, after, or around +method executions matched by the pointcut. The pointcut expression may be either a +simple reference to a named pointcut or a pointcut expression declared in place. + +##### Before Advice + +You can declare before advice in an aspect by using the `@Before` annotation: + +Java + +``` +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.Before; + +@Aspect +public class BeforeExample { + + @Before("com.xyz.myapp.CommonPointcuts.dataAccessOperation()") + public void doAccessCheck() { + // ... + } +} +``` + +Kotlin + +``` +import org.aspectj.lang.annotation.Aspect +import org.aspectj.lang.annotation.Before + +@Aspect +class BeforeExample { + + @Before("com.xyz.myapp.CommonPointcuts.dataAccessOperation()") + fun doAccessCheck() { + // ... 
+ } +} +``` + +If we use an in-place pointcut expression, we could rewrite the preceding example as the +following example: + +Java + +``` +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.Before; + +@Aspect +public class BeforeExample { + + @Before("execution(* com.xyz.myapp.dao.*.*(..))") + public void doAccessCheck() { + // ... + } +} +``` + +Kotlin + +``` +import org.aspectj.lang.annotation.Aspect +import org.aspectj.lang.annotation.Before + +@Aspect +class BeforeExample { + + @Before("execution(* com.xyz.myapp.dao.*.*(..))") + fun doAccessCheck() { + // ... + } +} +``` + +##### After Returning Advice + +After returning advice runs when a matched method execution returns normally. +You can declare it by using the `@AfterReturning` annotation: + +Java + +``` +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.AfterReturning; + +@Aspect +public class AfterReturningExample { + + @AfterReturning("com.xyz.myapp.CommonPointcuts.dataAccessOperation()") + public void doAccessCheck() { + // ... + } +} +``` + +Kotlin + +``` +import org.aspectj.lang.annotation.Aspect +import org.aspectj.lang.annotation.AfterReturning + +@Aspect +class AfterReturningExample { + + @AfterReturning("com.xyz.myapp.CommonPointcuts.dataAccessOperation()") + fun doAccessCheck() { + // ... + } +} +``` + +| |You can have multiple advice declarations (and other members as well),
all inside the same aspect. We show only a single advice declaration in these
examples to focus the effect of each one.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Sometimes, you need access in the advice body to the actual value that was returned. +You can use the form of `@AfterReturning` that binds the return value to get that +access, as the following example shows: + +Java + +``` +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.AfterReturning; + +@Aspect +public class AfterReturningExample { + + @AfterReturning( + pointcut="com.xyz.myapp.CommonPointcuts.dataAccessOperation()", + returning="retVal") + public void doAccessCheck(Object retVal) { + // ... + } +} +``` + +Kotlin + +``` +import org.aspectj.lang.annotation.Aspect +import org.aspectj.lang.annotation.AfterReturning + +@Aspect +class AfterReturningExample { + + @AfterReturning( + pointcut = "com.xyz.myapp.CommonPointcuts.dataAccessOperation()", + returning = "retVal") + fun doAccessCheck(retVal: Any) { + // ... + } +} +``` + +The name used in the `returning` attribute must correspond to the name of a parameter +in the advice method. When a method execution returns, the return value is passed to +the advice method as the corresponding argument value. A `returning` clause also +restricts matching to only those method executions that return a value of the +specified type (in this case, `Object`, which matches any return value). + +Please note that it is not possible to return a totally different reference when +using after returning advice. + +##### After Throwing Advice + +After throwing advice runs when a matched method execution exits by throwing an +exception. 
You can declare it by using the `@AfterThrowing` annotation, as the +following example shows: + +Java + +``` +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.AfterThrowing; + +@Aspect +public class AfterThrowingExample { + + @AfterThrowing("com.xyz.myapp.CommonPointcuts.dataAccessOperation()") + public void doRecoveryActions() { + // ... + } +} +``` + +Kotlin + +``` +import org.aspectj.lang.annotation.Aspect +import org.aspectj.lang.annotation.AfterThrowing + +@Aspect +class AfterThrowingExample { + + @AfterThrowing("com.xyz.myapp.CommonPointcuts.dataAccessOperation()") + fun doRecoveryActions() { + // ... + } +} +``` + +Often, you want the advice to run only when exceptions of a given type are thrown, +and you also often need access to the thrown exception in the advice body. You can +use the `throwing` attribute to both restrict matching (if desired — use `Throwable`as the exception type otherwise) and bind the thrown exception to an advice parameter. +The following example shows how to do so: + +Java + +``` +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.AfterThrowing; + +@Aspect +public class AfterThrowingExample { + + @AfterThrowing( + pointcut="com.xyz.myapp.CommonPointcuts.dataAccessOperation()", + throwing="ex") + public void doRecoveryActions(DataAccessException ex) { + // ... + } +} +``` + +Kotlin + +``` +import org.aspectj.lang.annotation.Aspect +import org.aspectj.lang.annotation.AfterThrowing + +@Aspect +class AfterThrowingExample { + + @AfterThrowing( + pointcut = "com.xyz.myapp.CommonPointcuts.dataAccessOperation()", + throwing = "ex") + fun doRecoveryActions(ex: DataAccessException) { + // ... + } +} +``` + +The name used in the `throwing` attribute must correspond to the name of a parameter in +the advice method. When a method execution exits by throwing an exception, the exception +is passed to the advice method as the corresponding argument value. 
A `throwing` clause +also restricts matching to only those method executions that throw an exception of the +specified type (`DataAccessException`, in this case). + +| |Note that `@AfterThrowing` does not indicate a general exception handling callback.
Specifically, an `@AfterThrowing` advice method is only supposed to receive exceptions
from the join point (user-declared target method) itself but not from an accompanying `@After`/`@AfterReturning` method.|
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+##### After (Finally) Advice
+
+After (finally) advice runs when a matched method execution exits. It is declared by
+using the `@After` annotation. After advice must be prepared to handle both normal and
+exception return conditions. It is typically used for releasing resources and similar
+purposes. The following example shows how to use after finally advice:
+
+Java
+
+```
+import org.aspectj.lang.annotation.Aspect;
+import org.aspectj.lang.annotation.After;
+
+@Aspect
+public class AfterFinallyExample {
+
+    @After("com.xyz.myapp.CommonPointcuts.dataAccessOperation()")
+    public void doReleaseLock() {
+        // ...
+    }
+}
+```
+
+Kotlin
+
+```
+import org.aspectj.lang.annotation.Aspect
+import org.aspectj.lang.annotation.After
+
+@Aspect
+class AfterFinallyExample {
+
+    @After("com.xyz.myapp.CommonPointcuts.dataAccessOperation()")
+    fun doReleaseLock() {
+        // ...
+    }
+}
+```
+
+| |Note that `@After` advice in AspectJ is defined as "after finally advice", analogous
to a finally block in a try-catch statement. It will be invoked for any outcome,
normal return or exception thrown from the join point (user-declared target method),
in contrast to `@AfterReturning` which only applies to successful normal returns.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Around Advice + +The last kind of advice is *around* advice. Around advice runs "around" a matched +method’s execution. It has the opportunity to do work both before and after the method +runs and to determine when, how, and even if the method actually gets to run at all. +Around advice is often used if you need to share state before and after a method +execution in a thread-safe manner – for example, starting and stopping a timer. + +| |Always use the least powerful form of advice that meets your requirements.

For example, do not use *around* advice if *before* advice is sufficient for your needs.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Around advice is declared by annotating a method with the `@Around` annotation. The +method should declare `Object` as its return type, and the first parameter of the method +must be of type `ProceedingJoinPoint`. Within the body of the advice method, you must +invoke `proceed()` on the `ProceedingJoinPoint` in order for the underlying method to +run. Invoking `proceed()` without arguments will result in the caller’s original +arguments being supplied to the underlying method when it is invoked. For advanced use +cases, there is an overloaded variant of the `proceed()` method which accepts an array of +arguments (`Object[]`). The values in the array will be used as the arguments to the +underlying method when it is invoked. + +| |The behavior of `proceed` when called with an `Object[]` is a little different than the
behavior of `proceed` for around advice compiled by the AspectJ compiler. For around
advice written using the traditional AspectJ language, the number of arguments passed to `proceed` must match the number of arguments passed to the around advice (not the number
of arguments taken by the underlying join point), and the value passed to `proceed` in a
given argument position supplants the original value at the join point for the entity the
value was bound to (do not worry if this does not make sense right now).

The approach taken by Spring is simpler and a better match to its proxy-based,
execution-only semantics. You only need to be aware of this difference if you compile `@AspectJ` aspects written for Spring and use `proceed` with arguments with the AspectJ
compiler and weaver. There is a way to write such aspects that is 100% compatible across
both Spring AOP and AspectJ, and this is discussed in the[following section on advice parameters](#aop-ataspectj-advice-proceeding-with-the-call).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The value returned by the around advice is the return value seen by the caller of the +method. For example, a simple caching aspect could return a value from a cache if it has +one or invoke `proceed()` (and return that value) if it does not. Note that `proceed`may be invoked once, many times, or not at all within the body of the around advice. All +of these are legal. + +| |If you declare the return type of your around advice method as `void`, `null`will always be returned to the caller, effectively ignoring the result of any invocation
of `proceed()`. It is therefore recommended that an around advice method declare a return
type of `Object`. The advice method should typically return the value returned from an
invocation of `proceed()`, even if the underlying method has a `void` return type.
However, the advice may optionally return a cached value, a wrapped value, or some other
value depending on the use case.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to use around advice: + +Java + +``` +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.Around; +import org.aspectj.lang.ProceedingJoinPoint; + +@Aspect +public class AroundExample { + + @Around("com.xyz.myapp.CommonPointcuts.businessService()") + public Object doBasicProfiling(ProceedingJoinPoint pjp) throws Throwable { + // start stopwatch + Object retVal = pjp.proceed(); + // stop stopwatch + return retVal; + } +} +``` + +Kotlin + +``` +import org.aspectj.lang.annotation.Aspect +import org.aspectj.lang.annotation.Around +import org.aspectj.lang.ProceedingJoinPoint + +@Aspect +class AroundExample { + + @Around("com.xyz.myapp.CommonPointcuts.businessService()") + fun doBasicProfiling(pjp: ProceedingJoinPoint): Any { + // start stopwatch + val retVal = pjp.proceed() + // stop stopwatch + return retVal + } +} +``` + +##### Advice Parameters + +Spring offers fully typed advice, meaning that you declare the parameters you need in the +advice signature (as we saw earlier for the returning and throwing examples) rather than +work with `Object[]` arrays all the time. We see how to make argument and other contextual +values available to the advice body later in this section. First, we take a look at how to +write generic advice that can find out about the method the advice is currently advising. 
+ +###### Access to the Current `JoinPoint` + +Any advice method may declare, as its first parameter, a parameter of type`org.aspectj.lang.JoinPoint`. Note that around advice is required to declare a first +parameter of type `ProceedingJoinPoint`, which is a subclass of `JoinPoint`. + +The `JoinPoint` interface provides a number of useful methods: + +* `getArgs()`: Returns the method arguments. + +* `getThis()`: Returns the proxy object. + +* `getTarget()`: Returns the target object. + +* `getSignature()`: Returns a description of the method that is being advised. + +* `toString()`: Prints a useful description of the method being advised. + +See the [javadoc](https://www.eclipse.org/aspectj/doc/released/runtime-api/org/aspectj/lang/JoinPoint.html) for more detail. + +###### Passing Parameters to Advice + +We have already seen how to bind the returned value or exception value (using after +returning and after throwing advice). To make argument values available to the advice +body, you can use the binding form of `args`. If you use a parameter name in place of a +type name in an `args` expression, the value of the corresponding argument is passed as +the parameter value when the advice is invoked. An example should make this clearer. +Suppose you want to advise the execution of DAO operations that take an `Account`object as the first parameter, and you need access to the account in the advice body. +You could write the following: + +Java + +``` +@Before("com.xyz.myapp.CommonPointcuts.dataAccessOperation() && args(account,..)") +public void validateAccount(Account account) { + // ... +} +``` + +Kotlin + +``` +@Before("com.xyz.myapp.CommonPointcuts.dataAccessOperation() && args(account,..)") +fun validateAccount(account: Account) { + // ... +} +``` + +The `args(account,..)` part of the pointcut expression serves two purposes. 
First, it +restricts matching to only those method executions where the method takes at least one +parameter, and the argument passed to that parameter is an instance of `Account`. +Second, it makes the actual `Account` object available to the advice through the `account`parameter. + +Another way of writing this is to declare a pointcut that "provides" the `Account`object value when it matches a join point, and then refer to the named pointcut +from the advice. This would look as follows: + +Java + +``` +@Pointcut("com.xyz.myapp.CommonPointcuts.dataAccessOperation() && args(account,..)") +private void accountDataAccessOperation(Account account) {} + +@Before("accountDataAccessOperation(account)") +public void validateAccount(Account account) { + // ... +} +``` + +Kotlin + +``` +@Pointcut("com.xyz.myapp.CommonPointcuts.dataAccessOperation() && args(account,..)") +private fun accountDataAccessOperation(account: Account) { +} + +@Before("accountDataAccessOperation(account)") +fun validateAccount(account: Account) { + // ... +} +``` + +See the AspectJ programming guide for more details. + +The proxy object (`this`), target object (`target`), and annotations (`@within`,`@target`, `@annotation`, and `@args`) can all be bound in a similar fashion. 
The next +two examples show how to match the execution of methods annotated with an `@Auditable`annotation and extract the audit code: + +The first of the two examples shows the definition of the `@Auditable` annotation: + +Java + +``` +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +public @interface Auditable { + AuditCode value(); +} +``` + +Kotlin + +``` +@Retention(AnnotationRetention.RUNTIME) +@Target(AnnotationTarget.FUNCTION) +annotation class Auditable(val value: AuditCode) +``` + +The second of the two examples shows the advice that matches the execution of `@Auditable` methods: + +Java + +``` +@Before("com.xyz.lib.Pointcuts.anyPublicMethod() && @annotation(auditable)") +public void audit(Auditable auditable) { + AuditCode code = auditable.value(); + // ... +} +``` + +Kotlin + +``` +@Before("com.xyz.lib.Pointcuts.anyPublicMethod() && @annotation(auditable)") +fun audit(auditable: Auditable) { + val code = auditable.value() + // ... +} +``` + +###### Advice Parameters and Generics + +Spring AOP can handle generics used in class declarations and method parameters. 
Suppose +you have a generic type like the following: + +Java + +``` +public interface Sample { + void sampleGenericMethod(T param); + void sampleGenericCollectionMethod(Collection param); +} +``` + +Kotlin + +``` +interface Sample { + fun sampleGenericMethod(param: T) + fun sampleGenericCollectionMethod(param: Collection) +} +``` + +You can restrict interception of method types to certain parameter types by +tying the advice parameter to the parameter type for which you want to intercept the method: + +Java + +``` +@Before("execution(* ..Sample+.sampleGenericMethod(*)) && args(param)") +public void beforeSampleMethod(MyType param) { + // Advice implementation +} +``` + +Kotlin + +``` +@Before("execution(* ..Sample+.sampleGenericMethod(*)) && args(param)") +fun beforeSampleMethod(param: MyType) { + // Advice implementation +} +``` + +This approach does not work for generic collections. So you cannot define a +pointcut as follows: + +Java + +``` +@Before("execution(* ..Sample+.sampleGenericCollectionMethod(*)) && args(param)") +public void beforeSampleMethod(Collection param) { + // Advice implementation +} +``` + +Kotlin + +``` +@Before("execution(* ..Sample+.sampleGenericCollectionMethod(*)) && args(param)") +fun beforeSampleMethod(param: Collection) { + // Advice implementation +} +``` + +To make this work, we would have to inspect every element of the collection, which is not +reasonable, as we also cannot decide how to treat `null` values in general. To achieve +something similar to this, you have to type the parameter to `Collection` and manually +check the type of the elements. + +###### Determining Argument Names + +The parameter binding in advice invocations relies on matching names used in pointcut +expressions to declared parameter names in advice and pointcut method signatures. 
+Parameter names are not available through Java reflection, so Spring AOP uses the +following strategy to determine parameter names: + +* If the parameter names have been explicitly specified by the user, the specified + parameter names are used. Both the advice and the pointcut annotations have + an optional `argNames` attribute that you can use to specify the argument names of + the annotated method. These argument names are available at runtime. The following example + shows how to use the `argNames` attribute: + +Java + +``` +@Before(value="com.xyz.lib.Pointcuts.anyPublicMethod() && target(bean) && @annotation(auditable)", + argNames="bean,auditable") +public void audit(Object bean, Auditable auditable) { + AuditCode code = auditable.value(); + // ... use code and bean +} +``` + +Kotlin + +``` +@Before(value = "com.xyz.lib.Pointcuts.anyPublicMethod() && target(bean) && @annotation(auditable)", argNames = "bean,auditable") +fun audit(bean: Any, auditable: Auditable) { + val code = auditable.value() + // ... use code and bean +} +``` + +If the first parameter is of the `JoinPoint`, `ProceedingJoinPoint`, or`JoinPoint.StaticPart` type, you can leave out the name of the parameter from the value +of the `argNames` attribute. For example, if you modify the preceding advice to receive +the join point object, the `argNames` attribute need not include it: + +Java + +``` +@Before(value="com.xyz.lib.Pointcuts.anyPublicMethod() && target(bean) && @annotation(auditable)", + argNames="bean,auditable") +public void audit(JoinPoint jp, Object bean, Auditable auditable) { + AuditCode code = auditable.value(); + // ... use code, bean, and jp +} +``` + +Kotlin + +``` +@Before(value = "com.xyz.lib.Pointcuts.anyPublicMethod() && target(bean) && @annotation(auditable)", argNames = "bean,auditable") +fun audit(jp: JoinPoint, bean: Any, auditable: Auditable) { + val code = auditable.value() + // ... 
use code, bean, and jp +} +``` + +The special treatment given to the first parameter of the `JoinPoint`,`ProceedingJoinPoint`, and `JoinPoint.StaticPart` types is particularly convenient for +advice instances that do not collect any other join point context. In such situations, you may +omit the `argNames` attribute. For example, the following advice need not declare +the `argNames` attribute: + +Java + +``` +@Before("com.xyz.lib.Pointcuts.anyPublicMethod()") +public void audit(JoinPoint jp) { + // ... use jp +} +``` + +Kotlin + +``` +@Before("com.xyz.lib.Pointcuts.anyPublicMethod()") +fun audit(jp: JoinPoint) { + // ... use jp +} +``` + +* Using the `argNames` attribute is a little clumsy, so if the `argNames` attribute + has not been specified, Spring AOP looks at the debug information for the + class and tries to determine the parameter names from the local variable table. This + information is present as long as the classes have been compiled with debug + information (`-g:vars` at a minimum). The consequences of compiling with this flag + on are: (1) your code is slightly easier to understand (reverse engineer), (2) + the class file sizes are very slightly bigger (typically inconsequential), (3) the + optimization to remove unused local variables is not applied by your compiler. In + other words, you should encounter no difficulties by building with this flag on. + + | |If an @AspectJ aspect has been compiled by the AspectJ compiler (`ajc`) even
without the debug information, you need not add the `argNames` attribute, as the compiler will
retain the needed information.| + |---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +* If the code has been compiled without the necessary debug information, Spring AOP + tries to deduce the pairing of binding variables to parameters (for example, if + only one variable is bound in the pointcut expression, and the advice method + takes only one parameter, the pairing is obvious). If the binding of variables is + ambiguous given the available information, an `AmbiguousBindingException` is + thrown. + +* If all of the above strategies fail, an `IllegalArgumentException` is thrown. + +###### Proceeding with Arguments + +We remarked earlier that we would describe how to write a `proceed` call with +arguments that works consistently across Spring AOP and AspectJ. The solution is +to ensure that the advice signature binds each of the method parameters in order. +The following example shows how to do so: + +Java + +``` +@Around("execution(List find*(..)) && " + + "com.xyz.myapp.CommonPointcuts.inDataAccessLayer() && " + + "args(accountHolderNamePattern)") +public Object preProcessQueryPattern(ProceedingJoinPoint pjp, + String accountHolderNamePattern) throws Throwable { + String newPattern = preProcess(accountHolderNamePattern); + return pjp.proceed(new Object[] {newPattern}); +} +``` + +Kotlin + +``` +@Around("execution(List find*(..)) && " + + "com.xyz.myapp.CommonPointcuts.inDataAccessLayer() && " + + "args(accountHolderNamePattern)") +fun preProcessQueryPattern(pjp: ProceedingJoinPoint, + accountHolderNamePattern: String): Any { + val newPattern = preProcess(accountHolderNamePattern) + return pjp.proceed(arrayOf(newPattern)) +} +``` + +In many cases, you do this binding anyway (as in the preceding example). 
+ +##### Advice Ordering + +What happens when multiple pieces of advice all want to run at the same join point? +Spring AOP follows the same precedence rules as AspectJ to determine the order of advice +execution. The highest precedence advice runs first "on the way in" (so, given two pieces +of before advice, the one with highest precedence runs first). "On the way out" from a +join point, the highest precedence advice runs last (so, given two pieces of after +advice, the one with the highest precedence will run second). + +When two pieces of advice defined in different aspects both need to run at the same +join point, unless you specify otherwise, the order of execution is undefined. You can +control the order of execution by specifying precedence. This is done in the normal +Spring way by either implementing the `org.springframework.core.Ordered` interface in +the aspect class or annotating it with the `@Order` annotation. Given two aspects, the +aspect returning the lower value from `Ordered.getOrder()` (or the annotation value) has +the higher precedence. + +| |Each of the distinct advice types of a particular aspect is conceptually meant to apply
to the join point directly. As a consequence, an `@AfterThrowing` advice method is not
supposed to receive an exception from an accompanying `@After`/`@AfterReturning` method.

As of Spring Framework 5.2.7, advice methods defined in the same `@Aspect` class that
need to run at the same join point are assigned precedence based on their advice type in
the following order, from highest to lowest precedence: `@Around`, `@Before`, `@After`, `@AfterReturning`, `@AfterThrowing`. Note, however, that an `@After` advice method will
effectively be invoked after any `@AfterReturning` or `@AfterThrowing` advice methods
in the same aspect, following AspectJ’s "after finally advice" semantics for `@After`.

When two pieces of the same type of advice (for example, two `@After` advice methods)
defined in the same `@Aspect` class both need to run at the same join point, the ordering
is undefined (since there is no way to retrieve the source code declaration order through
reflection for javac-compiled classes). Consider collapsing such advice methods into one
advice method per join point in each `@Aspect` class or refactor the pieces of advice into
separate `@Aspect` classes that you can order at the aspect level via `Ordered` or `@Order`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.4.5. Introductions + +Introductions (known as inter-type declarations in AspectJ) enable an aspect to declare +that advised objects implement a given interface, and to provide an implementation of +that interface on behalf of those objects. + +You can make an introduction by using the `@DeclareParents` annotation. This annotation +is used to declare that matching types have a new parent (hence the name). 
For example, +given an interface named `UsageTracked` and an implementation of that interface named`DefaultUsageTracked`, the following aspect declares that all implementors of service +interfaces also implement the `UsageTracked` interface (e.g. for statistics via JMX): + +Java + +``` +@Aspect +public class UsageTracking { + + @DeclareParents(value="com.xzy.myapp.service.*+", defaultImpl=DefaultUsageTracked.class) + public static UsageTracked mixin; + + @Before("com.xyz.myapp.CommonPointcuts.businessService() && this(usageTracked)") + public void recordUsage(UsageTracked usageTracked) { + usageTracked.incrementUseCount(); + } + +} +``` + +Kotlin + +``` +@Aspect +class UsageTracking { + + companion object { + @DeclareParents(value = "com.xzy.myapp.service.*+", defaultImpl = DefaultUsageTracked::class) + lateinit var mixin: UsageTracked + } + + @Before("com.xyz.myapp.CommonPointcuts.businessService() && this(usageTracked)") + fun recordUsage(usageTracked: UsageTracked) { + usageTracked.incrementUseCount() + } +} +``` + +The interface to be implemented is determined by the type of the annotated field. The`value` attribute of the `@DeclareParents` annotation is an AspectJ type pattern. Any +bean of a matching type implements the `UsageTracked` interface. Note that, in the +before advice of the preceding example, service beans can be directly used as +implementations of the `UsageTracked` interface. If accessing a bean programmatically, +you would write the following: + +Java + +``` +UsageTracked usageTracked = (UsageTracked) context.getBean("myService"); +``` + +Kotlin + +``` +val usageTracked = context.getBean("myService") as UsageTracked +``` + +#### 5.4.6. Aspect Instantiation Models + +| |This is an advanced topic. If you are just starting out with AOP, you can safely skip
it until later.| +|---|---------------------------------------------------------------------------------------------------------| + +By default, there is a single instance of each aspect within the application +context. AspectJ calls this the singleton instantiation model. It is possible to define +aspects with alternate lifecycles. Spring supports AspectJ’s `perthis` and `pertarget`instantiation models; `percflow`, `percflowbelow`, and `pertypewithin` are not currently +supported. + +You can declare a `perthis` aspect by specifying a `perthis` clause in the `@Aspect`annotation. Consider the following example: + +Java + +``` +@Aspect("perthis(com.xyz.myapp.CommonPointcuts.businessService())") +public class MyAspect { + + private int someState; + + @Before("com.xyz.myapp.CommonPointcuts.businessService()") + public void recordServiceUsage() { + // ... + } +} +``` + +Kotlin + +``` +@Aspect("perthis(com.xyz.myapp.CommonPointcuts.businessService())") +class MyAspect { + + private val someState: Int = 0 + + @Before("com.xyz.myapp.CommonPointcuts.businessService()") + fun recordServiceUsage() { + // ... + } +} +``` + +In the preceding example, the effect of the `perthis` clause is that one aspect instance +is created for each unique service object that performs a business service (each unique +object bound to `this` at join points matched by the pointcut expression). The aspect +instance is created the first time that a method is invoked on the service object. The +aspect goes out of scope when the service object goes out of scope. Before the aspect +instance is created, none of the advice within it runs. As soon as the aspect instance +has been created, the advice declared within it runs at matched join points, but only +when the service object is the one with which this aspect is associated. See the AspectJ +Programming Guide for more information on `per` clauses. 
+ +The `pertarget` instantiation model works in exactly the same way as `perthis`, but it +creates one aspect instance for each unique target object at matched join points. + +#### 5.4.7. An AOP Example + +Now that you have seen how all the constituent parts work, we can put them together to do +something useful. + +The execution of business services can sometimes fail due to concurrency issues (for +example, a deadlock loser). If the operation is retried, it is likely to succeed +on the next try. For business services where it is appropriate to retry in such +conditions (idempotent operations that do not need to go back to the user for conflict +resolution), we want to transparently retry the operation to avoid the client seeing a`PessimisticLockingFailureException`. This is a requirement that clearly cuts across +multiple services in the service layer and, hence, is ideal for implementing through an +aspect. + +Because we want to retry the operation, we need to use around advice so that we can +call `proceed` multiple times. 
The following listing shows the basic aspect implementation: + +Java + +``` +@Aspect +public class ConcurrentOperationExecutor implements Ordered { + + private static final int DEFAULT_MAX_RETRIES = 2; + + private int maxRetries = DEFAULT_MAX_RETRIES; + private int order = 1; + + public void setMaxRetries(int maxRetries) { + this.maxRetries = maxRetries; + } + + public int getOrder() { + return this.order; + } + + public void setOrder(int order) { + this.order = order; + } + + @Around("com.xyz.myapp.CommonPointcuts.businessService()") + public Object doConcurrentOperation(ProceedingJoinPoint pjp) throws Throwable { + int numAttempts = 0; + PessimisticLockingFailureException lockFailureException; + do { + numAttempts++; + try { + return pjp.proceed(); + } + catch(PessimisticLockingFailureException ex) { + lockFailureException = ex; + } + } while(numAttempts <= this.maxRetries); + throw lockFailureException; + } +} +``` + +Kotlin + +``` +@Aspect +class ConcurrentOperationExecutor : Ordered { + + private val DEFAULT_MAX_RETRIES = 2 + private var maxRetries = DEFAULT_MAX_RETRIES + private var order = 1 + + fun setMaxRetries(maxRetries: Int) { + this.maxRetries = maxRetries + } + + override fun getOrder(): Int { + return this.order + } + + fun setOrder(order: Int) { + this.order = order + } + + @Around("com.xyz.myapp.CommonPointcuts.businessService()") + fun doConcurrentOperation(pjp: ProceedingJoinPoint): Any { + var numAttempts = 0 + var lockFailureException: PessimisticLockingFailureException + do { + numAttempts++ + try { + return pjp.proceed() + } catch (ex: PessimisticLockingFailureException) { + lockFailureException = ex + } + + } while (numAttempts <= this.maxRetries) + throw lockFailureException + } +} +``` + +Note that the aspect implements the `Ordered` interface so that we can set the precedence of +the aspect higher than the transaction advice (we want a fresh transaction each time we +retry). 
The `maxRetries` and `order` properties are both configured by Spring. The +main action happens in the `doConcurrentOperation` around advice. Notice that, for the +moment, we apply the retry logic to each `businessService()`. We try to proceed, +and if we fail with a `PessimisticLockingFailureException`, we try again, unless +we have exhausted all of our retry attempts. + +The corresponding Spring configuration follows: + +``` + + + + + + +``` + +To refine the aspect so that it retries only idempotent operations, we might define the following`Idempotent` annotation: + +Java + +``` +@Retention(RetentionPolicy.RUNTIME) +public @interface Idempotent { + // marker annotation +} +``` + +Kotlin + +``` +@Retention(AnnotationRetention.RUNTIME) +annotation class Idempotent// marker annotation +``` + +We can then use the annotation to annotate the implementation of service operations. The change +to the aspect to retry only idempotent operations involves refining the pointcut +expression so that only `@Idempotent` operations match, as follows: + +Java + +``` +@Around("com.xyz.myapp.CommonPointcuts.businessService() && " + + "@annotation(com.xyz.myapp.service.Idempotent)") +public Object doConcurrentOperation(ProceedingJoinPoint pjp) throws Throwable { + // ... +} +``` + +Kotlin + +``` +@Around("com.xyz.myapp.CommonPointcuts.businessService() && " + + "@annotation(com.xyz.myapp.service.Idempotent)") +fun doConcurrentOperation(pjp: ProceedingJoinPoint): Any { + // ... +} +``` + +### 5.5. Schema-based AOP Support + +If you prefer an XML-based format, Spring also offers support for defining aspects +using the `aop` namespace tags. The exact same pointcut expressions and advice kinds +as when using the @AspectJ style are supported. Hence, in this section we focus on +that syntax and refer the reader to the discussion in the previous section +([@AspectJ support](#aop-ataspectj)) for an understanding of writing pointcut expressions and the binding +of advice parameters. 
+ +To use the aop namespace tags described in this section, you need to import the`spring-aop` schema, as described in [XML Schema-based configuration](#xsd-schemas). See [the AOP schema](#xsd-schemas-aop)for how to import the tags in the `aop` namespace. + +Within your Spring configurations, all aspect and advisor elements must be placed within +an `` element (you can have more than one `` element in an +application context configuration). An `` element can contain pointcut, +advisor, and aspect elements (note that these must be declared in that order). + +| |The `` style of configuration makes heavy use of Spring’s[auto-proxying](#aop-autoproxy) mechanism. This can cause issues (such as advice
not being woven) if you already use explicit auto-proxying through the use of `BeanNameAutoProxyCreator` or something similar. The recommended usage pattern is to
use either only the `<aop:config>` style or only the `AutoProxyCreator` style and
never mix them.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.5.1. Declaring an Aspect + +When you use the schema support, an aspect is a regular Java object defined as a bean in +your Spring application context. The state and behavior are captured in the fields and +methods of the object, and the pointcut and advice information are captured in the XML. + +You can declare an aspect by using the `` element, and reference the backing bean +by using the `ref` attribute, as the following example shows: + +``` + + + ... + + + + + ... + +``` + +The bean that backs the aspect (`aBean` in this case) can of course be configured and +dependency injected just like any other Spring bean. + +#### 5.5.2. Declaring a Pointcut + +You can declare a named pointcut inside an `` element, letting the pointcut +definition be shared across several aspects and advisors. + +A pointcut that represents the execution of any business service in the service layer can +be defined as follows: + +``` + + + + + +``` + +Note that the pointcut expression itself is using the same AspectJ pointcut expression +language as described in [@AspectJ support](#aop-ataspectj). If you use the schema based declaration +style, you can refer to named pointcuts defined in types (@Aspects) within the +pointcut expression. Another way of defining the above pointcut would be as follows: + +``` + + + + + +``` + +Assume that you have a `CommonPointcuts` aspect as described in [Sharing Common Pointcut Definitions](#aop-common-pointcuts). 
+ +Then declaring a pointcut inside an aspect is very similar to declaring a top-level pointcut, +as the following example shows: + +``` + + + + + + + ... + + + +``` + +In much the same way as an @AspectJ aspect, pointcuts declared by using the schema based +definition style can collect join point context. For example, the following pointcut +collects the `this` object as the join point context and passes it to the advice: + +``` + + + + + + + + + ... + + + +``` + +The advice must be declared to receive the collected join point context by including +parameters of the matching names, as follows: + +Java + +``` +public void monitor(Object service) { + // ... +} +``` + +Kotlin + +``` +fun monitor(service: Any) { + // ... +} +``` + +When combining pointcut sub-expressions, `&&` is awkward within an XML +document, so you can use the `and`, `or`, and `not` keywords in place of `&&`,`||`, and `!`, respectively. For example, the previous pointcut can be better written as +follows: + +``` + + + + + + + + + ... + + +``` + +Note that pointcuts defined in this way are referred to by their XML `id` and cannot be +used as named pointcuts to form composite pointcuts. The named pointcut support in the +schema-based definition style is thus more limited than that offered by the @AspectJ +style. + +#### 5.5.3. Declaring Advice + +The schema-based AOP support uses the same five kinds of advice as the @AspectJ style, and they have +exactly the same semantics. + +##### Before Advice + +Before advice runs before a matched method execution. It is declared inside an`` by using the `` element, as the following example shows: + +``` + + + + + ... + + +``` + +Here, `dataAccessOperation` is the `id` of a pointcut defined at the top (``) +level. To define the pointcut inline instead, replace the `pointcut-ref` attribute with +a `pointcut` attribute, as follows: + +``` + + + + + ... 
+ +``` + +As we noted in the discussion of the @AspectJ style, using named pointcuts can +significantly improve the readability of your code. + +The `method` attribute identifies a method (`doAccessCheck`) that provides the body of +the advice. This method must be defined for the bean referenced by the aspect element +that contains the advice. Before a data access operation is performed (a method execution +join point matched by the pointcut expression), the `doAccessCheck` method on the aspect +bean is invoked. + +##### After Returning Advice + +After returning advice runs when a matched method execution completes normally. It is +declared inside an `` in the same way as before advice. The following example +shows how to declare it: + +``` + + + + + ... + +``` + +As in the @AspectJ style, you can get the return value within the advice body. +To do so, use the `returning` attribute to specify the name of the parameter to which +the return value should be passed, as the following example shows: + +``` + + + + + ... + +``` + +The `doAccessCheck` method must declare a parameter named `retVal`. The type of this +parameter constrains matching in the same way as described for `@AfterReturning`. For +example, you can declare the method signature as follows: + +Java + +``` +public void doAccessCheck(Object retVal) {... +``` + +Kotlin + +``` +fun doAccessCheck(retVal: Any) {... +``` + +##### After Throwing Advice + +After throwing advice runs when a matched method execution exits by throwing an +exception. It is declared inside an `` by using the `after-throwing` element, +as the following example shows: + +``` + + + + + ... + +``` + +As in the @AspectJ style, you can get the thrown exception within the advice body. +To do so, use the `throwing` attribute to specify the name of the parameter to +which the exception should be passed as the following example shows: + +``` + + + + + ... + +``` + +The `doRecoveryActions` method must declare a parameter named `dataAccessEx`. 
+The type of this parameter constrains matching in the same way as described for`@AfterThrowing`. For example, the method signature may be declared as follows: + +Java + +``` +public void doRecoveryActions(DataAccessException dataAccessEx) {... +``` + +Kotlin + +``` +fun doRecoveryActions(dataAccessEx: DataAccessException) {... +``` + +##### After (Finally) Advice + +After (finally) advice runs no matter how a matched method execution exits. +You can declare it by using the `after` element, as the following example shows: + +``` + + + + + ... + +``` + +##### Around Advice + +The last kind of advice is *around* advice. Around advice runs "around" a matched +method’s execution. It has the opportunity to do work both before and after the method +runs and to determine when, how, and even if the method actually gets to run at all. +Around advice is often used if you need to share state before and after a method +execution in a thread-safe manner – for example, starting and stopping a timer. + +| |Always use the least powerful form of advice that meets your requirements.

For example, do not use *around* advice if *before* advice is sufficient for your needs.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can declare around advice by using the `aop:around` element. The advice method should +declare `Object` as its return type, and the first parameter of the method must be of +type `ProceedingJoinPoint`. Within the body of the advice method, you must invoke `proceed()` on the `ProceedingJoinPoint` in order for the underlying method to run. +Invoking `proceed()` without arguments will result in the caller’s original arguments +being supplied to the underlying method when it is invoked. For advanced use cases, there +is an overloaded variant of the `proceed()` method which accepts an array of arguments +(`Object[]`). The values in the array will be used as the arguments to the underlying +method when it is invoked. See [Around Advice](#aop-ataspectj-around-advice) for notes on calling `proceed` with an `Object[]`. + +The following example shows how to declare around advice in XML: + +``` + + + + + ... + +``` + +The implementation of the `doBasicProfiling` advice can be exactly the same as in the +@AspectJ example (minus the annotation, of course), as the following example shows: + +Java + +``` +public Object doBasicProfiling(ProceedingJoinPoint pjp) throws Throwable { + // start stopwatch + Object retVal = pjp.proceed(); + // stop stopwatch + return retVal; +} +``` + +Kotlin + +``` +fun doBasicProfiling(pjp: ProceedingJoinPoint): Any { + // start stopwatch + val retVal = pjp.proceed() + // stop stopwatch + return retVal +} +``` + +##### Advice Parameters + +The schema-based declaration style supports fully typed advice in the same way as +described for the @AspectJ support — by matching pointcut parameters by name against +advice method parameters. 
See [Advice Parameters](#aop-ataspectj-advice-params) for details. If you wish +to explicitly specify argument names for the advice methods (not relying on the +detection strategies previously described), you can do so by using the `arg-names`attribute of the advice element, which is treated in the same manner as the `argNames`attribute in an advice annotation (as described in [Determining Argument Names](#aop-ataspectj-advice-params-names)). +The following example shows how to specify an argument name in XML: + +``` + +``` + +The `arg-names` attribute accepts a comma-delimited list of parameter names. + +The following slightly more involved example of the XSD-based approach shows +some around advice used in conjunction with a number of strongly typed parameters: + +Java + +``` +package x.y.service; + +public interface PersonService { + + Person getPerson(String personName, int age); +} + +public class DefaultPersonService implements PersonService { + + public Person getPerson(String name, int age) { + return new Person(name, age); + } +} +``` + +Kotlin + +``` +package x.y.service + +interface PersonService { + + fun getPerson(personName: String, age: Int): Person +} + +class DefaultPersonService : PersonService { + + fun getPerson(name: String, age: Int): Person { + return Person(name, age) + } +} +``` + +Next up is the aspect. Notice the fact that the `profile(..)` method accepts a number of +strongly-typed parameters, the first of which happens to be the join point used to +proceed with the method call. 
The presence of this parameter is an indication that the`profile(..)` is to be used as `around` advice, as the following example shows: + +Java + +``` +package x.y; + +import org.aspectj.lang.ProceedingJoinPoint; +import org.springframework.util.StopWatch; + +public class SimpleProfiler { + + public Object profile(ProceedingJoinPoint call, String name, int age) throws Throwable { + StopWatch clock = new StopWatch("Profiling for '" + name + "' and '" + age + "'"); + try { + clock.start(call.toShortString()); + return call.proceed(); + } finally { + clock.stop(); + System.out.println(clock.prettyPrint()); + } + } +} +``` + +Kotlin + +``` +import org.aspectj.lang.ProceedingJoinPoint +import org.springframework.util.StopWatch + +class SimpleProfiler { + + fun profile(call: ProceedingJoinPoint, name: String, age: Int): Any { + val clock = StopWatch("Profiling for '$name' and '$age'") + try { + clock.start(call.toShortString()) + return call.proceed() + } finally { + clock.stop() + println(clock.prettyPrint()) + } + } +} +``` + +Finally, the following example XML configuration effects the execution of the +preceding advice for a particular join point: + +``` + + + + + + + + + + + + + + + + + + + +``` + +Consider the following driver script: + +Java + +``` +import org.springframework.beans.factory.BeanFactory; +import org.springframework.context.support.ClassPathXmlApplicationContext; +import x.y.service.PersonService; + +public final class Boot { + + public static void main(final String[] args) throws Exception { + BeanFactory ctx = new ClassPathXmlApplicationContext("x/y/plain.xml"); + PersonService person = (PersonService) ctx.getBean("personService"); + person.getPerson("Pengo", 12); + } +} +``` + +Kotlin + +``` +fun main() { + val ctx = ClassPathXmlApplicationContext("x/y/plain.xml") + val person = ctx.getBean("personService") as PersonService + person.getPerson("Pengo", 12) +} +``` + +With such a Boot class, we would get output similar to the following on standard 
output: + +``` +StopWatch 'Profiling for 'Pengo' and '12': running time (millis) = 0 +----------------------------------------- +ms % Task name +----------------------------------------- +00000 ? execution(getFoo) +``` + +##### Advice Ordering + +When multiple pieces of advice need to run at the same join point (executing method) +the ordering rules are as described in [Advice Ordering](#aop-ataspectj-advice-ordering). The precedence +between aspects is determined via the `order` attribute in the `` element or +by either adding the `@Order` annotation to the bean that backs the aspect or by having +the bean implement the `Ordered` interface. + +| |In contrast to the precedence rules for advice methods defined in the same `@Aspect`class, when two pieces of advice defined in the same `` element both need to
run at the same join point, the precedence is determined by the order in which the advice
elements are declared within the enclosing `<aop:aspect>` element, from highest to lowest
precedence.

For example, given an `around` advice and a `before` advice defined in the same `<aop:aspect>` element that apply to the same join point, to ensure that the `around` advice has higher precedence than the `before` advice, the `<aop:around>` element must be
declared before the `<aop:before>` element.

As a general rule of thumb, if you find that you have multiple pieces of advice defined
in the same `<aop:aspect>` element that apply to the same join point, consider collapsing
such advice methods into one advice method per join point in each `<aop:aspect>` element
or refactor the pieces of advice into separate `<aop:aspect>` elements that you can order
at the aspect level.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.5.4. Introductions + +Introductions (known as inter-type declarations in AspectJ) let an aspect declare +that advised objects implement a given interface and provide an implementation of +that interface on behalf of those objects. + +You can make an introduction by using the `aop:declare-parents` element inside an `aop:aspect`. +You can use the `aop:declare-parents` element to declare that matching types have a new parent (hence the name). +For example, given an interface named `UsageTracked` and an implementation of that interface named`DefaultUsageTracked`, the following aspect declares that all implementors of service +interfaces also implement the `UsageTracked` interface. (In order to expose statistics +through JMX for example.) 
+ +``` + + + + + + + +``` + +The class that backs the `usageTracking` bean would then contain the following method: + +Java + +``` +public void recordUsage(UsageTracked usageTracked) { + usageTracked.incrementUseCount(); +} +``` + +Kotlin + +``` +fun recordUsage(usageTracked: UsageTracked) { + usageTracked.incrementUseCount() +} +``` + +The interface to be implemented is determined by the `implement-interface` attribute. The +value of the `types-matching` attribute is an AspectJ type pattern. Any bean of a +matching type implements the `UsageTracked` interface. Note that, in the before +advice of the preceding example, service beans can be directly used as implementations of +the `UsageTracked` interface. To access a bean programmatically, you could write the +following: + +Java + +``` +UsageTracked usageTracked = (UsageTracked) context.getBean("myService"); +``` + +Kotlin + +``` +val usageTracked = context.getBean("myService") as UsageTracked +``` + +#### 5.5.5. Aspect Instantiation Models + +The only supported instantiation model for schema-defined aspects is the singleton +model. Other instantiation models may be supported in future releases. + +#### 5.5.6. Advisors + +The concept of “advisors” comes from the AOP support defined in Spring +and does not have a direct equivalent in AspectJ. An advisor is like a small +self-contained aspect that has a single piece of advice. The advice itself is +represented by a bean and must implement one of the advice interfaces described in[Advice Types in Spring](#aop-api-advice-types). Advisors can take advantage of AspectJ pointcut expressions. + +Spring supports the advisor concept with the `` element. You most +commonly see it used in conjunction with transactional advice, which also has its own +namespace support in Spring. 
The following example shows an advisor: + +``` + + + + + + + + + + + + + +``` + +As well as the `pointcut-ref` attribute used in the preceding example, you can also use the`pointcut` attribute to define a pointcut expression inline. + +To define the precedence of an advisor so that the advice can participate in ordering, +use the `order` attribute to define the `Ordered` value of the advisor. + +#### 5.5.7. An AOP Schema Example + +This section shows how the concurrent locking failure retry example from[An AOP Example](#aop-ataspectj-example) looks when rewritten with the schema support. + +The execution of business services can sometimes fail due to concurrency issues (for +example, a deadlock loser). If the operation is retried, it is likely to succeed +on the next try. For business services where it is appropriate to retry in such +conditions (idempotent operations that do not need to go back to the user for conflict +resolution), we want to transparently retry the operation to avoid the client seeing a`PessimisticLockingFailureException`. This is a requirement that clearly cuts across +multiple services in the service layer and, hence, is ideal for implementing through an +aspect. + +Because we want to retry the operation, we need to use around advice so that we can +call `proceed` multiple times. 
The following listing shows the basic aspect implementation +(which is a regular Java class that uses the schema support): + +Java + +``` +public class ConcurrentOperationExecutor implements Ordered { + + private static final int DEFAULT_MAX_RETRIES = 2; + + private int maxRetries = DEFAULT_MAX_RETRIES; + private int order = 1; + + public void setMaxRetries(int maxRetries) { + this.maxRetries = maxRetries; + } + + public int getOrder() { + return this.order; + } + + public void setOrder(int order) { + this.order = order; + } + + public Object doConcurrentOperation(ProceedingJoinPoint pjp) throws Throwable { + int numAttempts = 0; + PessimisticLockingFailureException lockFailureException; + do { + numAttempts++; + try { + return pjp.proceed(); + } + catch(PessimisticLockingFailureException ex) { + lockFailureException = ex; + } + } while(numAttempts <= this.maxRetries); + throw lockFailureException; + } +} +``` + +Kotlin + +``` +class ConcurrentOperationExecutor : Ordered { + + private val DEFAULT_MAX_RETRIES = 2 + + private var maxRetries = DEFAULT_MAX_RETRIES + private var order = 1 + + fun setMaxRetries(maxRetries: Int) { + this.maxRetries = maxRetries + } + + override fun getOrder(): Int { + return this.order + } + + fun setOrder(order: Int) { + this.order = order + } + + fun doConcurrentOperation(pjp: ProceedingJoinPoint): Any { + var numAttempts = 0 + var lockFailureException: PessimisticLockingFailureException + do { + numAttempts++ + try { + return pjp.proceed() + } catch (ex: PessimisticLockingFailureException) { + lockFailureException = ex + } + + } while (numAttempts <= this.maxRetries) + throw lockFailureException + } +} +``` + +Note that the aspect implements the `Ordered` interface so that we can set the precedence of +the aspect higher than the transaction advice (we want a fresh transaction each time we +retry). The `maxRetries` and `order` properties are both configured by Spring. 
The +main action happens in the `doConcurrentOperation` around advice method. We try to +proceed. If we fail with a `PessimisticLockingFailureException`, we try again, +unless we have exhausted all of our retry attempts. + +| |This class is identical to the one used in the @AspectJ example, but with the
annotations removed.| +|---|------------------------------------------------------------------------------------------------------| + +The corresponding Spring configuration is as follows: + +``` + + + + + + + + + + + + + + + + +``` + +Notice that, for the time being, we assume that all business services are idempotent. If +this is not the case, we can refine the aspect so that it retries only genuinely +idempotent operations, by introducing an `Idempotent` annotation and using the annotation +to annotate the implementation of service operations, as the following example shows: + +Java + +``` +@Retention(RetentionPolicy.RUNTIME) +public @interface Idempotent { + // marker annotation +} +``` + +Kotlin + +``` +@Retention(AnnotationRetention.RUNTIME) +annotation class Idempotent { + // marker annotation +} +``` + +The +change to the aspect to retry only idempotent operations involves refining the +pointcut expression so that only `@Idempotent` operations match, as follows: + +``` + +``` + +### 5.6. Choosing which AOP Declaration Style to Use + +Once you have decided that an aspect is the best approach for implementing a given +requirement, how do you decide between using Spring AOP or AspectJ and between the +Aspect language (code) style, the @AspectJ annotation style, or the Spring XML style? These +decisions are influenced by a number of factors including application requirements, +development tools, and team familiarity with AOP. + +#### 5.6.1. Spring AOP or Full AspectJ? + +Use the simplest thing that can work. Spring AOP is simpler than using full AspectJ, as +there is no requirement to introduce the AspectJ compiler / weaver into your development +and build processes. If you only need to advise the execution of operations on Spring +beans, Spring AOP is the right choice. If you need to advise objects not managed by +the Spring container (such as domain objects, typically), you need to use +AspectJ. 
You also need to use AspectJ if you wish to advise join points other than +simple method executions (for example, field get or set join points and so on). + +When you use AspectJ, you have the choice of the AspectJ language syntax (also known as +the “code style”) or the @AspectJ annotation style. Clearly, if you do not use Java +5+, the choice has been made for you: Use the code style. If aspects play a large +role in your design, and you are able to use the [AspectJ +Development Tools (AJDT)](https://www.eclipse.org/ajdt/) plugin for Eclipse, the AspectJ language syntax is the +preferred option. It is cleaner and simpler because the language was purposefully +designed for writing aspects. If you do not use Eclipse or have only a few aspects +that do not play a major role in your application, you may want to consider using +the @AspectJ style, sticking with regular Java compilation in your IDE, and adding +an aspect weaving phase to your build script. + +#### 5.6.2. @AspectJ or XML for Spring AOP? + +If you have chosen to use Spring AOP, you have a choice of @AspectJ or XML style. +There are various tradeoffs to consider. + +The XML style may be most familiar to existing Spring users, and it is backed by genuine +POJOs. When using AOP as a tool to configure enterprise services, XML can be a good +choice (a good test is whether you consider the pointcut expression to be a part of your +configuration that you might want to change independently). With the XML style, it is +arguably clearer from your configuration which aspects are present in the system. + +The XML style has two disadvantages. First, it does not fully encapsulate the +implementation of the requirement it addresses in a single place. The DRY principle says +that there should be a single, unambiguous, authoritative representation of any piece of +knowledge within a system. 
When using the XML style, the knowledge of how a requirement +is implemented is split across the declaration of the backing bean class and the XML in +the configuration file. When you use the @AspectJ style, this information is encapsulated +in a single module: the aspect. Secondly, the XML style is slightly more limited in what +it can express than the @AspectJ style: Only the “singleton” aspect instantiation model +is supported, and it is not possible to combine named pointcuts declared in XML. +For example, in the @AspectJ style you can write something like the following: + +Java + +``` +@Pointcut("execution(* get*())") +public void propertyAccess() {} + +@Pointcut("execution(org.xyz.Account+ *(..))") +public void operationReturningAnAccount() {} + +@Pointcut("propertyAccess() && operationReturningAnAccount()") +public void accountPropertyAccess() {} +``` + +Kotlin + +``` +@Pointcut("execution(* get*())") +fun propertyAccess() {} + +@Pointcut("execution(org.xyz.Account+ *(..))") +fun operationReturningAnAccount() {} + +@Pointcut("propertyAccess() && operationReturningAnAccount()") +fun accountPropertyAccess() {} +``` + +In the XML style you can declare the first two pointcuts: + +``` + + + +``` + +The downside of the XML approach is that you cannot define the`accountPropertyAccess` pointcut by combining these definitions. + +The @AspectJ style supports additional instantiation models and richer pointcut +composition. It has the advantage of keeping the aspect as a modular unit. It also has +the advantage that the @AspectJ aspects can be understood (and thus consumed) both by +Spring AOP and by AspectJ. So, if you later decide you need the capabilities of AspectJ +to implement additional requirements, you can easily migrate to a classic AspectJ setup. +On balance, the Spring team prefers the @AspectJ style for custom aspects beyond simple +configuration of enterprise services. + +### 5.7. 
Mixing Aspect Types + +It is perfectly possible to mix @AspectJ style aspects by using the auto-proxying support, +schema-defined `` aspects, `` declared advisors, and even proxies +and interceptors in other styles in the same configuration. All of these are implemented +by using the same underlying support mechanism and can co-exist without any difficulty. + +### 5.8. Proxying Mechanisms + +Spring AOP uses either JDK dynamic proxies or CGLIB to create the proxy for a given +target object. JDK dynamic proxies are built into the JDK, whereas CGLIB is a common +open-source class definition library (repackaged into `spring-core`). + +If the target object to be proxied implements at least one interface, a JDK dynamic +proxy is used. All of the interfaces implemented by the target type are proxied. +If the target object does not implement any interfaces, a CGLIB proxy is created. + +If you want to force the use of CGLIB proxying (for example, to proxy every method +defined for the target object, not only those implemented by its interfaces), +you can do so. However, you should consider the following issues: + +* With CGLIB, `final` methods cannot be advised, as they cannot be overridden in + runtime-generated subclasses. + +* As of Spring 4.0, the constructor of your proxied object is NOT called twice anymore, + since the CGLIB proxy instance is created through Objenesis. Only if your JVM does + not allow for constructor bypassing, you might see double invocations and + corresponding debug log entries from Spring’s AOP support. + +To force the use of CGLIB proxies, set the value of the `proxy-target-class` attribute +of the `` element to true, as follows: + +``` + + + +``` + +To force CGLIB proxying when you use the @AspectJ auto-proxy support, set the`proxy-target-class` attribute of the `` element to `true`, +as follows: + +``` + +``` + +| |Multiple `` sections are collapsed into a single unified auto-proxy creator
at runtime, which applies the *strongest* proxy settings that any of the `<aop:config/>` sections (typically from different XML bean definition files) specified.
This also applies to the `<tx:annotation-driven/>` and `<aop:aspectj-autoproxy/>` elements.

To be clear, using `proxy-target-class="true"` on `<tx:annotation-driven/>`, `<aop:aspectj-autoproxy/>`, or `<aop:config/>` elements forces the use of CGLIB
proxies *for all three of them*.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.8.1. Understanding AOP Proxies + +Spring AOP is proxy-based. It is vitally important that you grasp the semantics of +what that last statement actually means before you write your own aspects or use any of +the Spring AOP-based aspects supplied with the Spring Framework. + +Consider first the scenario where you have a plain-vanilla, un-proxied, +nothing-special-about-it, straight object reference, as the following +code snippet shows: + +Java + +``` +public class SimplePojo implements Pojo { + + public void foo() { + // this next method invocation is a direct call on the 'this' reference + this.bar(); + } + + public void bar() { + // some logic... + } +} +``` + +Kotlin + +``` +class SimplePojo : Pojo { + + fun foo() { + // this next method invocation is a direct call on the 'this' reference + this.bar() + } + + fun bar() { + // some logic... 
+ } +} +``` + +If you invoke a method on an object reference, the method is invoked directly on +that object reference, as the following image and listing show: + +![aop proxy plain pojo call](images/aop-proxy-plain-pojo-call.png) + +Java + +``` +public class Main { + + public static void main(String[] args) { + Pojo pojo = new SimplePojo(); + // this is a direct method call on the 'pojo' reference + pojo.foo(); + } +} +``` + +Kotlin + +``` +fun main() { + val pojo = SimplePojo() + // this is a direct method call on the 'pojo' reference + pojo.foo() +} +``` + +Things change slightly when the reference that client code has is a proxy. Consider the +following diagram and code snippet: + +![aop proxy call](images/aop-proxy-call.png) + +Java + +``` +public class Main { + + public static void main(String[] args) { + ProxyFactory factory = new ProxyFactory(new SimplePojo()); + factory.addInterface(Pojo.class); + factory.addAdvice(new RetryAdvice()); + + Pojo pojo = (Pojo) factory.getProxy(); + // this is a method call on the proxy! + pojo.foo(); + } +} +``` + +Kotlin + +``` +fun main() { + val factory = ProxyFactory(SimplePojo()) + factory.addInterface(Pojo::class.java) + factory.addAdvice(RetryAdvice()) + + val pojo = factory.proxy as Pojo + // this is a method call on the proxy! + pojo.foo() +} +``` + +The key thing to understand here is that the client code inside the `main(..)` method +of the `Main` class has a reference to the proxy. This means that method calls on that +object reference are calls on the proxy. As a result, the proxy can delegate to all of +the interceptors (advice) that are relevant to that particular method call. However, +once the call has finally reached the target object (the `SimplePojo` reference in +this case), any method calls that it may make on itself, such as `this.bar()` or`this.foo()`, are going to be invoked against the `this` reference, and not the proxy. +This has important implications. 
It means that self-invocation is not going to result +in the advice associated with a method invocation getting a chance to run. + +Okay, so what is to be done about this? The best approach (the term "best" is used +loosely here) is to refactor your code such that the self-invocation does not happen. +This does entail some work on your part, but it is the best, least-invasive approach. +The next approach is absolutely horrendous, and we hesitate to point it out, precisely +because it is so horrendous. You can (painful as it is to us) totally tie the logic +within your class to Spring AOP, as the following example shows: + +Java + +``` +public class SimplePojo implements Pojo { + + public void foo() { + // this works, but... gah! + ((Pojo) AopContext.currentProxy()).bar(); + } + + public void bar() { + // some logic... + } +} +``` + +Kotlin + +``` +class SimplePojo : Pojo { + + fun foo() { + // this works, but... gah! + (AopContext.currentProxy() as Pojo).bar() + } + + fun bar() { + // some logic... + } +} +``` + +This totally couples your code to Spring AOP, and it makes the class itself aware of +the fact that it is being used in an AOP context, which flies in the face of AOP. It +also requires some additional configuration when the proxy is being created, as the +following example shows: + +Java + +``` +public class Main { + + public static void main(String[] args) { + ProxyFactory factory = new ProxyFactory(new SimplePojo()); + factory.addInterface(Pojo.class); + factory.addAdvice(new RetryAdvice()); + factory.setExposeProxy(true); + + Pojo pojo = (Pojo) factory.getProxy(); + // this is a method call on the proxy! + pojo.foo(); + } +} +``` + +Kotlin + +``` +fun main() { + val factory = ProxyFactory(SimplePojo()) + factory.addInterface(Pojo::class.java) + factory.addAdvice(RetryAdvice()) + factory.isExposeProxy = true + + val pojo = factory.proxy as Pojo + // this is a method call on the proxy! 
+ pojo.foo() +} +``` + +Finally, it must be noted that AspectJ does not have this self-invocation issue because +it is not a proxy-based AOP framework. + +### 5.9. Programmatic Creation of @AspectJ Proxies + +In addition to declaring aspects in your configuration by using either ``or ``, it is also possible to programmatically create proxies +that advise target objects. For the full details of Spring’s AOP API, see the[next chapter](#aop-api). Here, we want to focus on the ability to automatically +create proxies by using @AspectJ aspects. + +You can use the `org.springframework.aop.aspectj.annotation.AspectJProxyFactory` class +to create a proxy for a target object that is advised by one or more @AspectJ aspects. +The basic usage for this class is very simple, as the following example shows: + +Java + +``` +// create a factory that can generate a proxy for the given target object +AspectJProxyFactory factory = new AspectJProxyFactory(targetObject); + +// add an aspect, the class must be an @AspectJ aspect +// you can call this as many times as you need with different aspects +factory.addAspect(SecurityManager.class); + +// you can also add existing aspect instances, the type of the object supplied must be an @AspectJ aspect +factory.addAspect(usageTracker); + +// now get the proxy object... +MyInterfaceType proxy = factory.getProxy(); +``` + +Kotlin + +``` +// create a factory that can generate a proxy for the given target object +val factory = AspectJProxyFactory(targetObject) + +// add an aspect, the class must be an @AspectJ aspect +// you can call this as many times as you need with different aspects +factory.addAspect(SecurityManager::class.java) + +// you can also add existing aspect instances, the type of the object supplied must be an @AspectJ aspect +factory.addAspect(usageTracker) + +// now get the proxy object... 
+val proxy = factory.getProxy() +``` + +See the [javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/aop/aspectj/annotation/AspectJProxyFactory.html) for more information. + +### 5.10. Using AspectJ with Spring Applications + +Everything we have covered so far in this chapter is pure Spring AOP. In this section, +we look at how you can use the AspectJ compiler or weaver instead of or in +addition to Spring AOP if your needs go beyond the facilities offered by Spring AOP +alone. + +Spring ships with a small AspectJ aspect library, which is available stand-alone in your +distribution as `spring-aspects.jar`. You need to add this to your classpath in order +to use the aspects in it. [Using AspectJ to Dependency Inject Domain Objects with Spring](#aop-atconfigurable) and [Other Spring aspects for AspectJ](#aop-ajlib-other) discuss the +content of this library and how you can use it. [Configuring AspectJ Aspects by Using Spring IoC](#aop-aj-configure) discusses how to +dependency inject AspectJ aspects that are woven using the AspectJ compiler. Finally,[Load-time Weaving with AspectJ in the Spring Framework](#aop-aj-ltw) provides an introduction to load-time weaving for Spring applications +that use AspectJ. + +#### 5.10.1. Using AspectJ to Dependency Inject Domain Objects with Spring + +The Spring container instantiates and configures beans defined in your application +context. It is also possible to ask a bean factory to configure a pre-existing +object, given the name of a bean definition that contains the configuration to be applied.`spring-aspects.jar` contains an annotation-driven aspect that exploits this +capability to allow dependency injection of any object. The support is intended to +be used for objects created outside of the control of any container. Domain objects +often fall into this category because they are often created programmatically with the`new` operator or by an ORM tool as a result of a database query. 
+ +The `@Configurable` annotation marks a class as being eligible for Spring-driven +configuration. In the simplest case, you can use purely it as a marker annotation, as the +following example shows: + +Java + +``` +package com.xyz.myapp.domain; + +import org.springframework.beans.factory.annotation.Configurable; + +@Configurable +public class Account { + // ... +} +``` + +Kotlin + +``` +package com.xyz.myapp.domain + +import org.springframework.beans.factory.annotation.Configurable + +@Configurable +class Account { + // ... +} +``` + +When used as a marker interface in this way, Spring configures new instances of the +annotated type (`Account`, in this case) by using a bean definition (typically +prototype-scoped) with the same name as the fully-qualified type name +(`com.xyz.myapp.domain.Account`). Since the default name for a bean is the +fully-qualified name of its type, a convenient way to declare the prototype definition +is to omit the `id` attribute, as the following example shows: + +``` + + + +``` + +If you want to explicitly specify the name of the prototype bean definition to use, you +can do so directly in the annotation, as the following example shows: + +Java + +``` +package com.xyz.myapp.domain; + +import org.springframework.beans.factory.annotation.Configurable; + +@Configurable("account") +public class Account { + // ... +} +``` + +Kotlin + +``` +package com.xyz.myapp.domain + +import org.springframework.beans.factory.annotation.Configurable + +@Configurable("account") +class Account { + // ... +} +``` + +Spring now looks for a bean definition named `account` and uses that as the +definition to configure new `Account` instances. + +You can also use autowiring to avoid having to specify a dedicated bean definition at +all. To have Spring apply autowiring, use the `autowire` property of the `@Configurable`annotation. 
You can specify either `@Configurable(autowire=Autowire.BY_TYPE)` or`@Configurable(autowire=Autowire.BY_NAME)` for autowiring by type or by name, +respectively. As an alternative, it is preferable to specify explicit, annotation-driven +dependency injection for your `@Configurable` beans through `@Autowired` or `@Inject`at the field or method level (see [Annotation-based Container Configuration](#beans-annotation-config) for further details). + +Finally, you can enable Spring dependency checking for the object references in the newly +created and configured object by using the `dependencyCheck` attribute (for example,`@Configurable(autowire=Autowire.BY_NAME,dependencyCheck=true)`). If this attribute is +set to `true`, Spring validates after configuration that all properties (which +are not primitives or collections) have been set. + +Note that using the annotation on its own does nothing. It is the`AnnotationBeanConfigurerAspect` in `spring-aspects.jar` that acts on the presence of +the annotation. In essence, the aspect says, “after returning from the initialization of +a new object of a type annotated with `@Configurable`, configure the newly created object +using Spring in accordance with the properties of the annotation”. In this context, +“initialization” refers to newly instantiated objects (for example, objects instantiated +with the `new` operator) as well as to `Serializable` objects that are undergoing +deserialization (for example, through[readResolve()](https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html)). + +| |One of the key phrases in the above paragraph is “in essence”. For most cases, the
exact semantics of “after returning from the initialization of a new object” are
fine. In this context, “after initialization” means that the dependencies are
injected after the object has been constructed. This means that the dependencies
are not available for use in the constructor bodies of the class. If you want the
dependencies to be injected before the constructor bodies run and thus be
available for use in the body of the constructors, you need to define this on the `@Configurable` declaration, as follows:

Java

```
@Configurable(preConstruction = true)
```

Kotlin

```
@Configurable(preConstruction = true)
```

You can find more information about the language semantics of the various pointcut
types in AspectJ [in this
appendix](https://www.eclipse.org/aspectj/doc/next/progguide/semantics-joinPoints.html) of the [AspectJ
Programming Guide](https://www.eclipse.org/aspectj/doc/next/progguide/index.html).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For this to work, the annotated types must be woven with the AspectJ weaver. You can +either use a build-time Ant or Maven task to do this (see, for example, the[AspectJ Development +Environment Guide](https://www.eclipse.org/aspectj/doc/released/devguide/antTasks.html)) or load-time weaving (see [Load-time Weaving with AspectJ in the Spring Framework](#aop-aj-ltw)). The`AnnotationBeanConfigurerAspect` itself needs to be configured by Spring (in order to obtain +a reference to the bean factory that is to be used to configure new objects). 
If you +use Java-based configuration, you can add `@EnableSpringConfigured` to any`@Configuration` class, as follows: + +Java + +``` +@Configuration +@EnableSpringConfigured +public class AppConfig { +} +``` + +Kotlin + +``` +@Configuration +@EnableSpringConfigured +class AppConfig { +} +``` + +If you prefer XML based configuration, the Spring[`context` namespace](#xsd-schemas-context)defines a convenient `context:spring-configured` element, which you can use as follows: + +``` + +``` + +Instances of `@Configurable` objects created before the aspect has been configured +result in a message being issued to the debug log and no configuration of the +object taking place. An example might be a bean in the Spring configuration that creates +domain objects when it is initialized by Spring. In this case, you can use the`depends-on` bean attribute to manually specify that the bean depends on the +configuration aspect. The following example shows how to use the `depends-on` attribute: + +``` + + + + + +``` + +| |Do not activate `@Configurable` processing through the bean configurer aspect unless you
really mean to rely on its semantics at runtime. In particular, make sure that you do
not use `@Configurable` on bean classes that are registered as regular Spring beans
with the container. Doing so results in double initialization, once through the
container and once through the aspect.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Unit Testing `@Configurable` Objects + +One of the goals of the `@Configurable` support is to enable independent unit testing +of domain objects without the difficulties associated with hard-coded lookups. +If `@Configurable` types have not been woven by AspectJ, the annotation has no affect +during unit testing. You can set mock or stub property references in the object under +test and proceed as normal. If `@Configurable` types have been woven by AspectJ, +you can still unit test outside of the container as normal, but you see a warning +message each time that you construct a `@Configurable` object indicating that it has +not been configured by Spring. + +##### Working with Multiple Application Contexts + +The `AnnotationBeanConfigurerAspect` that is used to implement the `@Configurable` support +is an AspectJ singleton aspect. The scope of a singleton aspect is the same as the scope +of `static` members: There is one aspect instance per classloader that defines the type. +This means that, if you define multiple application contexts within the same classloader +hierarchy, you need to consider where to define the `@EnableSpringConfigured` bean and +where to place `spring-aspects.jar` on the classpath. + +Consider a typical Spring web application configuration that has a shared parent application +context that defines common business services, everything needed to support those services, +and one child application context for each servlet (which contains definitions particular +to that servlet). 
All of these contexts co-exist within the same classloader hierarchy, +and so the `AnnotationBeanConfigurerAspect` can hold a reference to only one of them. +In this case, we recommend defining the `@EnableSpringConfigured` bean in the shared +(parent) application context. This defines the services that you are likely to want to +inject into domain objects. A consequence is that you cannot configure domain objects +with references to beans defined in the child (servlet-specific) contexts by using the +@Configurable mechanism (which is probably not something you want to do anyway). + +When deploying multiple web applications within the same container, ensure that each +web application loads the types in `spring-aspects.jar` by using its own classloader +(for example, by placing `spring-aspects.jar` in `WEB-INF/lib`). If `spring-aspects.jar`is added only to the container-wide classpath (and hence loaded by the shared parent +classloader), all web applications share the same aspect instance (which is probably +not what you want). + +#### 5.10.2. Other Spring aspects for AspectJ + +In addition to the `@Configurable` aspect, `spring-aspects.jar` contains an AspectJ +aspect that you can use to drive Spring’s transaction management for types and methods +annotated with the `@Transactional` annotation. This is primarily intended for users who +want to use the Spring Framework’s transaction support outside of the Spring container. + +The aspect that interprets `@Transactional` annotations is the`AnnotationTransactionAspect`. When you use this aspect, you must annotate the +implementation class (or methods within that class or both), not the interface (if +any) that the class implements. AspectJ follows Java’s rule that annotations on +interfaces are not inherited. + +A `@Transactional` annotation on a class specifies the default transaction semantics for +the execution of any public operation in the class. 
+ +A `@Transactional` annotation on a method within the class overrides the default +transaction semantics given by the class annotation (if present). Methods of any +visibility may be annotated, including private methods. Annotating non-public methods +directly is the only way to get transaction demarcation for the execution of such methods. + +| |Since Spring Framework 4.2, `spring-aspects` provides a similar aspect that offers the
exact same features for the standard `javax.transaction.Transactional` annotation. Check`JtaAnnotationTransactionAspect` for more details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For AspectJ programmers who want to use the Spring configuration and transaction +management support but do not want to (or cannot) use annotations, `spring-aspects.jar`also contains `abstract` aspects you can extend to provide your own pointcut +definitions. See the sources for the `AbstractBeanConfigurerAspect` and`AbstractTransactionAspect` aspects for more information. As an example, the following +excerpt shows how you could write an aspect to configure all instances of objects +defined in the domain model by using prototype bean definitions that match the +fully qualified class names: + +``` +public aspect DomainObjectConfiguration extends AbstractBeanConfigurerAspect { + + public DomainObjectConfiguration() { + setBeanWiringInfoResolver(new ClassNameBeanWiringInfoResolver()); + } + + // the creation of a new bean (any object in the domain model) + protected pointcut beanCreation(Object beanInstance) : + initialization(new(..)) && + CommonPointcuts.inDomainModel() && + this(beanInstance); +} +``` + +#### 5.10.3. Configuring AspectJ Aspects by Using Spring IoC + +When you use AspectJ aspects with Spring applications, it is natural to both want and +expect to be able to configure such aspects with Spring. The AspectJ runtime itself is +responsible for aspect creation, and the means of configuring the AspectJ-created +aspects through Spring depends on the AspectJ instantiation model (the `per-xxx` clause) +used by the aspect. + +The majority of AspectJ aspects are singleton aspects. Configuration of these +aspects is easy. 
You can create a bean definition that references the aspect type as +normal and include the `factory-method="aspectOf"` bean attribute. This ensures that +Spring obtains the aspect instance by asking AspectJ for it rather than trying to create +an instance itself. The following example shows how to use the `factory-method="aspectOf"` attribute: + +``` + (1) + + + +``` + +|**1**|Note the `factory-method="aspectOf"` attribute| +|-----|----------------------------------------------| + +Non-singleton aspects are harder to configure. However, it is possible to do so by +creating prototype bean definitions and using the `@Configurable` support from`spring-aspects.jar` to configure the aspect instances once they have bean created by +the AspectJ runtime. + +If you have some @AspectJ aspects that you want to weave with AspectJ (for example, +using load-time weaving for domain model types) and other @AspectJ aspects that you want +to use with Spring AOP, and these aspects are all configured in Spring, you +need to tell the Spring AOP @AspectJ auto-proxying support which exact subset of the +@AspectJ aspects defined in the configuration should be used for auto-proxying. You can +do this by using one or more `` elements inside the ``declaration. Each `` element specifies a name pattern, and only beans with +names matched by at least one of the patterns are used for Spring AOP auto-proxy +configuration. The following example shows how to use `` elements: + +``` + + + + +``` + +| |Do not be misled by the name of the `` element. Using it
results in the creation of Spring AOP proxies. The @AspectJ style of aspect
declaration is being used here, but the AspectJ runtime is not involved.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.10.4. Load-time Weaving with AspectJ in the Spring Framework + +Load-time weaving (LTW) refers to the process of weaving AspectJ aspects into an +application’s class files as they are being loaded into the Java virtual machine (JVM). +The focus of this section is on configuring and using LTW in the specific context of the +Spring Framework. This section is not a general introduction to LTW. For full details on +the specifics of LTW and configuring LTW with only AspectJ (with Spring not being +involved at all), see the[LTW section of the AspectJ +Development Environment Guide](https://www.eclipse.org/aspectj/doc/released/devguide/ltw.html). + +The value that the Spring Framework brings to AspectJ LTW is in enabling much +finer-grained control over the weaving process. 'Vanilla' AspectJ LTW is effected by using +a Java (5+) agent, which is switched on by specifying a VM argument when starting up a +JVM. It is, thus, a JVM-wide setting, which may be fine in some situations but is often a +little too coarse. Spring-enabled LTW lets you switch on LTW on a +per-`ClassLoader` basis, which is more fine-grained and which can make more +sense in a 'single-JVM-multiple-application' environment (such as is found in a typical +application server environment). + +Further, [in certain environments](#aop-aj-ltw-environments), this support enables +load-time weaving without making any modifications to the application server’s launch +script that is needed to add `-javaagent:path/to/aspectjweaver.jar` or (as we describe +later in this section) `-javaagent:path/to/spring-instrument.jar`. 
Developers configure +the application context to enable load-time weaving instead of relying on administrators +who typically are in charge of the deployment configuration, such as the launch script. + +Now that the sales pitch is over, let us first walk through a quick example of AspectJ +LTW that uses Spring, followed by detailed specifics about elements introduced in the +example. For a complete example, see the[Petclinic sample application](https://github.com/spring-projects/spring-petclinic). + +##### A First Example + +Assume that you are an application developer who has been tasked with diagnosing +the cause of some performance problems in a system. Rather than break out a +profiling tool, we are going to switch on a simple profiling aspect that lets us +quickly get some performance metrics. We can then apply a finer-grained profiling +tool to that specific area immediately afterwards. + +| |The example presented here uses XML configuration. You can also configure and
use @AspectJ with [Java configuration](#beans-java). Specifically, you can use the`@EnableLoadTimeWeaving` annotation as an alternative to ``(see [below](#aop-aj-ltw-spring) for details).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows the profiling aspect, which is not fancy. +It is a time-based profiler that uses the @AspectJ-style of aspect declaration: + +Java + +``` +package foo; + +import org.aspectj.lang.ProceedingJoinPoint; +import org.aspectj.lang.annotation.Aspect; +import org.aspectj.lang.annotation.Around; +import org.aspectj.lang.annotation.Pointcut; +import org.springframework.util.StopWatch; +import org.springframework.core.annotation.Order; + +@Aspect +public class ProfilingAspect { + + @Around("methodsToBeProfiled()") + public Object profile(ProceedingJoinPoint pjp) throws Throwable { + StopWatch sw = new StopWatch(getClass().getSimpleName()); + try { + sw.start(pjp.getSignature().getName()); + return pjp.proceed(); + } finally { + sw.stop(); + System.out.println(sw.prettyPrint()); + } + } + + @Pointcut("execution(public * foo..*.*(..))") + public void methodsToBeProfiled(){} +} +``` + +Kotlin + +``` +package foo + +import org.aspectj.lang.ProceedingJoinPoint +import org.aspectj.lang.annotation.Aspect +import org.aspectj.lang.annotation.Around +import org.aspectj.lang.annotation.Pointcut +import org.springframework.util.StopWatch +import org.springframework.core.annotation.Order + +@Aspect +class ProfilingAspect { + + @Around("methodsToBeProfiled()") + fun profile(pjp: ProceedingJoinPoint): Any { + val sw = StopWatch(javaClass.simpleName) + try { + sw.start(pjp.getSignature().getName()) + return pjp.proceed() + } finally { + sw.stop() + println(sw.prettyPrint()) + } 
+ } + + @Pointcut("execution(public * foo..*.*(..))") + fun methodsToBeProfiled() { + } +} +``` + +We also need to create an `META-INF/aop.xml` file, to inform the AspectJ weaver that +we want to weave our `ProfilingAspect` into our classes. This file convention, namely +the presence of a file (or files) on the Java classpath called `META-INF/aop.xml` is +standard AspectJ. The following example shows the `aop.xml` file: + +``` + + + + + + + + + + + + + + +``` + +Now we can move on to the Spring-specific portion of the configuration. We need +to configure a `LoadTimeWeaver` (explained later). This load-time weaver is the +essential component responsible for weaving the aspect configuration in one or +more `META-INF/aop.xml` files into the classes in your application. The good +thing is that it does not require a lot of configuration (there are some more +options that you can specify, but these are detailed later), as can be seen in +the following example: + +``` + + + + + + + + + +``` + +Now that all the required artifacts (the aspect, the `META-INF/aop.xml`file, and the Spring configuration) are in place, we can create the following +driver class with a `main(..)` method to demonstrate the LTW in action: + +Java + +``` +package foo; + +import org.springframework.context.support.ClassPathXmlApplicationContext; + +public final class Main { + + public static void main(String[] args) { + ApplicationContext ctx = new ClassPathXmlApplicationContext("beans.xml", Main.class); + + EntitlementCalculationService entitlementCalculationService = + (EntitlementCalculationService) ctx.getBean("entitlementCalculationService"); + + // the profiling aspect is 'woven' around this method execution + entitlementCalculationService.calculateEntitlement(); + } +} +``` + +Kotlin + +``` +package foo + +import org.springframework.context.support.ClassPathXmlApplicationContext + +fun main() { + val ctx = ClassPathXmlApplicationContext("beans.xml") + + val entitlementCalculationService = 
ctx.getBean("entitlementCalculationService") as EntitlementCalculationService + + // the profiling aspect is 'woven' around this method execution + entitlementCalculationService.calculateEntitlement() +} +``` + +We have one last thing to do. The introduction to this section did say that one could +switch on LTW selectively on a per-`ClassLoader` basis with Spring, and this is true. +However, for this example, we use a Java agent (supplied with Spring) to switch on LTW. +We use the following command to run the `Main` class shown earlier: + +``` +java -javaagent:C:/projects/foo/lib/global/spring-instrument.jar foo.Main +``` + +The `-javaagent` is a flag for specifying and enabling[agents +to instrument programs that run on the JVM](https://docs.oracle.com/javase/8/docs/api/java/lang/instrument/package-summary.html). The Spring Framework ships with such an +agent, the `InstrumentationSavingAgent`, which is packaged in the`spring-instrument.jar` that was supplied as the value of the `-javaagent` argument in +the preceding example. + +The output from the execution of the `Main` program looks something like the next example. +(I have introduced a `Thread.sleep(..)` statement into the `calculateEntitlement()`implementation so that the profiler actually captures something other than 0 +milliseconds (the `01234` milliseconds is not an overhead introduced by the AOP). +The following listing shows the output we got when we ran our profiler: + +``` +Calculating entitlement + +StopWatch 'ProfilingAspect': running time (millis) = 1234 +------ ----- ---------------------------- +ms % Task name +------ ----- ---------------------------- +01234 100% calculateEntitlement +``` + +Since this LTW is effected by using full-blown AspectJ, we are not limited only to advising +Spring beans. 
The following slight variation on the `Main` program yields the same +result: + +Java + +``` +package foo; + +import org.springframework.context.support.ClassPathXmlApplicationContext; + +public final class Main { + + public static void main(String[] args) { + new ClassPathXmlApplicationContext("beans.xml", Main.class); + + EntitlementCalculationService entitlementCalculationService = + new StubEntitlementCalculationService(); + + // the profiling aspect will be 'woven' around this method execution + entitlementCalculationService.calculateEntitlement(); + } +} +``` + +Kotlin + +``` +package foo + +import org.springframework.context.support.ClassPathXmlApplicationContext + +fun main(args: Array) { + ClassPathXmlApplicationContext("beans.xml") + + val entitlementCalculationService = StubEntitlementCalculationService() + + // the profiling aspect will be 'woven' around this method execution + entitlementCalculationService.calculateEntitlement() +} +``` + +Notice how, in the preceding program, we bootstrap the Spring container and +then create a new instance of the `StubEntitlementCalculationService` totally outside +the context of Spring. The profiling advice still gets woven in. + +Admittedly, the example is simplistic. However, the basics of the LTW support in Spring +have all been introduced in the earlier example, and the rest of this section explains +the “why” behind each bit of configuration and usage in detail. + +| |The `ProfilingAspect` used in this example may be basic, but it is quite useful. It is a
nice example of a development-time aspect that developers can use during development
and then easily exclude from builds of the application being deployed
into UAT or production.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Aspects + +The aspects that you use in LTW have to be AspectJ aspects. You can write them in +either the AspectJ language itself, or you can write your aspects in the @AspectJ-style. +Your aspects are then both valid AspectJ and Spring AOP aspects. +Furthermore, the compiled aspect classes need to be available on the classpath. + +##### 'META-INF/aop.xml' + +The AspectJ LTW infrastructure is configured by using one or more `META-INF/aop.xml`files that are on the Java classpath (either directly or, more typically, in jar files). + +The structure and contents of this file is detailed in the LTW part of the[AspectJ reference +documentation](https://www.eclipse.org/aspectj/doc/released/devguide/ltw-configuration.html). Because the `aop.xml` file is 100% AspectJ, we do not describe it further here. + +##### Required libraries (JARS) + +At minimum, you need the following libraries to use the Spring Framework’s support +for AspectJ LTW: + +* `spring-aop.jar` + +* `aspectjweaver.jar` + +If you use the [Spring-provided agent to enable +instrumentation](#aop-aj-ltw-environments-generic), you also need: + +* `spring-instrument.jar` + +##### Spring Configuration + +The key component in Spring’s LTW support is the `LoadTimeWeaver` interface (in the`org.springframework.instrument.classloading` package), and the numerous implementations +of it that ship with the Spring distribution. A `LoadTimeWeaver` is responsible for +adding one or more `java.lang.instrument.ClassFileTransformers` to a `ClassLoader` at +runtime, which opens the door to all manner of interesting applications, one of which +happens to be the LTW of aspects. 
+ +| |If you are unfamiliar with the idea of runtime class file transformation, see the
javadoc API documentation for the `java.lang.instrument` package before continuing.
While that documentation is not comprehensive, at least you can see the key interfaces
and classes (for reference as you read through this section).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Configuring a `LoadTimeWeaver` for a particular `ApplicationContext` can be as easy as +adding one line. (Note that you almost certainly need to use an`ApplicationContext` as your Spring container — typically, a `BeanFactory` is not +enough because the LTW support uses `BeanFactoryPostProcessors`.) + +To enable the Spring Framework’s LTW support, you need to configure a `LoadTimeWeaver`, +which typically is done by using the `@EnableLoadTimeWeaving` annotation, as follows: + +Java + +``` +@Configuration +@EnableLoadTimeWeaving +public class AppConfig { +} +``` + +Kotlin + +``` +@Configuration +@EnableLoadTimeWeaving +class AppConfig { +} +``` + +Alternatively, if you prefer XML-based configuration, use the`` element. Note that the element is defined in the`context` namespace. The following example shows how to use ``: + +``` + + + + + + +``` + +The preceding configuration automatically defines and registers a number of LTW-specific +infrastructure beans, such as a `LoadTimeWeaver` and an `AspectJWeavingEnabler`, for you. +The default `LoadTimeWeaver` is the `DefaultContextLoadTimeWeaver` class, which attempts +to decorate an automatically detected `LoadTimeWeaver`. The exact type of `LoadTimeWeaver`that is “automatically detected” is dependent upon your runtime environment. 
+The following table summarizes various `LoadTimeWeaver` implementations: + +| Runtime Environment |`LoadTimeWeaver` implementation| +|-----------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------| +| Running in [Apache Tomcat](https://tomcat.apache.org/) | `TomcatLoadTimeWeaver` | +| Running in [GlassFish](https://eclipse-ee4j.github.io/glassfish/) (limited to EAR deployments) | `GlassFishLoadTimeWeaver` | +| Running in Red Hat’s [JBoss AS](https://www.jboss.org/jbossas/) or [WildFly](https://www.wildfly.org/) | `JBossLoadTimeWeaver` | +| Running in IBM’s [WebSphere](https://www-01.ibm.com/software/webservers/appserv/was/) | `WebSphereLoadTimeWeaver` | +| Running in Oracle’s[WebLogic](https://www.oracle.com/technetwork/middleware/weblogic/overview/index-085209.html) | `WebLogicLoadTimeWeaver` | +| JVM started with Spring `InstrumentationSavingAgent`(`java -javaagent:path/to/spring-instrument.jar`) |`InstrumentationLoadTimeWeaver`| +|Fallback, expecting the underlying ClassLoader to follow common conventions
(namely `addTransformer` and optionally a `getThrowawayClassLoader` method)| `ReflectiveLoadTimeWeaver` | + +Note that the table lists only the `LoadTimeWeavers` that are autodetected when you +use the `DefaultContextLoadTimeWeaver`. You can specify exactly which `LoadTimeWeaver`implementation to use. + +To specify a specific `LoadTimeWeaver` with Java configuration, implement the`LoadTimeWeavingConfigurer` interface and override the `getLoadTimeWeaver()` method. +The following example specifies a `ReflectiveLoadTimeWeaver`: + +Java + +``` +@Configuration +@EnableLoadTimeWeaving +public class AppConfig implements LoadTimeWeavingConfigurer { + + @Override + public LoadTimeWeaver getLoadTimeWeaver() { + return new ReflectiveLoadTimeWeaver(); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableLoadTimeWeaving +class AppConfig : LoadTimeWeavingConfigurer { + + override fun getLoadTimeWeaver(): LoadTimeWeaver { + return ReflectiveLoadTimeWeaver() + } +} +``` + +If you use XML-based configuration, you can specify the fully qualified classname +as the value of the `weaver-class` attribute on the ``element. Again, the following example specifies a `ReflectiveLoadTimeWeaver`: + +``` + + + + + + +``` + +The `LoadTimeWeaver` that is defined and registered by the configuration can be later +retrieved from the Spring container by using the well known name, `loadTimeWeaver`. +Remember that the `LoadTimeWeaver` exists only as a mechanism for Spring’s LTW +infrastructure to add one or more `ClassFileTransformers`. The actual`ClassFileTransformer` that does the LTW is the `ClassPreProcessorAgentAdapter` (from +the `org.aspectj.weaver.loadtime` package) class. See the class-level javadoc of the`ClassPreProcessorAgentAdapter` class for further details, because the specifics of how +the weaving is actually effected is beyond the scope of this document. 
+ +There is one final attribute of the configuration left to discuss: the `aspectjWeaving`attribute (or `aspectj-weaving` if you use XML). This attribute controls whether LTW +is enabled or not. It accepts one of three possible values, with the default value being`autodetect` if the attribute is not present. The following table summarizes the three +possible values: + +|Annotation Value| XML Value | Explanation | +|----------------|------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `ENABLED` | `on` | AspectJ weaving is on, and aspects are woven at load-time as appropriate. | +| `DISABLED` | `off` | LTW is off. No aspect is woven at load-time. | +| `AUTODETECT` |`autodetect`|If the Spring LTW infrastructure can find at least one `META-INF/aop.xml` file,
then AspectJ weaving is on. Otherwise, it is off. This is the default value.| + +##### Environment-specific Configuration + +This last section contains any additional settings and configuration that you need +when you use Spring’s LTW support in environments such as application servers and web +containers. + +###### Tomcat, JBoss, WebSphere, WebLogic + +Tomcat, JBoss/WildFly, IBM WebSphere Application Server and Oracle WebLogic Server all +provide a general app `ClassLoader` that is capable of local instrumentation. Spring’s +native LTW may leverage those ClassLoader implementations to provide AspectJ weaving. +You can simply enable load-time weaving, as [described earlier](#aop-using-aspectj). +Specifically, you do not need to modify the JVM launch script to add`-javaagent:path/to/spring-instrument.jar`. + +Note that on JBoss, you may need to disable the app server scanning to prevent it from +loading the classes before the application actually starts. A quick workaround is to add +to your artifact a file named `WEB-INF/jboss-scanning.xml` with the following content: + +``` + +``` + +###### Generic Java Applications + +When class instrumentation is required in environments that are not supported by +specific `LoadTimeWeaver` implementations, a JVM agent is the general solution. +For such cases, Spring provides `InstrumentationLoadTimeWeaver` which requires a +Spring-specific (but very general) JVM agent, `spring-instrument.jar`, autodetected +by common `@EnableLoadTimeWeaving` and `` setups. + +To use it, you must start the virtual machine with the Spring agent by supplying +the following JVM options: + +``` +-javaagent:/path/to/spring-instrument.jar +``` + +Note that this requires modification of the JVM launch script, which may prevent you +from using this in application server environments (depending on your server and your +operation policies). 
That said, for one-app-per-JVM deployments such as standalone +Spring Boot applications, you typically control the entire JVM setup in any case. + +### 5.11. Further Resources + +More information on AspectJ can be found on the [AspectJ website](https://www.eclipse.org/aspectj). + +*Eclipse AspectJ* by Adrian Colyer et al. (Addison-Wesley, 2005) provides a +comprehensive introduction and reference for the AspectJ language. + +*AspectJ in Action*, Second Edition by Ramnivas Laddad (Manning, 2009) comes highly +recommended. The focus of the book is on AspectJ, but a lot of general AOP themes are +explored (in some depth). + +## 6. Spring AOP APIs + +The previous chapter described Spring’s support for AOP with @AspectJ and schema-based +aspect definitions. In this chapter, we discuss the lower-level Spring AOP APIs. For common +applications, we recommend the use of Spring AOP with AspectJ pointcuts as described in the +previous chapter. + +### 6.1. Pointcut API in Spring + +This section describes how Spring handles the crucial pointcut concept. + +#### 6.1.1. Concepts + +Spring’s pointcut model enables pointcut reuse independent of advice types. You can +target different advice with the same pointcut. + +The `org.springframework.aop.Pointcut` interface is the central interface, used to +target advices to particular classes and methods. The complete interface follows: + +``` +public interface Pointcut { + + ClassFilter getClassFilter(); + + MethodMatcher getMethodMatcher(); +} +``` + +Splitting the `Pointcut` interface into two parts allows reuse of class and method +matching parts and fine-grained composition operations (such as performing a “union” +with another method matcher). + +The `ClassFilter` interface is used to restrict the pointcut to a given set of target +classes. If the `matches()` method always returns true, all target classes are +matched. 
The following listing shows the `ClassFilter` interface definition: + +``` +public interface ClassFilter { + + boolean matches(Class clazz); +} +``` + +The `MethodMatcher` interface is normally more important. The complete interface follows: + +``` +public interface MethodMatcher { + + boolean matches(Method m, Class targetClass); + + boolean isRuntime(); + + boolean matches(Method m, Class targetClass, Object... args); +} +``` + +The `matches(Method, Class)` method is used to test whether this pointcut ever +matches a given method on a target class. This evaluation can be performed when an AOP +proxy is created to avoid the need for a test on every method invocation. If the +two-argument `matches` method returns `true` for a given method, and the `isRuntime()`method for the MethodMatcher returns `true`, the three-argument matches method is +invoked on every method invocation. This lets a pointcut look at the arguments passed +to the method invocation immediately before the target advice starts. + +Most `MethodMatcher` implementations are static, meaning that their `isRuntime()` method +returns `false`. In this case, the three-argument `matches` method is never invoked. + +| |If possible, try to make pointcuts static, allowing the AOP framework to cache the
results of pointcut evaluation when an AOP proxy is created.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.1.2. Operations on Pointcuts + +Spring supports operations (notably, union and intersection) on pointcuts. + +Union means the methods that either pointcut matches. +Intersection means the methods that both pointcuts match. +Union is usually more useful. +You can compose pointcuts by using the static methods in the`org.springframework.aop.support.Pointcuts` class or by using the`ComposablePointcut` class in the same package. However, using AspectJ pointcut +expressions is usually a simpler approach. + +#### 6.1.3. AspectJ Expression Pointcuts + +Since 2.0, the most important type of pointcut used by Spring is`org.springframework.aop.aspectj.AspectJExpressionPointcut`. This is a pointcut that +uses an AspectJ-supplied library to parse an AspectJ pointcut expression string. + +See the [previous chapter](#aop) for a discussion of supported AspectJ pointcut primitives. + +#### 6.1.4. Convenience Pointcut Implementations + +Spring provides several convenient pointcut implementations. You can use some of them +directly; others are intended to be subclassed in application-specific pointcuts. + +##### Static Pointcuts + +Static pointcuts are based on the method and the target class and cannot take into account +the method’s arguments. Static pointcuts suffice — and are best — for most usages. +Spring can evaluate a static pointcut only once, when a method is first invoked. +After that, there is no need to evaluate the pointcut again with each method invocation. + +The rest of this section describes some of the static pointcut implementations that are +included with Spring. + +###### Regular Expression Pointcuts + +One obvious way to specify static pointcuts is regular expressions. 
Several AOP +frameworks besides Spring make this possible.`org.springframework.aop.support.JdkRegexpMethodPointcut` is a generic regular +expression pointcut that uses the regular expression support in the JDK. + +With the `JdkRegexpMethodPointcut` class, you can provide a list of pattern strings. +If any of these is a match, the pointcut evaluates to `true`. (As a consequence, +the resulting pointcut is effectively the union of the specified patterns.) + +The following example shows how to use `JdkRegexpMethodPointcut`: + +``` + + + + .*set.* + .*absquatulate + + + +``` + +Spring provides a convenience class named `RegexpMethodPointcutAdvisor`, which lets us +also reference an `Advice` (remember that an `Advice` can be an interceptor, before advice, +throws advice, and others). Behind the scenes, Spring uses a `JdkRegexpMethodPointcut`. +Using `RegexpMethodPointcutAdvisor` simplifies wiring, as the one bean encapsulates both +pointcut and advice, as the following example shows: + +``` + + + + + + + .*set.* + .*absquatulate + + + +``` + +You can use `RegexpMethodPointcutAdvisor` with any `Advice` type. + +###### Attribute-driven Pointcuts + +An important type of static pointcut is a metadata-driven pointcut. This uses the +values of metadata attributes (typically, source-level metadata). + +##### Dynamic pointcuts + +Dynamic pointcuts are costlier to evaluate than static pointcuts. They take into account +method arguments as well as static information. This means that they must be +evaluated with every method invocation and that the result cannot be cached, as arguments will +vary. + +The main example is the `control flow` pointcut. + +###### Control Flow Pointcuts + +Spring control flow pointcuts are conceptually similar to AspectJ `cflow` pointcuts, +although less powerful. (There is currently no way to specify that a pointcut runs +below a join point matched by another pointcut.) A control flow pointcut matches the +current call stack. 
For example, it might fire if the join point was invoked by a method +in the `com.mycompany.web` package or by the `SomeCaller` class. Control flow pointcuts +are specified by using the `org.springframework.aop.support.ControlFlowPointcut` class. + +| |Control flow pointcuts are significantly more expensive to evaluate at runtime than even
other dynamic pointcuts. In Java 1.4, the cost is about five times that of other dynamic
pointcuts.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.1.5. Pointcut Superclasses + +Spring provides useful pointcut superclasses to help you to implement your own pointcuts. + +Because static pointcuts are most useful, you should probably subclass`StaticMethodMatcherPointcut`. This requires implementing only one +abstract method (although you can override other methods to customize behavior). The +following example shows how to subclass `StaticMethodMatcherPointcut`: + +Java + +``` +class TestStaticPointcut extends StaticMethodMatcherPointcut { + + public boolean matches(Method m, Class targetClass) { + // return true if custom criteria match + } +} +``` + +Kotlin + +``` +class TestStaticPointcut : StaticMethodMatcherPointcut() { + + override fun matches(method: Method, targetClass: Class<*>): Boolean { + // return true if custom criteria match + } +} +``` + +There are also superclasses for dynamic pointcuts. +You can use custom pointcuts with any advice type. + +#### 6.1.6. Custom Pointcuts + +Because pointcuts in Spring AOP are Java classes rather than language features (as in +AspectJ), you can declare custom pointcuts, whether static or dynamic. Custom +pointcuts in Spring can be arbitrarily complex. However, we recommend using the AspectJ pointcut +expression language, if you can. + +| |Later versions of Spring may offer support for “semantic pointcuts” as offered by JAC — for example, “all methods that change instance variables in the target object.”| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 6.2. Advice API in Spring + +Now we can examine how Spring AOP handles advice. + +#### 6.2.1. Advice Lifecycles + +Each advice is a Spring bean. 
An advice instance can be shared across all advised +objects or be unique to each advised object. This corresponds to per-class or +per-instance advice. + +Per-class advice is used most often. It is appropriate for generic advice, such as +transaction advisors. These do not depend on the state of the proxied object or add new +state. They merely act on the method and arguments. + +Per-instance advice is appropriate for introductions, to support mixins. In this case, +the advice adds state to the proxied object. + +You can use a mix of shared and per-instance advice in the same AOP proxy. + +#### 6.2.2. Advice Types in Spring + +Spring provides several advice types and is extensible to support +arbitrary advice types. This section describes the basic concepts and standard advice types. + +##### Interception Around Advice + +The most fundamental advice type in Spring is interception around advice. + +Spring is compliant with the AOP Alliance interface for around advice that uses method +interception. Classes that implement `MethodInterceptor` and that implement around advice should also implement the +following interface: + +``` +public interface MethodInterceptor extends Interceptor { + + Object invoke(MethodInvocation invocation) throws Throwable; +} +``` + +The `MethodInvocation` argument to the `invoke()` method exposes the method being +invoked, the target join point, the AOP proxy, and the arguments to the method. The `invoke()` method should return the invocation’s result: the return value of the join +point. 
+ +The following example shows a simple `MethodInterceptor` implementation: + +Java + +``` +public class DebugInterceptor implements MethodInterceptor { + + public Object invoke(MethodInvocation invocation) throws Throwable { + System.out.println("Before: invocation=[" + invocation + "]"); + Object rval = invocation.proceed(); + System.out.println("Invocation returned"); + return rval; + } +} +``` + +Kotlin + +``` +class DebugInterceptor : MethodInterceptor { + + override fun invoke(invocation: MethodInvocation): Any { + println("Before: invocation=[$invocation]") + val rval = invocation.proceed() + println("Invocation returned") + return rval + } +} +``` + +Note the call to the `proceed()` method of `MethodInvocation`. This proceeds down the +interceptor chain towards the join point. Most interceptors invoke this method and +return its return value. However, a `MethodInterceptor`, like any around advice, can +return a different value or throw an exception rather than invoke the proceed method. +However, you do not want to do this without good reason. + +| |`MethodInterceptor` implementations offer interoperability with other AOP Alliance-compliant AOP
implementations. The other advice types discussed in the remainder of this section
implement common AOP concepts but in a Spring-specific way. While there is an advantage
in using the most specific advice type, stick with `MethodInterceptor` around advice if
you are likely to want to run the aspect in another AOP framework. Note that pointcuts
are not currently interoperable between frameworks, and the AOP Alliance does not
currently define pointcut interfaces.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Before Advice + +A simpler advice type is a before advice. This does not need a `MethodInvocation`object, since it is called only before entering the method. + +The main advantage of a before advice is that there is no need to invoke the `proceed()`method and, therefore, no possibility of inadvertently failing to proceed down the +interceptor chain. + +The following listing shows the `MethodBeforeAdvice` interface: + +``` +public interface MethodBeforeAdvice extends BeforeAdvice { + + void before(Method m, Object[] args, Object target) throws Throwable; +} +``` + +(Spring’s API design would allow for +field before advice, although the usual objects apply to field interception and it is +unlikely for Spring to ever implement it.) + +Note that the return type is `void`. Before advice can insert custom behavior before the join +point runs but cannot change the return value. If a before advice throws an +exception, it stops further execution of the interceptor chain. The exception +propagates back up the interceptor chain. If it is unchecked or on the signature of +the invoked method, it is passed directly to the client. Otherwise, it is +wrapped in an unchecked exception by the AOP proxy. 
+ +The following example shows a before advice in Spring, which counts all method invocations: + +Java + +``` +public class CountingBeforeAdvice implements MethodBeforeAdvice { + + private int count; + + public void before(Method m, Object[] args, Object target) throws Throwable { + ++count; + } + + public int getCount() { + return count; + } +} +``` + +Kotlin + +``` +class CountingBeforeAdvice : MethodBeforeAdvice { + + var count: Int = 0 + + override fun before(m: Method, args: Array, target: Any?) { + ++count + } +} +``` + +| |Before advice can be used with any pointcut.| +|---|--------------------------------------------| + +##### Throws Advice + +Throws advice is invoked after the return of the join point if the join point threw +an exception. Spring offers typed throws advice. Note that this means that the`org.springframework.aop.ThrowsAdvice` interface does not contain any methods. It is a +tag interface identifying that the given object implements one or more typed throws +advice methods. These should be in the following form: + +``` +afterThrowing([Method, args, target], subclassOfThrowable) +``` + +Only the last argument is required. The method signatures may have either one or four +arguments, depending on whether the advice method is interested in the method and +arguments. The next two listing show classes that are examples of throws advice. + +The following advice is invoked if a `RemoteException` is thrown (including from subclasses): + +Java + +``` +public class RemoteThrowsAdvice implements ThrowsAdvice { + + public void afterThrowing(RemoteException ex) throws Throwable { + // Do something with remote exception + } +} +``` + +Kotlin + +``` +class RemoteThrowsAdvice : ThrowsAdvice { + + fun afterThrowing(ex: RemoteException) { + // Do something with remote exception + } +} +``` + +Unlike the preceding +advice, the next example declares four arguments, so that it has access to the invoked method, method +arguments, and target object. 
The following advice is invoked if a `ServletException` is thrown: + +Java + +``` +public class ServletThrowsAdviceWithArguments implements ThrowsAdvice { + + public void afterThrowing(Method m, Object[] args, Object target, ServletException ex) { + // Do something with all arguments + } +} +``` + +Kotlin + +``` +class ServletThrowsAdviceWithArguments : ThrowsAdvice { + + fun afterThrowing(m: Method, args: Array, target: Any, ex: ServletException) { + // Do something with all arguments + } +} +``` + +The final example illustrates how these two methods could be used in a single class +that handles both `RemoteException` and `ServletException`. Any number of throws advice +methods can be combined in a single class. The following listing shows the final example: + +Java + +``` +public static class CombinedThrowsAdvice implements ThrowsAdvice { + + public void afterThrowing(RemoteException ex) throws Throwable { + // Do something with remote exception + } + + public void afterThrowing(Method m, Object[] args, Object target, ServletException ex) { + // Do something with all arguments + } +} +``` + +Kotlin + +``` +class CombinedThrowsAdvice : ThrowsAdvice { + + fun afterThrowing(ex: RemoteException) { + // Do something with remote exception + } + + fun afterThrowing(m: Method, args: Array, target: Any, ex: ServletException) { + // Do something with all arguments + } +} +``` + +| |If a throws-advice method throws an exception itself, it overrides the
original exception (that is, it changes the exception thrown to the user). The overriding
exception is typically a RuntimeException, which is compatible with any method
signature. However, if a throws-advice method throws a checked exception, it must
match the declared exceptions of the target method and is, hence, to some degree
coupled to specific target method signatures. *Do not throw an undeclared checked
exception that is incompatible with the target method’s signature!*| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Throws advice can be used with any pointcut.| +|---|--------------------------------------------| + +##### After Returning Advice + +An after returning advice in Spring must implement the`org.springframework.aop.AfterReturningAdvice` interface, which the following listing shows: + +``` +public interface AfterReturningAdvice extends Advice { + + void afterReturning(Object returnValue, Method m, Object[] args, Object target) + throws Throwable; +} +``` + +An after returning advice has access to the return value (which it cannot modify), +the invoked method, the method’s arguments, and the target. + +The following after returning advice counts all successful method invocations that have +not thrown exceptions: + +Java + +``` +public class CountingAfterReturningAdvice implements AfterReturningAdvice { + + private int count; + + public void afterReturning(Object returnValue, Method m, Object[] args, Object target) + throws Throwable { + ++count; + } + + public int getCount() { + return count; + } +} +``` + +Kotlin + +``` +class CountingAfterReturningAdvice : AfterReturningAdvice { + + var count: Int = 0 + private set + + override fun afterReturning(returnValue: Any?, m: Method, args: Array, target: Any?) { + ++count + } +} +``` + +This advice does not change the execution path. 
If it throws an exception, it is +thrown up the interceptor chain instead of the return value. + +| |After returning advice can be used with any pointcut.| +|---|-----------------------------------------------------| + +##### Introduction Advice + +Spring treats introduction advice as a special kind of interception advice. + +Introduction requires an `IntroductionAdvisor` and an `IntroductionInterceptor` that +implement the following interface: + +``` +public interface IntroductionInterceptor extends MethodInterceptor { + + boolean implementsInterface(Class intf); +} +``` + +The `invoke()` method inherited from the AOP Alliance `MethodInterceptor` interface must +implement the introduction. That is, if the invoked method is on an introduced +interface, the introduction interceptor is responsible for handling the method call — it +cannot invoke `proceed()`. + +Introduction advice cannot be used with any pointcut, as it applies only at the class, +rather than the method, level. You can only use introduction advice with the`IntroductionAdvisor`, which has the following methods: + +``` +public interface IntroductionAdvisor extends Advisor, IntroductionInfo { + + ClassFilter getClassFilter(); + + void validateInterfaces() throws IllegalArgumentException; +} + +public interface IntroductionInfo { + + Class[] getInterfaces(); +} +``` + +There is no `MethodMatcher` and, hence, no `Pointcut` associated with introduction +advice. Only class filtering is logical. + +The `getInterfaces()` method returns the interfaces introduced by this advisor. + +The `validateInterfaces()` method is used internally to see whether or not the +introduced interfaces can be implemented by the configured `IntroductionInterceptor`. 
+ +Consider an example from the Spring test suite and suppose we want to +introduce the following interface to one or more objects: + +Java + +``` +public interface Lockable { + void lock(); + void unlock(); + boolean locked(); +} +``` + +Kotlin + +``` +interface Lockable { + fun lock() + fun unlock() + fun locked(): Boolean +} +``` + +This illustrates a mixin. We want to be able to cast advised objects to `Lockable`, +whatever their type and call lock and unlock methods. If we call the `lock()` method, we +want all setter methods to throw a `LockedException`. Thus, we can add an aspect that +provides the ability to make objects immutable without them having any knowledge of it: +a good example of AOP. + +First, we need an `IntroductionInterceptor` that does the heavy lifting. In this +case, we extend the `org.springframework.aop.support.DelegatingIntroductionInterceptor`convenience class. We could implement `IntroductionInterceptor` directly, but using`DelegatingIntroductionInterceptor` is best for most cases. + +The `DelegatingIntroductionInterceptor` is designed to delegate an introduction to an +actual implementation of the introduced interfaces, concealing the use of interception +to do so. You can set the delegate to any object using a constructor argument. The +default delegate (when the no-argument constructor is used) is `this`. Thus, in the next example, +the delegate is the `LockMixin` subclass of `DelegatingIntroductionInterceptor`. +Given a delegate (by default, itself), a `DelegatingIntroductionInterceptor` instance +looks for all interfaces implemented by the delegate (other than`IntroductionInterceptor`) and supports introductions against any of them. +Subclasses such as `LockMixin` can call the `suppressInterface(Class intf)`method to suppress interfaces that should not be exposed. 
However, no matter how many +interfaces an `IntroductionInterceptor` is prepared to support, the`IntroductionAdvisor` used controls which interfaces are actually exposed. An +introduced interface conceals any implementation of the same interface by the target. + +Thus, `LockMixin` extends `DelegatingIntroductionInterceptor` and implements `Lockable`itself. The superclass automatically picks up that `Lockable` can be supported for +introduction, so we do not need to specify that. We could introduce any number of +interfaces in this way. + +Note the use of the `locked` instance variable. This effectively adds additional state +to that held in the target object. + +The following example shows the example `LockMixin` class: + +Java + +``` +public class LockMixin extends DelegatingIntroductionInterceptor implements Lockable { + + private boolean locked; + + public void lock() { + this.locked = true; + } + + public void unlock() { + this.locked = false; + } + + public boolean locked() { + return this.locked; + } + + public Object invoke(MethodInvocation invocation) throws Throwable { + if (locked() && invocation.getMethod().getName().indexOf("set") == 0) { + throw new LockedException(); + } + return super.invoke(invocation); + } + +} +``` + +Kotlin + +``` +class LockMixin : DelegatingIntroductionInterceptor(), Lockable { + + private var locked: Boolean = false + + fun lock() { + this.locked = true + } + + fun unlock() { + this.locked = false + } + + fun locked(): Boolean { + return this.locked + } + + override fun invoke(invocation: MethodInvocation): Any? { + if (locked() && invocation.method.name.indexOf("set") == 0) { + throw LockedException() + } + return super.invoke(invocation) + } + +} +``` + +Often, you need not override the `invoke()` method. The`DelegatingIntroductionInterceptor` implementation (which calls the `delegate` method if +the method is introduced, otherwise proceeds towards the join point) usually +suffices. 
In the present case, we need to add a check: no setter method can be invoked +if in locked mode. + +The required introduction only needs to hold a distinct`LockMixin` instance and specify the introduced interfaces (in this case, only`Lockable`). A more complex example might take a reference to the introduction +interceptor (which would be defined as a prototype). In this case, there is no +configuration relevant for a `LockMixin`, so we create it by using `new`. +The following example shows our `LockMixinAdvisor` class: + +Java + +``` +public class LockMixinAdvisor extends DefaultIntroductionAdvisor { + + public LockMixinAdvisor() { + super(new LockMixin(), Lockable.class); + } +} +``` + +Kotlin + +``` +class LockMixinAdvisor : DefaultIntroductionAdvisor(LockMixin(), Lockable::class.java) +``` + +We can apply this advisor very simply, because it requires no configuration. (However, it +is impossible to use an `IntroductionInterceptor` without an`IntroductionAdvisor`.) As usual with introductions, the advisor must be per-instance, +as it is stateful. We need a different instance of `LockMixinAdvisor`, and hence`LockMixin`, for each advised object. The advisor comprises part of the advised object’s +state. + +We can apply this advisor programmatically by using the `Advised.addAdvisor()` method or +(the recommended way) in XML configuration, as any other advisor. All proxy creation +choices discussed below, including “auto proxy creators,” correctly handle introductions +and stateful mixins. + +### 6.3. The Advisor API in Spring + +In Spring, an Advisor is an aspect that contains only a single advice object associated +with a pointcut expression. + +Apart from the special case of introductions, any advisor can be used with any advice.`org.springframework.aop.support.DefaultPointcutAdvisor` is the most commonly used +advisor class. It can be used with a `MethodInterceptor`, `BeforeAdvice`, or`ThrowsAdvice`. 
+ +It is possible to mix advisor and advice types in Spring in the same AOP proxy. For +example, you could use an interception around advice, throws advice, and before advice in +one proxy configuration. Spring automatically creates the necessary interceptor +chain. + +### 6.4. Using the `ProxyFactoryBean` to Create AOP Proxies + +If you use the Spring IoC container (an `ApplicationContext` or `BeanFactory`) for your +business objects (and you should be!), you want to use one of Spring’s AOP`FactoryBean` implementations. (Remember that a factory bean introduces a layer of indirection, letting +it create objects of a different type.) + +| |The Spring AOP support also uses factory beans under the covers.| +|---|----------------------------------------------------------------| + +The basic way to create an AOP proxy in Spring is to use the`org.springframework.aop.framework.ProxyFactoryBean`. This gives complete control over +the pointcuts, any advice that applies, and their ordering. However, there are simpler +options that are preferable if you do not need such control. + +#### 6.4.1. Basics + +The `ProxyFactoryBean`, like other Spring `FactoryBean` implementations, introduces a +level of indirection. If you define a `ProxyFactoryBean` named `foo`, objects that +reference `foo` do not see the `ProxyFactoryBean` instance itself but an object +created by the implementation of the `getObject()` method in the `ProxyFactoryBean` . This +method creates an AOP proxy that wraps a target object. + +One of the most important benefits of using a `ProxyFactoryBean` or another IoC-aware +class to create AOP proxies is that advices and pointcuts can also be +managed by IoC. This is a powerful feature, enabling certain approaches that are hard to +achieve with other AOP frameworks. 
For example, an advice may itself reference +application objects (besides the target, which should be available in any AOP +framework), benefiting from all the pluggability provided by Dependency Injection. + +#### 6.4.2. JavaBean Properties + +In common with most `FactoryBean` implementations provided with Spring, the`ProxyFactoryBean` class is itself a JavaBean. Its properties are used to: + +* Specify the target you want to proxy. + +* Specify whether to use CGLIB (described later and see also [JDK- and CGLIB-based proxies](#aop-pfb-proxy-types)). + +Some key properties are inherited from `org.springframework.aop.framework.ProxyConfig`(the superclass for all AOP proxy factories in Spring). These key properties include +the following: + +* `proxyTargetClass`: `true` if the target class is to be proxied, rather than the + target class’s interfaces. If this property value is set to `true`, then CGLIB proxies + are created (but see also [JDK- and CGLIB-based proxies](#aop-pfb-proxy-types)). + +* `optimize`: Controls whether or not aggressive optimizations are applied to proxies + created through CGLIB. You should not blithely use this setting unless you fully + understand how the relevant AOP proxy handles optimization. This is currently used + only for CGLIB proxies. It has no effect with JDK dynamic proxies. + +* `frozen`: If a proxy configuration is `frozen`, changes to the configuration are + no longer allowed. This is useful both as a slight optimization and for those cases + when you do not want callers to be able to manipulate the proxy (through the `Advised`interface) after the proxy has been created. The default value of this property is`false`, so changes (such as adding additional advice) are allowed. + +* `exposeProxy`: Determines whether or not the current proxy should be exposed in a`ThreadLocal` so that it can be accessed by the target. 
If a target needs to obtain + the proxy and the `exposeProxy` property is set to `true`, the target can use the`AopContext.currentProxy()` method. + +Other properties specific to `ProxyFactoryBean` include the following: + +* `proxyInterfaces`: An array of `String` interface names. If this is not supplied, a CGLIB + proxy for the target class is used (but see also [JDK- and CGLIB-based proxies](#aop-pfb-proxy-types)). + +* `interceptorNames`: A `String` array of `Advisor`, interceptor, or other advice names to + apply. Ordering is significant, on a first come-first served basis. That is to say + that the first interceptor in the list is the first to be able to intercept the + invocation. + + The names are bean names in the current factory, including bean names from ancestor + factories. You cannot mention bean references here, since doing so results in the`ProxyFactoryBean` ignoring the singleton setting of the advice. + + You can append an interceptor name with an asterisk (`*`). Doing so results in the + application of all advisor beans with names that start with the part before the asterisk + to be applied. You can find an example of using this feature in [Using “Global” Advisors](#aop-global-advisors). + +* singleton: Whether or not the factory should return a single object, no matter how + often the `getObject()` method is called. Several `FactoryBean` implementations offer + such a method. The default value is `true`. If you want to use stateful advice - for + example, for stateful mixins - use prototype advices along with a singleton value of`false`. + +#### 6.4.3. JDK- and CGLIB-based proxies + +This section serves as the definitive documentation on how the `ProxyFactoryBean`chooses to create either a JDK-based proxy or a CGLIB-based proxy for a particular target +object (which is to be proxied). + +| |The behavior of the `ProxyFactoryBean` with regard to creating JDK- or CGLIB-based
proxies changed between versions 1.2.x and 2.0 of Spring. The `ProxyFactoryBean` now
exhibits similar semantics with regard to auto-detecting interfaces as those of the`TransactionProxyFactoryBean` class.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If the class of a target object that is to be proxied (hereafter simply referred to as +the target class) does not implement any interfaces, a CGLIB-based proxy is +created. This is the easiest scenario, because JDK proxies are interface-based, and no +interfaces means JDK proxying is not even possible. You can plug in the target bean +and specify the list of interceptors by setting the `interceptorNames` property. Note that a +CGLIB-based proxy is created even if the `proxyTargetClass` property of the`ProxyFactoryBean` has been set to `false`. (Doing so makes no sense and is best +removed from the bean definition, because it is, at best, redundant, and, at worst +confusing.) + +If the target class implements one (or more) interfaces, the type of proxy that is +created depends on the configuration of the `ProxyFactoryBean`. + +If the `proxyTargetClass` property of the `ProxyFactoryBean` has been set to `true`, +a CGLIB-based proxy is created. This makes sense and is in keeping with the +principle of least surprise. Even if the `proxyInterfaces` property of the`ProxyFactoryBean` has been set to one or more fully qualified interface names, the fact +that the `proxyTargetClass` property is set to `true` causes CGLIB-based +proxying to be in effect. + +If the `proxyInterfaces` property of the `ProxyFactoryBean` has been set to one or more +fully qualified interface names, a JDK-based proxy is created. The created +proxy implements all of the interfaces that were specified in the `proxyInterfaces`property. 
If the target class happens to implement a whole lot more interfaces than +those specified in the `proxyInterfaces` property, that is all well and good, but those +additional interfaces are not implemented by the returned proxy. + +If the `proxyInterfaces` property of the `ProxyFactoryBean` has not been set, but +the target class does implement one (or more) interfaces, the`ProxyFactoryBean` auto-detects the fact that the target class does actually +implement at least one interface, and a JDK-based proxy is created. The interfaces +that are actually proxied are all of the interfaces that the target class +implements. In effect, this is the same as supplying a list of each and every +interface that the target class implements to the `proxyInterfaces` property. However, +it is significantly less work and less prone to typographical errors. + +#### 6.4.4. Proxying Interfaces + +Consider a simple example of `ProxyFactoryBean` in action. This example involves: + +* A target bean that is proxied. This is the `personTarget` bean definition in + the example. + +* An `Advisor` and an `Interceptor` used to provide advice. + +* An AOP proxy bean definition to specify the target object (the `personTarget` bean), + the interfaces to proxy, and the advices to apply. + +The following listing shows the example: + +``` + + + + + + + + + + + + + + + + + + + myAdvisor + debugInterceptor + + + +``` + +Note that the `interceptorNames` property takes a list of `String`, which holds the bean names of the +interceptors or advisors in the current factory. You can use advisors, interceptors, before, after +returning, and throws advice objects. The ordering of advisors is significant. + +| |You might be wondering why the list does not hold bean references. The reason for this is
that, if the `singleton` property of the `ProxyFactoryBean` is set to `false`, it must be able to
return independent proxy instances. If any of the advisors is itself a prototype, an
independent instance would need to be returned, so it is necessary to be able to obtain
an instance of the prototype from the factory. Holding a reference is not sufficient.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `person` bean definition shown earlier can be used in place of a `Person` implementation, as +follows: + +Java + +``` +Person person = (Person) factory.getBean("person"); +``` + +Kotlin + +``` +val person = factory.getBean("person") as Person; +``` + +Other beans in the same IoC context can express a strongly typed dependency on it, as +with an ordinary Java object. The following example shows how to do so: + +``` + + + +``` + +The `PersonUser` class in this example exposes a property of type `Person`. As far as +it is concerned, the AOP proxy can be used transparently in place of a “real” person +implementation. However, its class would be a dynamic proxy class. It would be possible +to cast it to the `Advised` interface (discussed later). + +You can conceal the distinction between target and proxy by using an anonymous +inner bean. Only the `ProxyFactoryBean` definition is different. The +advice is included only for completeness. The following example shows how to use an +anonymous inner bean: + +``` + + + + + + + + + + + + + + + + + + myAdvisor + debugInterceptor + + + +``` + +Using an anonymous inner bean has the advantage that there is only one object of type `Person`. This is useful if we want +to prevent users of the application context from obtaining a reference to the un-advised +object or need to avoid any ambiguity with Spring IoC autowiring. 
There is also, +arguably, an advantage in that the `ProxyFactoryBean` definition is self-contained. +However, there are times when being able to obtain the un-advised target from the +factory might actually be an advantage (for example, in certain test scenarios). + +#### 6.4.5. Proxying Classes + +What if you need to proxy a class, rather than one or more interfaces? + +Imagine that in our earlier example, there was no `Person` interface. We needed to advise +a class called `Person` that did not implement any business interface. In this case, you +can configure Spring to use CGLIB proxying rather than dynamic proxies. To do so, set the`proxyTargetClass` property on the `ProxyFactoryBean` shown earlier to `true`. While it is best to +program to interfaces rather than classes, the ability to advise classes that do not +implement interfaces can be useful when working with legacy code. (In general, Spring +is not prescriptive. While it makes it easy to apply good practices, it avoids forcing a +particular approach.) + +If you want to, you can force the use of CGLIB in any case, even if you do have +interfaces. + +CGLIB proxying works by generating a subclass of the target class at runtime. Spring +configures this generated subclass to delegate method calls to the original target. The +subclass is used to implement the Decorator pattern, weaving in the advice. + +CGLIB proxying should generally be transparent to users. However, there are some issues +to consider: + +* `Final` methods cannot be advised, as they cannot be overridden. + +* There is no need to add CGLIB to your classpath. As of Spring 3.2, CGLIB is repackaged + and included in the spring-core JAR. In other words, CGLIB-based AOP works “out of + the box”, as do JDK dynamic proxies. + +There is little performance difference between CGLIB proxying and dynamic proxies. +Performance should not be a decisive consideration in this case. + +#### 6.4.6. 
Using “Global” Advisors + +By appending an asterisk to an interceptor name, all advisors with bean names that match +the part before the asterisk are added to the advisor chain. This can come in handy +if you need to add a standard set of “global” advisors. The following example defines +two global advisors: + +``` + + + + + global* + + + + + + +``` + +### 6.5. Concise Proxy Definitions + +Especially when defining transactional proxies, you may end up with many similar proxy +definitions. The use of parent and child bean definitions, along with inner bean +definitions, can result in much cleaner and more concise proxy definitions. + +First, we create a parent, template, bean definition for the proxy, as follows: + +``` + + + + + PROPAGATION_REQUIRED + + + +``` + +This is never instantiated itself, so it can actually be incomplete. Then, each proxy +that needs to be created is a child bean definition, which wraps the target of the +proxy as an inner bean definition, since the target is never used on its own anyway. +The following example shows such a child bean: + +``` + + + + + + +``` + +You can override properties from the parent template. In the following example, +we override the transaction propagation settings: + +``` + + + + + + + + PROPAGATION_REQUIRED,readOnly + PROPAGATION_REQUIRED,readOnly + PROPAGATION_REQUIRED,readOnly + PROPAGATION_REQUIRED + + + +``` + +Note that in the parent bean example, we explicitly marked the parent bean definition as +being abstract by setting the `abstract` attribute to `true`, as described[previously](#beans-child-bean-definitions), so that it may not actually ever be +instantiated. Application contexts (but not simple bean factories), by default, +pre-instantiate all singletons. Therefore, it is important (at least for singleton beans) +that, if you have a (parent) bean definition that you intend to use only as a template, +and this definition specifies a class, you must make sure to set the `abstract`attribute to `true`. 
Otherwise, the application context actually tries to +pre-instantiate it. + +### 6.6. Creating AOP Proxies Programmatically with the `ProxyFactory` + +It is easy to create AOP proxies programmatically with Spring. This lets you use +Spring AOP without dependency on Spring IoC. + +The interfaces implemented by the target object are +automatically proxied. The following listing shows creation of a proxy for a target object, with one +interceptor and one advisor: + +Java + +``` +ProxyFactory factory = new ProxyFactory(myBusinessInterfaceImpl); +factory.addAdvice(myMethodInterceptor); +factory.addAdvisor(myAdvisor); +MyBusinessInterface tb = (MyBusinessInterface) factory.getProxy(); +``` + +Kotlin + +``` +val factory = ProxyFactory(myBusinessInterfaceImpl) +factory.addAdvice(myMethodInterceptor) +factory.addAdvisor(myAdvisor) +val tb = factory.proxy as MyBusinessInterface +``` + +The first step is to construct an object of type`org.springframework.aop.framework.ProxyFactory`. You can create this with a target +object, as in the preceding example, or specify the interfaces to be proxied in an alternate +constructor. + +You can add advices (with interceptors as a specialized kind of advice), advisors, or both +and manipulate them for the life of the `ProxyFactory`. If you add an`IntroductionInterceptionAroundAdvisor`, you can cause the proxy to implement additional +interfaces. + +There are also convenience methods on `ProxyFactory` (inherited from `AdvisedSupport`) +that let you add other advice types, such as before and throws advice.`AdvisedSupport` is the superclass of both `ProxyFactory` and `ProxyFactoryBean`. + +| |Integrating AOP proxy creation with the IoC framework is best practice in most
applications. We recommend that you externalize configuration from Java code with AOP,
as you should in general.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 6.7. Manipulating Advised Objects + +However you create AOP proxies, you can manipulate them by using the`org.springframework.aop.framework.Advised` interface. Any AOP proxy can be cast to this +interface, no matter which other interfaces it implements. This interface includes the +following methods: + +Java + +``` +Advisor[] getAdvisors(); + +void addAdvice(Advice advice) throws AopConfigException; + +void addAdvice(int pos, Advice advice) throws AopConfigException; + +void addAdvisor(Advisor advisor) throws AopConfigException; + +void addAdvisor(int pos, Advisor advisor) throws AopConfigException; + +int indexOf(Advisor advisor); + +boolean removeAdvisor(Advisor advisor) throws AopConfigException; + +void removeAdvisor(int index) throws AopConfigException; + +boolean replaceAdvisor(Advisor a, Advisor b) throws AopConfigException; + +boolean isFrozen(); +``` + +Kotlin + +``` +fun getAdvisors(): Array + +@Throws(AopConfigException::class) +fun addAdvice(advice: Advice) + +@Throws(AopConfigException::class) +fun addAdvice(pos: Int, advice: Advice) + +@Throws(AopConfigException::class) +fun addAdvisor(advisor: Advisor) + +@Throws(AopConfigException::class) +fun addAdvisor(pos: Int, advisor: Advisor) + +fun indexOf(advisor: Advisor): Int + +@Throws(AopConfigException::class) +fun removeAdvisor(advisor: Advisor): Boolean + +@Throws(AopConfigException::class) +fun removeAdvisor(index: Int) + +@Throws(AopConfigException::class) +fun replaceAdvisor(a: Advisor, b: Advisor): Boolean + +fun isFrozen(): Boolean +``` + +The `getAdvisors()` method returns an `Advisor` for every advisor, interceptor, or +other advice type that has been added to the factory. 
If you added an `Advisor`, the +returned advisor at this index is the object that you added. If you added an +interceptor or other advice type, Spring wrapped this in an advisor with a +pointcut that always returns `true`. Thus, if you added a `MethodInterceptor`, the advisor +returned for this index is a `DefaultPointcutAdvisor` that returns your`MethodInterceptor` and a pointcut that matches all classes and methods. + +The `addAdvisor()` methods can be used to add any `Advisor`. Usually, the advisor holding +pointcut and advice is the generic `DefaultPointcutAdvisor`, which you can use with +any advice or pointcut (but not for introductions). + +By default, it is possible to add or remove advisors or interceptors even once a proxy +has been created. The only restriction is that it is impossible to add or remove an +introduction advisor, as existing proxies from the factory do not show the interface +change. (You can obtain a new proxy from the factory to avoid this problem.) + +The following example shows casting an AOP proxy to the `Advised` interface and examining and +manipulating its advice: + +Java + +``` +Advised advised = (Advised) myObject; +Advisor[] advisors = advised.getAdvisors(); +int oldAdvisorCount = advisors.length; +System.out.println(oldAdvisorCount + " advisors"); + +// Add an advice like an interceptor without a pointcut +// Will match all proxied methods +// Can use for interceptors, before, after returning or throws advice +advised.addAdvice(new DebugInterceptor()); + +// Add selective advice using a pointcut +advised.addAdvisor(new DefaultPointcutAdvisor(mySpecialPointcut, myAdvice)); + +assertEquals("Added two advisors", oldAdvisorCount + 2, advised.getAdvisors().length); +``` + +Kotlin + +``` +val advised = myObject as Advised +val advisors = advised.advisors +val oldAdvisorCount = advisors.size +println("$oldAdvisorCount advisors") + +// Add an advice like an interceptor without a pointcut +// Will match all proxied methods +// Can use 
for interceptors, before, after returning or throws advice +advised.addAdvice(DebugInterceptor()) + +// Add selective advice using a pointcut +advised.addAdvisor(DefaultPointcutAdvisor(mySpecialPointcut, myAdvice)) + +assertEquals("Added two advisors", oldAdvisorCount + 2, advised.advisors.size) +``` + +| |It is questionable whether it is advisable (no pun intended) to modify advice on a
business object in production, although there are, no doubt, legitimate usage cases.
However, it can be very useful in development (for example, in tests). We have sometimes
found it very useful to be able to add test code in the form of an interceptor or other
advice, getting inside a method invocation that we want to test. (For example, the advice can
get inside a transaction created for that method, perhaps to run SQL to check that
a database was correctly updated, before marking the transaction for roll back.)| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Depending on how you created the proxy, you can usually set a `frozen` flag. In that +case, the `Advised` `isFrozen()` method returns `true`, and any attempts to modify +advice through addition or removal results in an `AopConfigException`. The ability +to freeze the state of an advised object is useful in some cases (for example, to +prevent calling code removing a security interceptor). + +### 6.8. Using the "auto-proxy" facility + +So far, we have considered explicit creation of AOP proxies by using a `ProxyFactoryBean` or +similar factory bean. + +Spring also lets us use “auto-proxy” bean definitions, which can automatically +proxy selected bean definitions. This is built on Spring’s “bean post processor” +infrastructure, which enables modification of any bean definition as the container loads. + +In this model, you set up some special bean definitions in your XML bean definition file +to configure the auto-proxy infrastructure. This lets you declare the targets +eligible for auto-proxying. You need not use `ProxyFactoryBean`. + +There are two ways to do this: + +* By using an auto-proxy creator that refers to specific beans in the current context. 
+ +* A special case of auto-proxy creation that deserves to be considered separately: + auto-proxy creation driven by source-level metadata attributes. + +#### 6.8.1. Auto-proxy Bean Definitions + +This section covers the auto-proxy creators provided by the`org.springframework.aop.framework.autoproxy` package. + +##### `BeanNameAutoProxyCreator` + +The `BeanNameAutoProxyCreator` class is a `BeanPostProcessor` that automatically creates +AOP proxies for beans with names that match literal values or wildcards. The following +example shows how to create a `BeanNameAutoProxyCreator` bean: + +``` + + + + + myInterceptor + + + +``` + +As with `ProxyFactoryBean`, there is an `interceptorNames` property rather than a list +of interceptors, to allow correct behavior for prototype advisors. Named “interceptors” +can be advisors or any advice type. + +As with auto-proxying in general, the main point of using `BeanNameAutoProxyCreator` is +to apply the same configuration consistently to multiple objects, with minimal volume of +configuration. It is a popular choice for applying declarative transactions to multiple +objects. + +Bean definitions whose names match, such as `jdkMyBean` and `onlyJdk` in the preceding +example, are plain old bean definitions with the target class. An AOP proxy is +automatically created by the `BeanNameAutoProxyCreator`. The same advice is applied +to all matching beans. Note that, if advisors are used (rather than the interceptor in +the preceding example), the pointcuts may apply differently to different beans. + +##### `DefaultAdvisorAutoProxyCreator` + +A more general and extremely powerful auto-proxy creator is`DefaultAdvisorAutoProxyCreator`. This automagically applies eligible advisors in the +current context, without the need to include specific bean names in the auto-proxy +advisor’s bean definition. It offers the same merit of consistent configuration and +avoidance of duplication as `BeanNameAutoProxyCreator`. 
+ +Using this mechanism involves: + +* Specifying a `DefaultAdvisorAutoProxyCreator` bean definition. + +* Specifying any number of advisors in the same or related contexts. Note that these + must be advisors, not interceptors or other advices. This is necessary, + because there must be a pointcut to evaluate, to check the eligibility of each advice + to candidate bean definitions. + +The `DefaultAdvisorAutoProxyCreator` automatically evaluates the pointcut contained +in each advisor, to see what (if any) advice it should apply to each business object +(such as `businessObject1` and `businessObject2` in the example). + +This means that any number of advisors can be applied automatically to each business +object. If no pointcut in any of the advisors matches any method in a business object, +the object is not proxied. As bean definitions are added for new business objects, +they are automatically proxied if necessary. + +Auto-proxying in general has the advantage of making it impossible for callers or +dependencies to obtain an un-advised object. Calling `getBean("businessObject1")` on this`ApplicationContext` returns an AOP proxy, not the target business object. (The “inner +bean” idiom shown earlier also offers this benefit.) + +The following example creates a `DefaultAdvisorAutoProxyCreator` bean and the other +elements discussed in this section: + +``` + + + + + + + + + + + + + +``` + +The `DefaultAdvisorAutoProxyCreator` is very useful if you want to apply the same advice +consistently to many business objects. Once the infrastructure definitions are in place, +you can add new business objects without including specific proxy configuration. +You can also easily drop in additional aspects (for example, tracing or +performance monitoring aspects) with minimal change to configuration. 
+ +The `DefaultAdvisorAutoProxyCreator` offers support for filtering (by using a naming +convention so that only certain advisors are evaluated, which allows the use of multiple, +differently configured, AdvisorAutoProxyCreators in the same factory) and ordering. +Advisors can implement the `org.springframework.core.Ordered` interface to ensure +correct ordering if this is an issue. The `TransactionAttributeSourceAdvisor` used in the +preceding example has a configurable order value. The default setting is unordered. + +### 6.9. Using `TargetSource` Implementations + +Spring offers the concept of a `TargetSource`, expressed in the`org.springframework.aop.TargetSource` interface. This interface is responsible for +returning the “target object” that implements the join point. The `TargetSource`implementation is asked for a target instance each time the AOP proxy handles a method +invocation. + +Developers who use Spring AOP do not normally need to work directly with `TargetSource` implementations, but +this provides a powerful means of supporting pooling, hot swappable, and other +sophisticated targets. For example, a pooling `TargetSource` can return a different target +instance for each invocation, by using a pool to manage instances. + +If you do not specify a `TargetSource`, a default implementation is used to wrap a +local object. The same target is returned for each invocation (as you would expect). + +The rest of this section describes the standard target sources provided with Spring and how you can use them. + +| |When using a custom target source, your target will usually need to be a prototype
rather than a singleton bean definition. This allows Spring to create a new target
instance when required.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.9.1. Hot-swappable Target Sources + +The `org.springframework.aop.target.HotSwappableTargetSource` exists to let the target +of an AOP proxy be switched while letting callers keep their references to it. + +Changing the target source’s target takes effect immediately. The`HotSwappableTargetSource` is thread-safe. + +You can change the target by using the `swap()` method on HotSwappableTargetSource, as the follow example shows: + +Java + +``` +HotSwappableTargetSource swapper = (HotSwappableTargetSource) beanFactory.getBean("swapper"); +Object oldTarget = swapper.swap(newTarget); +``` + +Kotlin + +``` +val swapper = beanFactory.getBean("swapper") as HotSwappableTargetSource +val oldTarget = swapper.swap(newTarget) +``` + +The following example shows the required XML definitions: + +``` + + + + + + + + + +``` + +The preceding `swap()` call changes the target of the swappable bean. Clients that hold a +reference to that bean are unaware of the change but immediately start hitting +the new target. + +Although this example does not add any advice (it is not necessary to add advice to +use a `TargetSource`), any `TargetSource` can be used in conjunction with +arbitrary advice. + +#### 6.9.2. Pooling Target Sources + +Using a pooling target source provides a similar programming model to stateless session +EJBs, in which a pool of identical instances is maintained, with method invocations +going to free objects in the pool. + +A crucial difference between Spring pooling and SLSB pooling is that Spring pooling can +be applied to any POJO. As with Spring in general, this service can be applied in a +non-invasive way. + +Spring provides support for Commons Pool 2.2, which provides a +fairly efficient pooling implementation. 
You need the `commons-pool` Jar on your +application’s classpath to use this feature. You can also subclass`org.springframework.aop.target.AbstractPoolingTargetSource` to support any other +pooling API. + +| |Commons Pool 1.5+ is also supported but is deprecated as of Spring Framework 4.2.| +|---|---------------------------------------------------------------------------------| + +The following listing shows an example configuration: + +``` + + ... properties omitted + + + + + + + + + + + +``` + +Note that the target object (`businessObjectTarget` in the preceding example) must be a +prototype. This lets the `PoolingTargetSource` implementation create new instances +of the target to grow the pool as necessary. See the [javadoc of`AbstractPoolingTargetSource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/aop/target/AbstractPoolingTargetSource.html) and the concrete subclass you wish to use for information +about its properties. `maxSize` is the most basic and is always guaranteed to be present. + +In this case, `myInterceptor` is the name of an interceptor that would need to be +defined in the same IoC context. However, you need not specify interceptors to +use pooling. If you want only pooling and no other advice, do not set the`interceptorNames` property at all. + +You can configure Spring to be able to cast any pooled object to the`org.springframework.aop.target.PoolingConfig` interface, which exposes information +about the configuration and current size of the pool through an introduction. You +need to define an advisor similar to the following: + +``` + + + + +``` + +This advisor is obtained by calling a convenience method on the`AbstractPoolingTargetSource` class, hence the use of `MethodInvokingFactoryBean`. This +advisor’s name (`poolConfigAdvisor`, here) must be in the list of interceptors names in +the `ProxyFactoryBean` that exposes the pooled object. 
+ +The cast is defined as follows: + +Java + +``` +PoolingConfig conf = (PoolingConfig) beanFactory.getBean("businessObject"); +System.out.println("Max pool size is " + conf.getMaxSize()); +``` + +Kotlin + +``` +val conf = beanFactory.getBean("businessObject") as PoolingConfig +println("Max pool size is " + conf.maxSize) +``` + +| |Pooling stateless service objects is not usually necessary. We do not believe it should
be the default choice, as most stateless objects are naturally thread safe, and instance
pooling is problematic if resources are cached.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Simpler pooling is available by using auto-proxying. You can set the `TargetSource` implementations +used by any auto-proxy creator. + +#### 6.9.3. Prototype Target Sources + +Setting up a “prototype” target source is similar to setting up a pooling `TargetSource`. In this +case, a new instance of the target is created on every method invocation. Although +the cost of creating a new object is not high in a modern JVM, the cost of wiring up the +new object (satisfying its IoC dependencies) may be more expensive. Thus, you should not +use this approach without very good reason. + +To do this, you could modify the `poolTargetSource` definition shown earlier as follows +(we also changed the name, for clarity): + +``` + + + +``` + +The only property is the name of the target bean. Inheritance is used in the`TargetSource` implementations to ensure consistent naming. As with the pooling target +source, the target bean must be a prototype bean definition. + +#### 6.9.4. `ThreadLocal` Target Sources + +`ThreadLocal` target sources are useful if you need an object to be created for each +incoming request (per thread that is). The concept of a `ThreadLocal` provides a JDK-wide +facility to transparently store a resource alongside a thread. Setting up a`ThreadLocalTargetSource` is pretty much the same as was explained for the other types +of target source, as the following example shows: + +``` + + + +``` + +| |`ThreadLocal` instances come with serious issues (potentially resulting in memory leaks) when
incorrectly using them in multi-threaded and multi-classloader environments. You
should always consider wrapping a thread local in some other class and never directly use
the `ThreadLocal` itself (except in the wrapper class). Also, you should
always remember to correctly set and unset (where the latter simply involves a call to `ThreadLocal.set(null)`) the resource local to the thread. Unsetting should be done in
any case, since not unsetting it might result in problematic behavior. Spring’s`ThreadLocal` support does this for you and should always be considered in favor of using`ThreadLocal` instances without other proper handling code.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 6.10. Defining New Advice Types + +Spring AOP is designed to be extensible. While the interception implementation strategy +is presently used internally, it is possible to support arbitrary advice types in +addition to the interception around advice, before, throws advice, and +after returning advice. + +The `org.springframework.aop.framework.adapter` package is an SPI package that lets +support for new custom advice types be added without changing the core framework. +The only constraint on a custom `Advice` type is that it must implement the`org.aopalliance.aop.Advice` marker interface. + +See the [`org.springframework.aop.framework.adapter`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/aop/framework/adapter/package-frame.html)javadoc for further information. + +## 7. 
Null-safety + +Although Java does not let you express null-safety with its type system, the Spring Framework +now provides the following annotations in the `org.springframework.lang` package to let you +declare nullability of APIs and fields: + +* [`@Nullable`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/lang/Nullable.html): Annotation to indicate that a + specific parameter, return value, or field can be `null`. + +* [`@NonNull`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/lang/NonNull.html): Annotation to indicate that a specific + parameter, return value, or field cannot be `null` (not needed on parameters / return values + and fields where `@NonNullApi` and `@NonNullFields` apply, respectively). + +* [`@NonNullApi`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/lang/NonNullApi.html): Annotation at the package level + that declares non-null as the default semantics for parameters and return values. + +* [`@NonNullFields`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/lang/NonNullFields.html): Annotation at the package + level that declares non-null as the default semantics for fields. + +The Spring Framework itself leverages these annotations, but they can also be used in any +Spring-based Java project to declare null-safe APIs and optionally null-safe fields. +Generic type arguments, varargs and array elements nullability are not supported yet but +should be in an upcoming release, see [SPR-15942](https://jira.spring.io/browse/SPR-15942)for up-to-date information. Nullability declarations are expected to be fine-tuned between +Spring Framework releases, including minor ones. Nullability of types used inside method +bodies is outside of the scope of this feature. + +| |Other common libraries such as Reactor and Spring Data provide null-safe APIs that
use a similar nullability arrangement, delivering a consistent overall experience for
Spring application developers.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 7.1. Use cases + +In addition to providing an explicit declaration for Spring Framework API nullability, +these annotations can be used by an IDE (such as IDEA or Eclipse) to provide useful +warnings related to null-safety in order to avoid `NullPointerException` at runtime. + +They are also used to make Spring API null-safe in Kotlin projects, since Kotlin natively +supports [null-safety](https://kotlinlang.org/docs/reference/null-safety.html). More details +are available in the [Kotlin support documentation](languages.html#kotlin-null-safety). + +### 7.2. JSR-305 meta-annotations + +Spring annotations are meta-annotated with [JSR 305](https://jcp.org/en/jsr/detail?id=305)annotations (a dormant but wide-spread JSR). JSR-305 meta-annotations let tooling vendors +like IDEA or Kotlin provide null-safety support in a generic way, without having to +hard-code support for Spring annotations. + +It is not necessary nor recommended to add a JSR-305 dependency to the project classpath to +take advantage of Spring null-safe API. Only projects such as Spring-based libraries that use +null-safety annotations in their codebase should add `com.google.code.findbugs:jsr305:3.0.2`with `compileOnly` Gradle configuration or Maven `provided` scope to avoid compile warnings. + +## 8. Data Buffers and Codecs + +Java NIO provides `ByteBuffer` but many libraries build their own byte buffer API on top, +especially for network operations where reusing buffers and/or using direct buffers is +beneficial for performance. For example Netty has the `ByteBuf` hierarchy, Undertow uses +XNIO, Jetty uses pooled byte buffers with a callback to be released, and so on. 
+The `spring-core` module provides a set of abstractions to work with various byte buffer +APIs as follows: + +* [`DataBufferFactory`](#databuffers-factory) abstracts the creation of a data buffer. + +* [`DataBuffer`](#databuffers-buffer) represents a byte buffer, which may be[pooled](#databuffers-buffer-pooled). + +* [`DataBufferUtils`](#databuffers-utils) offers utility methods for data buffers. + +* [Codecs](#codecs) decode or encode data buffer streams into higher level objects. + +### 8.1. `DataBufferFactory` + +`DataBufferFactory` is used to create data buffers in one of two ways: + +1. Allocate a new data buffer, optionally specifying capacity upfront, if known, which is + more efficient even though implementations of `DataBuffer` can grow and shrink on demand. + +2. Wrap an existing `byte[]` or `java.nio.ByteBuffer`, which decorates the given data with + a `DataBuffer` implementation and that does not involve allocation. + +Note that WebFlux applications do not create a `DataBufferFactory` directly but instead +access it through the `ServerHttpResponse` or the `ClientHttpRequest` on the client side. +The type of factory depends on the underlying client or server, e.g.`NettyDataBufferFactory` for Reactor Netty, `DefaultDataBufferFactory` for others. + +### 8.2. `DataBuffer` + +The `DataBuffer` interface offers similar operations as `java.nio.ByteBuffer` but also +brings a few additional benefits some of which are inspired by the Netty `ByteBuf`. +Below is a partial list of benefits: + +* Read and write with independent positions, i.e. not requiring a call to `flip()` to + alternate between read and write. + +* Capacity expanded on demand as with `java.lang.StringBuilder`. + +* Pooled buffers and reference counting via [`PooledDataBuffer`](#databuffers-buffer-pooled). + +* View a buffer as `java.nio.ByteBuffer`, `InputStream`, or `OutputStream`. + +* Determine the index, or the last index, for a given byte. + +### 8.3. 
`PooledDataBuffer` + +As explained in the Javadoc for[ByteBuffer](https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html), +byte buffers can be direct or non-direct. Direct buffers may reside outside the Java heap +which eliminates the need for copying for native I/O operations. That makes direct buffers +particularly useful for receiving and sending data over a socket, but they’re also more +expensive to create and release, which leads to the idea of pooling buffers. + +`PooledDataBuffer` is an extension of `DataBuffer` that helps with reference counting which +is essential for byte buffer pooling. How does it work? When a `PooledDataBuffer` is +allocated the reference count is at 1. Calls to `retain()` increment the count, while +calls to `release()` decrement it. As long as the count is above 0, the buffer is +guaranteed not to be released. When the count is decreased to 0, the pooled buffer can be +released, which in practice could mean the reserved memory for the buffer is returned to +the memory pool. + +Note that instead of operating on `PooledDataBuffer` directly, in most cases it’s better +to use the convenience methods in `DataBufferUtils` that apply release or retain to a`DataBuffer` only if it is an instance of `PooledDataBuffer`. + +### 8.4. `DataBufferUtils` + +`DataBufferUtils` offers a number of utility methods to operate on data buffers: + +* Join a stream of data buffers into a single buffer possibly with zero copy, e.g. via + composite buffers, if that’s supported by the underlying byte buffer API. + +* Turn `InputStream` or NIO `Channel` into `Flux`, and vice versa a`Publisher` into `OutputStream` or NIO `Channel`. + +* Methods to release or retain a `DataBuffer` if the buffer is an instance of`PooledDataBuffer`. + +* Skip or take from a stream of bytes until a specific byte count. + +### 8.5. 
Codecs + +The `org.springframework.core.codec` package provides the following strategy interfaces: + +* `Encoder` to encode `Publisher` into a stream of data buffers. + +* `Decoder` to decode `Publisher` into a stream of higher level objects. + +The `spring-core` module provides `byte[]`, `ByteBuffer`, `DataBuffer`, `Resource`, and`String` encoder and decoder implementations. The `spring-web` module adds Jackson JSON, +Jackson Smile, JAXB2, Protocol Buffers and other encoders and decoders. See[Codecs](web-reactive.html#webflux-codecs) in the WebFlux section. + +### 8.6. Using `DataBuffer` + +When working with data buffers, special care must be taken to ensure buffers are released +since they may be [pooled](#databuffers-buffer-pooled). We’ll use codecs to illustrate +how that works but the concepts apply more generally. Let’s see what codecs must do +internally to manage data buffers. + +A `Decoder` is the last to read input data buffers, before creating higher level +objects, and therefore it must release them as follows: + +1. If a `Decoder` simply reads each input buffer and is ready to + release it immediately, it can do so via `DataBufferUtils.release(dataBuffer)`. + +2. If a `Decoder` is using `Flux` or `Mono` operators such as `flatMap`, `reduce`, and + others that prefetch and cache data items internally, or is using operators such as`filter`, `skip`, and others that leave out items, then`doOnDiscard(PooledDataBuffer.class, DataBufferUtils::release)` must be added to the + composition chain to ensure such buffers are released prior to being discarded, possibly + also as a result of an error or cancellation signal. + +3. If a `Decoder` holds on to one or more data buffers in any other way, it must + ensure they are released when fully read, or in case of an error or cancellation signals that + take place before the cached data buffers have been read and released. 
+ +Note that `DataBufferUtils#join` offers a safe and efficient way to aggregate a data +buffer stream into a single data buffer. Likewise `skipUntilByteCount` and`takeUntilByteCount` are additional safe methods for decoders to use. + +An `Encoder` allocates data buffers that others must read (and release). So an `Encoder`doesn’t have much to do. However an `Encoder` must take care to release a data buffer if +a serialization error occurs while populating the buffer with data. For example: + +Java + +``` +DataBuffer buffer = factory.allocateBuffer(); +boolean release = true; +try { + // serialize and populate buffer.. + release = false; +} +finally { + if (release) { + DataBufferUtils.release(buffer); + } +} +return buffer; +``` + +Kotlin + +``` +val buffer = factory.allocateBuffer() +var release = true +try { + // serialize and populate buffer.. + release = false +} finally { + if (release) { + DataBufferUtils.release(buffer) + } +} +return buffer +``` + +The consumer of an `Encoder` is responsible for releasing the data buffers it receives. +In a WebFlux application, the output of the `Encoder` is used to write to the HTTP server +response, or to the client HTTP request, in which case releasing the data buffers is the +responsibility of the code writing to the server response, or to the client request. + +Note that when running on Netty, there are debugging options for[troubleshooting buffer leaks](https://github.com/netty/netty/wiki/Reference-counted-objects#troubleshooting-buffer-leaks). + +## 9. Logging + +Since Spring Framework 5.0, Spring comes with its own Commons Logging bridge implemented +in the `spring-jcl` module. The implementation checks for the presence of the Log4j 2.x +API and the SLF4J 1.7 API in the classpath and uses the first one of those found as the +logging implementation, falling back to the Java platform’s core logging facilities (also +known as *JUL* or `java.util.logging`) if neither Log4j 2.x nor SLF4J is available. 
+ +Put Log4j 2.x or Logback (or another SLF4J provider) in your classpath, without any extra +bridges, and let the framework auto-adapt to your choice. For further information see the[Spring +Boot Logging Reference Documentation](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-logging). + +| |Spring’s Commons Logging variant is only meant to be used for infrastructure logging
purposes in the core framework and in extensions.

For logging needs within application code, prefer direct use of Log4j 2.x, SLF4J, or JUL.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +A `Log` implementation may be retrieved via `org.apache.commons.logging.LogFactory` as in +the following example. + +Java + +``` +public class MyBean { + private final Log log = LogFactory.getLog(getClass()); + // ... +} +``` + +Kotlin + +``` +class MyBean { + private val log = LogFactory.getLog(javaClass) + // ... +} +``` + +## 10. Appendix + +### 10.1. XML Schemas + +This part of the appendix lists XML schemas related to the core container. + +#### 10.1.1. The `util` Schema + +As the name implies, the `util` tags deal with common, utility configuration +issues, such as configuring collections, referencing constants, and so forth. +To use the tags in the `util` schema, you need to have the following preamble at the top +of your Spring XML configuration file (the text in the snippet references the +correct schema so that the tags in the `util` namespace are available to you): + +``` + + + + + + +``` + +##### Using `` + +Consider the following bean definition: + +``` + + + + + +``` + +The preceding configuration uses a Spring `FactoryBean` implementation (the`FieldRetrievingFactoryBean`) to set the value of the `isolation` property on a bean +to the value of the `java.sql.Connection.TRANSACTION_SERIALIZABLE` constant. This is +all well and good, but it is verbose and (unnecessarily) exposes Spring’s internal +plumbing to the end user. 
+ +The following XML Schema-based version is more concise, clearly expresses the +developer’s intent (“inject this constant value”), and it reads better: + +``` + + + + + +``` + +###### Setting a Bean Property or Constructor Argument from a Field Value + +[`FieldRetrievingFactoryBean`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/config/FieldRetrievingFactoryBean.html)is a `FactoryBean` that retrieves a `static` or non-static field value. It is typically +used for retrieving `public` `static` `final` constants, which may then be used to set a +property value or constructor argument for another bean. + +The following example shows how a `static` field is exposed, by using the[`staticField`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/config/FieldRetrievingFactoryBean.html#setStaticField(java.lang.String))property: + +``` + + + +``` + +There is also a convenience usage form where the `static` field is specified as the bean +name, as the following example shows: + +``` + +``` + +This does mean that there is no longer any choice in what the bean `id` is (so any other +bean that refers to it also has to use this longer name), but this form is very +concise to define and very convenient to use as an inner bean since the `id` does not have +to be specified for the bean reference, as the following example shows: + +``` + + + + + +``` + +You can also access a non-static (instance) field of another bean, as +described in the API documentation for the[`FieldRetrievingFactoryBean`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/factory/config/FieldRetrievingFactoryBean.html)class. + +Injecting enumeration values into beans as either property or constructor arguments is +easy to do in Spring. 
You do not actually have to do anything or know anything about +the Spring internals (or even about classes such as the `FieldRetrievingFactoryBean`). +The following example enumeration shows how easy injecting an enum value is: + +Java + +``` +package javax.persistence; + +public enum PersistenceContextType { + + TRANSACTION, + EXTENDED +} +``` + +Kotlin + +``` +package javax.persistence + +enum class PersistenceContextType { + + TRANSACTION, + EXTENDED +} +``` + +Now consider the following setter of type `PersistenceContextType` and the corresponding bean definition: + +Java + +``` +package example; + +public class Client { + + private PersistenceContextType persistenceContextType; + + public void setPersistenceContextType(PersistenceContextType type) { + this.persistenceContextType = type; + } +} +``` + +Kotlin + +``` +package example + +class Client { + + lateinit var persistenceContextType: PersistenceContextType +} +``` + +``` + + + +``` + +##### Using `` + +Consider the following example: + +``` + + + + + + + + + + + + +``` + +The preceding configuration uses a Spring `FactoryBean` implementation (the`PropertyPathFactoryBean`) to create a bean (of type `int`) called `testBean.age` that +has a value equal to the `age` property of the `testBean` bean. + +Now consider the following example, which adds a `` element: + +``` + + + + + + + + + + + + +``` + +The value of the `path` attribute of the `` element follows the form of`beanName.beanProperty`. In this case, it picks up the `age` property of the bean named`testBean`. The value of that `age` property is `10`. + +###### Using `` to Set a Bean Property or Constructor Argument ###### + +`PropertyPathFactoryBean` is a `FactoryBean` that evaluates a property path on a given +target object. The target object can be specified directly or by a bean name. You can then use this +value in another bean definition as a property value or constructor +argument. 
+ +The following example shows a path being used against another bean, by name: + +``` + + + + + + + + + + + + + + + +``` + +In the following example, a path is evaluated against an inner bean: + +``` + + + + + + + + + +``` + +There is also a shortcut form, where the bean name is the property path. +The following example shows the shortcut form: + +``` + + +``` + +This form does mean that there is no choice in the name of the bean. Any reference to it +also has to use the same `id`, which is the path. If used as an inner +bean, there is no need to refer to it at all, as the following example shows: + +``` + + + + + +``` + +You can specifically set the result type in the actual definition. This is not necessary +for most use cases, but it can sometimes be useful. See the javadoc for more info on +this feature. + +##### Using `` + +Consider the following example: + +``` + + + + +``` + +The preceding configuration uses a Spring `FactoryBean` implementation (the`PropertiesFactoryBean`) to instantiate a `java.util.Properties` instance with values +loaded from the supplied [`Resource`](#resources) location). + +The following example uses a `util:properties` element to make a more concise representation: + +``` + + +``` + +##### Using `` + +Consider the following example: + +``` + + + + + [email protected] + [email protected] + [email protected] + [email protected] + + + +``` + +The preceding configuration uses a Spring `FactoryBean` implementation (the`ListFactoryBean`) to create a `java.util.List` instance and initialize it with values taken +from the supplied `sourceList`. + +The following example uses a `` element to make a more concise representation: + +``` + + + [email protected] + [email protected] + [email protected] + [email protected] + +``` + +You can also explicitly control the exact type of `List` that is instantiated and +populated by using the `list-class` attribute on the `` element. 
For +example, if we really need a `java.util.LinkedList` to be instantiated, we could use the +following configuration: + +``` + + [email protected] + [email protected] + [email protected] + d'[email protected] + +``` + +If no `list-class` attribute is supplied, the container chooses a `List` implementation. + +##### Using `` + +Consider the following example: + +``` + + + + + + + + + + + +``` + +The preceding configuration uses a Spring `FactoryBean` implementation (the`MapFactoryBean`) to create a `java.util.Map` instance initialized with key-value pairs +taken from the supplied `'sourceMap'`. + +The following example uses a `` element to make a more concise representation: + +``` + + + + + + + +``` + +You can also explicitly control the exact type of `Map` that is instantiated and +populated by using the `'map-class'` attribute on the `` element. For +example, if we really need a `java.util.TreeMap` to be instantiated, we could use the +following configuration: + +``` + + + + + + +``` + +If no `'map-class'` attribute is supplied, the container chooses a `Map` implementation. + +##### Using `` + +Consider the following example: + +``` + + + + + [email protected] + [email protected] + [email protected] + [email protected] + + + +``` + +The preceding configuration uses a Spring `FactoryBean` implementation (the`SetFactoryBean`) to create a `java.util.Set` instance initialized with values taken +from the supplied `sourceSet`. + +The following example uses a `` element to make a more concise representation: + +``` + + + [email protected] + [email protected] + [email protected] + [email protected] + +``` + +You can also explicitly control the exact type of `Set` that is instantiated and +populated by using the `set-class` attribute on the `` element. 
For +example, if we really need a `java.util.TreeSet` to be instantiated, we could use the +following configuration: + +``` + + [email protected] + [email protected] + [email protected] + [email protected] + +``` + +If no `set-class` attribute is supplied, the container chooses a `Set` implementation. + +#### 10.1.2. The `aop` Schema + +The `aop` tags deal with configuring all things AOP in Spring, including Spring’s +own proxy-based AOP framework and Spring’s integration with the AspectJ AOP framework. +These tags are comprehensively covered in the chapter entitled [Aspect Oriented Programming with Spring](#aop). + +In the interest of completeness, to use the tags in the `aop` schema, you need to have +the following preamble at the top of your Spring XML configuration file (the text in the +snippet references the correct schema so that the tags in the `aop` namespace +are available to you): + +``` + + + + + + +``` + +#### 10.1.3. The `context` Schema + +The `context` tags deal with `ApplicationContext` configuration that relates to plumbing — that is, not usually beans that are important to an end-user but rather beans that do +a lot of the “grunt” work in Spring, such as `BeanfactoryPostProcessors`. The following +snippet references the correct schema so that the elements in the `context` namespace are +available to you: + +``` + + + + + + +``` + +##### Using `` + +This element activates the replacement of `${…​}` placeholders, which are resolved against a +specified properties file (as a [Spring resource location](#resources)). This element +is a convenience mechanism that sets up a [`PropertySourcesPlaceholderConfigurer`](#beans-factory-placeholderconfigurer) for you. If you need more control over the specific`PropertySourcesPlaceholderConfigurer` setup, you can explicitly define it as a bean yourself. 
+ +##### Using `` + +This element activates the Spring infrastructure to detect annotations in bean classes: + +* Spring’s [`@Configuration`](#beans-factory-metadata) model + +* [`@Autowired`/`@Inject`](#beans-annotation-config), `@Value`, and `@Lookup` + +* JSR-250’s `@Resource`, `@PostConstruct`, and `@PreDestroy` (if available) + +* JAX-WS’s `@WebServiceRef` and EJB 3’s `@EJB` (if available) + +* JPA’s `@PersistenceContext` and `@PersistenceUnit` (if available) + +* Spring’s [`@EventListener`](#context-functionality-events-annotation) + +Alternatively, you can choose to explicitly activate the individual `BeanPostProcessors`for those annotations. + +| |This element does not activate processing of Spring’s[`@Transactional`](data-access.html#transaction-declarative-annotations) annotation;
you can use the [``](data-access.html#tx-decl-explained)element for that purpose. Similarly, Spring’s[caching annotations](integration.html#cache-annotations) need to be explicitly[enabled](integration.html#cache-annotation-enable) as well.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Using `` + +This element is detailed in the section on [annotation-based container configuration](#beans-annotation-config). + +##### Using `` + +This element is detailed in the section on [load-time weaving with AspectJ in the Spring Framework](#aop-aj-ltw). + +##### Using `` + +This element is detailed in the section on [using AspectJ to dependency inject domain objects with Spring](#aop-atconfigurable). + +##### Using `` + +This element is detailed in the section on [configuring annotation-based MBean export](integration.html#jmx-context-mbeanexport). + +#### 10.1.4. The Beans Schema + +Last but not least, we have the elements in the `beans` schema. These elements +have been in Spring since the very dawn of the framework. Examples of the various elements +in the `beans` schema are not shown here because they are quite comprehensively covered +in [dependencies and configuration in detail](#beans-factory-properties-detailed)(and, indeed, in that entire [chapter](#beans)). + +Note that you can add zero or more key-value pairs to `` XML definitions. +What, if anything, is done with this extra metadata is totally up to your own custom +logic (and so is typically only of use if you write your own custom elements as described +in the appendix entitled [XML Schema Authoring](#xml-custom)). 
+ +The following example shows the `` element in the context of a surrounding ``(note that, without any logic to interpret it, the metadata is effectively useless +as it stands). + +``` + + + + + (1) + + + + +``` + +|**1**|This is the example `meta` element| +|-----|----------------------------------| + +In the case of the preceding example, you could assume that there is some logic that consumes +the bean definition and sets up some caching infrastructure that uses the supplied metadata. + +### 10.2. XML Schema Authoring + +Since version 2.0, Spring has featured a mechanism for adding schema-based extensions to the +basic Spring XML format for defining and configuring beans. This section covers +how to write your own custom XML bean definition parsers and +integrate such parsers into the Spring IoC container. + +To facilitate authoring configuration files that use a schema-aware XML editor, +Spring’s extensible XML configuration mechanism is based on XML Schema. If you are not +familiar with Spring’s current XML configuration extensions that come with the standard +Spring distribution, you should first read the previous section on [XML Schemas](#xsd-schemas). + +To create new XML configuration extensions: + +1. [Author](#xsd-custom-schema) an XML schema to describe your custom element(s). + +2. [Code](#xsd-custom-namespacehandler) a custom `NamespaceHandler` implementation. + +3. [Code](#xsd-custom-parser) one or more `BeanDefinitionParser` implementations + (this is where the real work is done). + +4. [Register](#xsd-custom-registration) your new artifacts with Spring. + +For a unified example, we create an +XML extension (a custom XML element) that lets us configure objects of the type`SimpleDateFormat` (from the `java.text` package). When we are done, +we will be able to define bean definitions of type `SimpleDateFormat` as follows: + +``` + +``` + +(We include much more detailed +examples follow later in this appendix. 
The intent of this first simple example is to walk you +through the basic steps of making a custom extension.) + +#### 10.2.1. Authoring the Schema + +Creating an XML configuration extension for use with Spring’s IoC container starts with +authoring an XML Schema to describe the extension. For our example, we use the following schema +to configure `SimpleDateFormat` objects: + +``` + + + + + + + + + + + (1) + + + + + + + +``` + +|**1**|The indicated line contains an extension base for all identifiable tags
(meaning they have an `id` attribute that we can use as the bean identifier in the
container). We can use this attribute because we imported the Spring-provided`beans` namespace.| +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The preceding schema lets us configure `SimpleDateFormat` objects directly in an +XML application context file by using the `` element, as the following +example shows: + +``` + +``` + +Note that, after we have created the infrastructure classes, the preceding snippet of XML is +essentially the same as the following XML snippet: + +``` + + + + +``` + +The second of the two preceding snippets +creates a bean in the container (identified by the name `dateFormat` of type`SimpleDateFormat`) with a couple of properties set. + +| |The schema-based approach to creating configuration format allows for tight integration
with an IDE that has a schema-aware XML editor. By using a properly authored schema, you
can use autocompletion to let a user choose between several configuration options
defined in the enumeration.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 10.2.2. Coding a `NamespaceHandler` + +In addition to the schema, we need a `NamespaceHandler` to parse all elements of +this specific namespace that Spring encounters while parsing configuration files. For this example, the`NamespaceHandler` should take care of the parsing of the `myns:dateformat`element. + +The `NamespaceHandler` interface features three methods: + +* `init()`: Allows for initialization of the `NamespaceHandler` and is called by + Spring before the handler is used. + +* `BeanDefinition parse(Element, ParserContext)`: Called when Spring encounters a + top-level element (not nested inside a bean definition or a different namespace). + This method can itself register bean definitions, return a bean definition, or both. + +* `BeanDefinitionHolder decorate(Node, BeanDefinitionHolder, ParserContext)`: Called + when Spring encounters an attribute or nested element of a different namespace. + The decoration of one or more bean definitions is used (for example) with the[scopes that Spring supports](#beans-factory-scopes). + We start by highlighting a simple example, without using decoration, after which + we show decoration in a somewhat more advanced example. + +Although you can code your own `NamespaceHandler` for the entire +namespace (and hence provide code that parses each and every element in the namespace), +it is often the case that each top-level XML element in a Spring XML configuration file +results in a single bean definition (as in our case, where a single ``element results in a single `SimpleDateFormat` bean definition). Spring features a +number of convenience classes that support this scenario. 
In the following example, we +use the `NamespaceHandlerSupport` class: + +Java + +``` +package org.springframework.samples.xml; + +import org.springframework.beans.factory.xml.NamespaceHandlerSupport; + +public class MyNamespaceHandler extends NamespaceHandlerSupport { + + public void init() { + registerBeanDefinitionParser("dateformat", new SimpleDateFormatBeanDefinitionParser()); + } +} +``` + +Kotlin + +``` +package org.springframework.samples.xml + +import org.springframework.beans.factory.xml.NamespaceHandlerSupport + +class MyNamespaceHandler : NamespaceHandlerSupport { + + override fun init() { + registerBeanDefinitionParser("dateformat", SimpleDateFormatBeanDefinitionParser()) + } +} +``` + +You may notice that there is not actually a whole lot of parsing logic +in this class. Indeed, the `NamespaceHandlerSupport` class has a built-in notion of +delegation. It supports the registration of any number of `BeanDefinitionParser`instances, to which it delegates to when it needs to parse an element in its +namespace. This clean separation of concerns lets a `NamespaceHandler` handle the +orchestration of the parsing of all of the custom elements in its namespace while +delegating to `BeanDefinitionParsers` to do the grunt work of the XML parsing. This +means that each `BeanDefinitionParser` contains only the logic for parsing a single +custom element, as we can see in the next step. + +#### 10.2.3. Using `BeanDefinitionParser` + +A `BeanDefinitionParser` is used if the `NamespaceHandler` encounters an XML +element of the type that has been mapped to the specific bean definition parser +(`dateformat` in this case). In other words, the `BeanDefinitionParser` is +responsible for parsing one distinct top-level XML element defined in the schema. 
In +the parser, we' have access to the XML element (and thus to its subelements, too) so that +we can parse our custom XML content, as you can see in the following example: + +Java + +``` +package org.springframework.samples.xml; + +import org.springframework.beans.factory.support.BeanDefinitionBuilder; +import org.springframework.beans.factory.xml.AbstractSingleBeanDefinitionParser; +import org.springframework.util.StringUtils; +import org.w3c.dom.Element; + +import java.text.SimpleDateFormat; + +public class SimpleDateFormatBeanDefinitionParser extends AbstractSingleBeanDefinitionParser { (1) + + protected Class getBeanClass(Element element) { + return SimpleDateFormat.class; (2) + } + + protected void doParse(Element element, BeanDefinitionBuilder bean) { + // this will never be null since the schema explicitly requires that a value be supplied + String pattern = element.getAttribute("pattern"); + bean.addConstructorArgValue(pattern); + + // this however is an optional property + String lenient = element.getAttribute("lenient"); + if (StringUtils.hasText(lenient)) { + bean.addPropertyValue("lenient", Boolean.valueOf(lenient)); + } + } + +} +``` + +|**1**|We use the Spring-provided `AbstractSingleBeanDefinitionParser` to handle a lot of
the basic grunt work of creating a single `BeanDefinition`.| +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| We supply the `AbstractSingleBeanDefinitionParser` superclass with the type that our
single `BeanDefinition` represents. | + +Kotlin + +``` +package org.springframework.samples.xml + +import org.springframework.beans.factory.support.BeanDefinitionBuilder +import org.springframework.beans.factory.xml.AbstractSingleBeanDefinitionParser +import org.springframework.util.StringUtils +import org.w3c.dom.Element + +import java.text.SimpleDateFormat + +class SimpleDateFormatBeanDefinitionParser : AbstractSingleBeanDefinitionParser() { (1) + + override fun getBeanClass(element: Element): Class<*>? { (2) + return SimpleDateFormat::class.java + } + + override fun doParse(element: Element, bean: BeanDefinitionBuilder) { + // this will never be null since the schema explicitly requires that a value be supplied + val pattern = element.getAttribute("pattern") + bean.addConstructorArgValue(pattern) + + // this however is an optional property + val lenient = element.getAttribute("lenient") + if (StringUtils.hasText(lenient)) { + bean.addPropertyValue("lenient", java.lang.Boolean.valueOf(lenient)) + } + } +} +``` + +|**1**|We use the Spring-provided `AbstractSingleBeanDefinitionParser` to handle a lot of
the basic grunt work of creating a single `BeanDefinition`.| +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| We supply the `AbstractSingleBeanDefinitionParser` superclass with the type that our
single `BeanDefinition` represents. | + +In this simple case, this is all that we need to do. The creation of our single`BeanDefinition` is handled by the `AbstractSingleBeanDefinitionParser` superclass, as +is the extraction and setting of the bean definition’s unique identifier. + +#### 10.2.4. Registering the Handler and the Schema + +The coding is finished. All that remains to be done is to make the Spring XML +parsing infrastructure aware of our custom element. We do so by registering our custom`namespaceHandler` and custom XSD file in two special-purpose properties files. These +properties files are both placed in a `META-INF` directory in your application and +can, for example, be distributed alongside your binary classes in a JAR file. The Spring +XML parsing infrastructure automatically picks up your new extension by consuming +these special properties files, the formats of which are detailed in the next two sections. + +##### Writing `META-INF/spring.handlers` + +The properties file called `spring.handlers` contains a mapping of XML Schema URIs to +namespace handler classes. For our example, we need to write the following: + +``` +http\://www.mycompany.example/schema/myns=org.springframework.samples.xml.MyNamespaceHandler +``` + +(The `:` character is a valid delimiter in the Java properties format, so`:` character in the URI needs to be escaped with a backslash.) + +The first part (the key) of the key-value pair is the URI associated with your custom +namespace extension and needs to exactly match exactly the value of the `targetNamespace`attribute, as specified in your custom XSD schema. + +##### Writing 'META-INF/spring.schemas' + +The properties file called `spring.schemas` contains a mapping of XML Schema locations +(referred to, along with the schema declaration, in XML files that use the schema as part +of the `xsi:schemaLocation` attribute) to classpath resources. 
This file is needed +to prevent Spring from absolutely having to use a default `EntityResolver` that requires +Internet access to retrieve the schema file. If you specify the mapping in this +properties file, Spring searches for the schema (in this case,`myns.xsd` in the `org.springframework.samples.xml` package) on the classpath. +The following snippet shows the line we need to add for our custom schema: + +``` +http\://www.mycompany.example/schema/myns/myns.xsd=org/springframework/samples/xml/myns.xsd +``` + +(Remember that the `:` character must be escaped.) + +You are encouraged to deploy your XSD file (or files) right alongside +the `NamespaceHandler` and `BeanDefinitionParser` classes on the classpath. + +#### 10.2.5. Using a Custom Extension in Your Spring XML Configuration + +Using a custom extension that you yourself have implemented is no different from using +one of the “custom” extensions that Spring provides. The following +example uses the custom `` element developed in the previous steps +in a Spring XML configuration file: + +``` + + + + + (1) + + + + + + + + + +``` + +|**1**|Our custom bean.| +|-----|----------------| + +#### 10.2.6. More Detailed Examples + +This section presents some more detailed examples of custom XML extensions. + +##### Nesting Custom Elements within Custom Elements + +The example presented in this section shows how you to write the various artifacts required +to satisfy a target of the following configuration: + +``` + + + + + + + + + + + + +``` + +The preceding configuration nests custom extensions within each other. The class +that is actually configured by the `` element is the `Component`class (shown in the next example). Notice how the `Component` class does not expose a +setter method for the `components` property. This makes it hard (or rather impossible) +to configure a bean definition for the `Component` class by using setter injection. 
+The following listing shows the `Component` class: + +Java + +``` +package com.foo; + +import java.util.ArrayList; +import java.util.List; + +public class Component { + + private String name; + private List components = new ArrayList (); + + // mmm, there is no setter method for the 'components' + public void addComponent(Component component) { + this.components.add(component); + } + + public List getComponents() { + return components; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } +} +``` + +Kotlin + +``` +package com.foo + +import java.util.ArrayList + +class Component { + + var name: String? = null + private val components = ArrayList() + + // mmm, there is no setter method for the 'components' + fun addComponent(component: Component) { + this.components.add(component) + } + + fun getComponents(): List { + return components + } +} +``` + +The typical solution to this issue is to create a custom `FactoryBean` that exposes a +setter property for the `components` property. 
The following listing shows such a custom`FactoryBean`: + +Java + +``` +package com.foo; + +import org.springframework.beans.factory.FactoryBean; + +import java.util.List; + +public class ComponentFactoryBean implements FactoryBean { + + private Component parent; + private List children; + + public void setParent(Component parent) { + this.parent = parent; + } + + public void setChildren(List children) { + this.children = children; + } + + public Component getObject() throws Exception { + if (this.children != null && this.children.size() > 0) { + for (Component child : children) { + this.parent.addComponent(child); + } + } + return this.parent; + } + + public Class getObjectType() { + return Component.class; + } + + public boolean isSingleton() { + return true; + } +} +``` + +Kotlin + +``` +package com.foo + +import org.springframework.beans.factory.FactoryBean +import org.springframework.stereotype.Component + +class ComponentFactoryBean : FactoryBean { + + private var parent: Component? = null + private var children: List? = null + + fun setParent(parent: Component) { + this.parent = parent + } + + fun setChildren(children: List) { + this.children = children + } + + override fun getObject(): Component? { + if (this.children != null && this.children!!.isNotEmpty()) { + for (child in children!!) { + this.parent!!.addComponent(child) + } + } + return this.parent + } + + override fun getObjectType(): Class? { + return Component::class.java + } + + override fun isSingleton(): Boolean { + return true + } +} +``` + +This works nicely, but it exposes a lot of Spring plumbing to the end user. What we are +going to do is write a custom extension that hides away all of this Spring plumbing. 
+If we stick to [the steps described previously](#xsd-custom-introduction), we start off +by creating the XSD schema to define the structure of our custom tag, as the following +listing shows: + +``` + + + + + + + + + + + + + + + +``` + +Again following [the process described earlier](#xsd-custom-introduction), +we then create a custom `NamespaceHandler`: + +Java + +``` +package com.foo; + +import org.springframework.beans.factory.xml.NamespaceHandlerSupport; + +public class ComponentNamespaceHandler extends NamespaceHandlerSupport { + + public void init() { + registerBeanDefinitionParser("component", new ComponentBeanDefinitionParser()); + } +} +``` + +Kotlin + +``` +package com.foo + +import org.springframework.beans.factory.xml.NamespaceHandlerSupport + +class ComponentNamespaceHandler : NamespaceHandlerSupport() { + + override fun init() { + registerBeanDefinitionParser("component", ComponentBeanDefinitionParser()) + } +} +``` + +Next up is the custom `BeanDefinitionParser`. Remember that we are creating +a `BeanDefinition` that describes a `ComponentFactoryBean`. 
The following +listing shows our custom `BeanDefinitionParser` implementation: + +Java + +``` +package com.foo; + +import org.springframework.beans.factory.config.BeanDefinition; +import org.springframework.beans.factory.support.AbstractBeanDefinition; +import org.springframework.beans.factory.support.BeanDefinitionBuilder; +import org.springframework.beans.factory.support.ManagedList; +import org.springframework.beans.factory.xml.AbstractBeanDefinitionParser; +import org.springframework.beans.factory.xml.ParserContext; +import org.springframework.util.xml.DomUtils; +import org.w3c.dom.Element; + +import java.util.List; + +public class ComponentBeanDefinitionParser extends AbstractBeanDefinitionParser { + + protected AbstractBeanDefinition parseInternal(Element element, ParserContext parserContext) { + return parseComponentElement(element); + } + + private static AbstractBeanDefinition parseComponentElement(Element element) { + BeanDefinitionBuilder factory = BeanDefinitionBuilder.rootBeanDefinition(ComponentFactoryBean.class); + factory.addPropertyValue("parent", parseComponent(element)); + + List childElements = DomUtils.getChildElementsByTagName(element, "component"); + if (childElements != null && childElements.size() > 0) { + parseChildComponents(childElements, factory); + } + + return factory.getBeanDefinition(); + } + + private static BeanDefinition parseComponent(Element element) { + BeanDefinitionBuilder component = BeanDefinitionBuilder.rootBeanDefinition(Component.class); + component.addPropertyValue("name", element.getAttribute("name")); + return component.getBeanDefinition(); + } + + private static void parseChildComponents(List childElements, BeanDefinitionBuilder factory) { + ManagedList children = new ManagedList(childElements.size()); + for (Element element : childElements) { + children.add(parseComponentElement(element)); + } + factory.addPropertyValue("children", children); + } +} +``` + +Kotlin + +``` +package com.foo + +import 
org.springframework.beans.factory.config.BeanDefinition +import org.springframework.beans.factory.support.AbstractBeanDefinition +import org.springframework.beans.factory.support.BeanDefinitionBuilder +import org.springframework.beans.factory.support.ManagedList +import org.springframework.beans.factory.xml.AbstractBeanDefinitionParser +import org.springframework.beans.factory.xml.ParserContext +import org.springframework.util.xml.DomUtils +import org.w3c.dom.Element + +import java.util.List + +class ComponentBeanDefinitionParser : AbstractBeanDefinitionParser() { + + override fun parseInternal(element: Element, parserContext: ParserContext): AbstractBeanDefinition? { + return parseComponentElement(element) + } + + private fun parseComponentElement(element: Element): AbstractBeanDefinition { + val factory = BeanDefinitionBuilder.rootBeanDefinition(ComponentFactoryBean::class.java) + factory.addPropertyValue("parent", parseComponent(element)) + + val childElements = DomUtils.getChildElementsByTagName(element, "component") + if (childElements != null && childElements.size > 0) { + parseChildComponents(childElements, factory) + } + + return factory.getBeanDefinition() + } + + private fun parseComponent(element: Element): BeanDefinition { + val component = BeanDefinitionBuilder.rootBeanDefinition(Component::class.java) + component.addPropertyValue("name", element.getAttribute("name")) + return component.beanDefinition + } + + private fun parseChildComponents(childElements: List, factory: BeanDefinitionBuilder) { + val children = ManagedList(childElements.size) + for (element in childElements) { + children.add(parseComponentElement(element)) + } + factory.addPropertyValue("children", children) + } +} +``` + +Finally, the various artifacts need to be registered with the Spring XML infrastructure, +by modifying the `META-INF/spring.handlers` and `META-INF/spring.schemas` files, as follows: + +``` +# in 'META-INF/spring.handlers' 
+http\://www.foo.example/schema/component=com.foo.ComponentNamespaceHandler +``` + +``` +# in 'META-INF/spring.schemas' +http\://www.foo.example/schema/component/component.xsd=com/foo/component.xsd +``` + +##### Custom Attributes on “Normal” Elements + +Writing your own custom parser and the associated artifacts is not hard. However, +it is sometimes not the right thing to do. Consider a scenario where you need to +add metadata to already existing bean definitions. In this case, you certainly +do not want to have to write your own entire custom extension. Rather, you merely +want to add an additional attribute to the existing bean definition element. + +By way of another example, suppose that you define a bean definition for a +service object that (unknown to it) accesses a clustered[JCache](https://jcp.org/en/jsr/detail?id=107), and you want to ensure that the +named JCache instance is eagerly started within the surrounding cluster. +The following listing shows such a definition: + +``` + + + +``` + +We can then create another `BeanDefinition` when the`'jcache:cache-name'` attribute is parsed. This `BeanDefinition` then initializes +the named JCache for us. We can also modify the existing `BeanDefinition` for the`'checkingAccountService'` so that it has a dependency on this new +JCache-initializing `BeanDefinition`. The following listing shows our `JCacheInitializer`: + +Java + +``` +package com.foo; + +public class JCacheInitializer { + + private String name; + + public JCacheInitializer(String name) { + this.name = name; + } + + public void initialize() { + // lots of JCache API calls to initialize the named cache... + } +} +``` + +Kotlin + +``` +package com.foo + +class JCacheInitializer(private val name: String) { + + fun initialize() { + // lots of JCache API calls to initialize the named cache... + } +} +``` + +Now we can move onto the custom extension. 
First, we need to author +the XSD schema that describes the custom attribute, as follows: + +``` + + + + + + + +``` + +Next, we need to create the associated `NamespaceHandler`, as follows: + +Java + +``` +package com.foo; + +import org.springframework.beans.factory.xml.NamespaceHandlerSupport; + +public class JCacheNamespaceHandler extends NamespaceHandlerSupport { + + public void init() { + super.registerBeanDefinitionDecoratorForAttribute("cache-name", + new JCacheInitializingBeanDefinitionDecorator()); + } + +} +``` + +Kotlin + +``` +package com.foo + +import org.springframework.beans.factory.xml.NamespaceHandlerSupport + +class JCacheNamespaceHandler : NamespaceHandlerSupport() { + + override fun init() { + super.registerBeanDefinitionDecoratorForAttribute("cache-name", + JCacheInitializingBeanDefinitionDecorator()) + } + +} +``` + +Next, we need to create the parser. Note that, in this case, because we are going to parse +an XML attribute, we write a `BeanDefinitionDecorator` rather than a `BeanDefinitionParser`. 
+The following listing shows our `BeanDefinitionDecorator` implementation: + +Java + +``` +package com.foo; + +import org.springframework.beans.factory.config.BeanDefinitionHolder; +import org.springframework.beans.factory.support.AbstractBeanDefinition; +import org.springframework.beans.factory.support.BeanDefinitionBuilder; +import org.springframework.beans.factory.xml.BeanDefinitionDecorator; +import org.springframework.beans.factory.xml.ParserContext; +import org.w3c.dom.Attr; +import org.w3c.dom.Node; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class JCacheInitializingBeanDefinitionDecorator implements BeanDefinitionDecorator { + + private static final String[] EMPTY_STRING_ARRAY = new String[0]; + + public BeanDefinitionHolder decorate(Node source, BeanDefinitionHolder holder, + ParserContext ctx) { + String initializerBeanName = registerJCacheInitializer(source, ctx); + createDependencyOnJCacheInitializer(holder, initializerBeanName); + return holder; + } + + private void createDependencyOnJCacheInitializer(BeanDefinitionHolder holder, + String initializerBeanName) { + AbstractBeanDefinition definition = ((AbstractBeanDefinition) holder.getBeanDefinition()); + String[] dependsOn = definition.getDependsOn(); + if (dependsOn == null) { + dependsOn = new String[]{initializerBeanName}; + } else { + List dependencies = new ArrayList(Arrays.asList(dependsOn)); + dependencies.add(initializerBeanName); + dependsOn = (String[]) dependencies.toArray(EMPTY_STRING_ARRAY); + } + definition.setDependsOn(dependsOn); + } + + private String registerJCacheInitializer(Node source, ParserContext ctx) { + String cacheName = ((Attr) source).getValue(); + String beanName = cacheName + "-initializer"; + if (!ctx.getRegistry().containsBeanDefinition(beanName)) { + BeanDefinitionBuilder initializer = BeanDefinitionBuilder.rootBeanDefinition(JCacheInitializer.class); + initializer.addConstructorArg(cacheName); + 
ctx.getRegistry().registerBeanDefinition(beanName, initializer.getBeanDefinition()); + } + return beanName; + } +} +``` + +Kotlin + +``` +package com.foo + +import org.springframework.beans.factory.config.BeanDefinitionHolder +import org.springframework.beans.factory.support.AbstractBeanDefinition +import org.springframework.beans.factory.support.BeanDefinitionBuilder +import org.springframework.beans.factory.xml.BeanDefinitionDecorator +import org.springframework.beans.factory.xml.ParserContext +import org.w3c.dom.Attr +import org.w3c.dom.Node + +import java.util.ArrayList + +class JCacheInitializingBeanDefinitionDecorator : BeanDefinitionDecorator { + + override fun decorate(source: Node, holder: BeanDefinitionHolder, + ctx: ParserContext): BeanDefinitionHolder { + val initializerBeanName = registerJCacheInitializer(source, ctx) + createDependencyOnJCacheInitializer(holder, initializerBeanName) + return holder + } + + private fun createDependencyOnJCacheInitializer(holder: BeanDefinitionHolder, + initializerBeanName: String) { + val definition = holder.beanDefinition as AbstractBeanDefinition + var dependsOn = definition.dependsOn + dependsOn = if (dependsOn == null) { + arrayOf(initializerBeanName) + } else { + val dependencies = ArrayList(listOf(*dependsOn)) + dependencies.add(initializerBeanName) + dependencies.toTypedArray() + } + definition.setDependsOn(*dependsOn) + } + + private fun registerJCacheInitializer(source: Node, ctx: ParserContext): String { + val cacheName = (source as Attr).value + val beanName = "$cacheName-initializer" + if (!ctx.registry.containsBeanDefinition(beanName)) { + val initializer = BeanDefinitionBuilder.rootBeanDefinition(JCacheInitializer::class.java) + initializer.addConstructorArg(cacheName) + ctx.registry.registerBeanDefinition(beanName, initializer.getBeanDefinition()) + } + return beanName + } +} +``` + +Finally, we need to register the various artifacts with the Spring XML infrastructure +by modifying the 
`META-INF/spring.handlers` and `META-INF/spring.schemas` files, as follows: + +``` +# in 'META-INF/spring.handlers' +http\://www.foo.example/schema/jcache=com.foo.JCacheNamespaceHandler +``` + +``` +# in 'META-INF/spring.schemas' +http\://www.foo.example/schema/jcache/jcache.xsd=com/foo/jcache.xsd +``` + +### 10.3. Application Startup Steps + +This part of the appendix lists the existing `StartupSteps` that the core container is instrumented with. + +| |The name and detailed information about each startup step is not part of the public contract and
is subject to change; this is considered an implementation detail of the core container and will follow
its behavior changes.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| Name | Description | Tags | +|----------------------------------------------|----------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `spring.beans.instantiate` | Instantiation of a bean and its dependencies. |`beanName` the name of the bean, `beanType` the type required at the injection point.| +| `spring.beans.smart-initialize` | Initialization of `SmartInitializingSingleton` beans. | `beanName` the name of the bean. | +|`spring.context.annotated-bean-reader.create` | Creation of the `AnnotatedBeanDefinitionReader`. | | +| `spring.context.base-packages.scan` | Scanning of base packages. | `packages` array of base packages for scanning. | +| `spring.context.beans.post-process` | Beans post-processing phase. | | +| `spring.context.bean-factory.post-process` | Invocation of the `BeanFactoryPostProcessor` beans. | `postProcessor` the current post-processor. | +|`spring.context.beandef-registry.post-process`| Invocation of the `BeanDefinitionRegistryPostProcessor` beans. | `postProcessor` the current post-processor. | +| `spring.context.component-classes.register` |Registration of component classes through `AnnotationConfigApplicationContext#register`.| `classes` array of given classes for registration. | +| `spring.context.config-classes.enhance` | Enhancement of configuration classes with CGLIB proxies. | `classCount` count of enhanced classes. | +| `spring.context.config-classes.parse` | Configuration classes parsing phase with the `ConfigurationClassPostProcessor`. | `classCount` count of processed classes. | +| `spring.context.refresh` | Application context refresh phase. 
| | diff --git a/docs/en/spring-framework/data-access.md b/docs/en/spring-framework/data-access.md new file mode 100644 index 0000000000000000000000000000000000000000..9b3b5f92b2aac57796fceaa8ee19efbd82032ec1 --- /dev/null +++ b/docs/en/spring-framework/data-access.md @@ -0,0 +1,8180 @@ +# Data Access + +This part of the reference documentation is concerned with data access and the +interaction between the data access layer and the business or service layer. + +Spring’s comprehensive transaction management support is covered in some detail, +followed by thorough coverage of the various data access frameworks and technologies +with which the Spring Framework integrates. + +## 1. Transaction Management + +Comprehensive transaction support is among the most compelling reasons to use the Spring +Framework. The Spring Framework provides a consistent abstraction for transaction +management that delivers the following benefits: + +* A consistent programming model across different transaction APIs, such as Java + Transaction API (JTA), JDBC, Hibernate, and the Java Persistence API (JPA). + +* Support for [declarative transaction management](#transaction-declarative). + +* A simpler API for [programmatic](#transaction-programmatic) transaction management + than complex transaction APIs, such as JTA. + +* Excellent integration with Spring’s data access abstractions. + +The following sections describe the Spring Framework’s transaction features and +technologies: + +* [Advantages of the Spring Framework’s transaction support + model](#transaction-motivation) describes why you would use the Spring Framework’s transaction abstraction + instead of EJB Container-Managed Transactions (CMT) or choosing to drive local + transactions through a proprietary API, such as Hibernate. + +* [Understanding the Spring Framework transaction abstraction](#transaction-strategies)outlines the core classes and describes how to configure and obtain `DataSource`instances from a variety of sources. 
+ +* [Synchronizing resources with transactions](#tx-resource-synchronization) describes + how the application code ensures that resources are created, reused, and cleaned up + properly. + +* [Declarative transaction management](#transaction-declarative) describes support for + declarative transaction management. + +* [Programmatic transaction management](#transaction-programmatic) covers support for + programmatic (that is, explicitly coded) transaction management. + +* [Transaction bound event](#transaction-event) describes how you could use application + events within a transaction. + +The chapter also includes discussions of best practices,[application server integration](#transaction-application-server-integration), +and [solutions to common problems](#transaction-solutions-to-common-problems). + +### 1.1. Advantages of the Spring Framework’s Transaction Support Model + +Traditionally, Java EE developers have had two choices for transaction management: +global or local transactions, both of which have profound limitations. Global +and local transaction management is reviewed in the next two sections, followed by a +discussion of how the Spring Framework’s transaction management support addresses the +limitations of the global and local transaction models. + +#### 1.1.1. Global Transactions + +Global transactions let you work with multiple transactional resources, typically +relational databases and message queues. The application server manages global +transactions through the JTA, which is a cumbersome API (partly due to its +exception model). Furthermore, a JTA `UserTransaction` normally needs to be sourced from +JNDI, meaning that you also need to use JNDI in order to use JTA. The use +of global transactions limits any potential reuse of application code, as JTA is +normally only available in an application server environment. + +Previously, the preferred way to use global transactions was through EJB CMT +(Container Managed Transaction). 
CMT is a form of declarative transaction +management (as distinguished from programmatic transaction management). EJB CMT +removes the need for transaction-related JNDI lookups, although the use of EJB +itself necessitates the use of JNDI. It removes most but not all of the need to write +Java code to control transactions. The significant downside is that CMT is tied to JTA +and an application server environment. Also, it is only available if one chooses to +implement business logic in EJBs (or at least behind a transactional EJB facade). The +negatives of EJB in general are so great that this is not an attractive proposition, +especially in the face of compelling alternatives for declarative transaction management. + +#### 1.1.2. Local Transactions + +Local transactions are resource-specific, such as a transaction associated with a JDBC +connection. Local transactions may be easier to use but have a significant disadvantage: +They cannot work across multiple transactional resources. For example, code that manages +transactions by using a JDBC connection cannot run within a global JTA transaction. Because +the application server is not involved in transaction management, it cannot help ensure +correctness across multiple resources. (It is worth noting that most applications use a +single transaction resource.) Another downside is that local transactions are invasive +to the programming model. + +#### 1.1.3. Spring Framework’s Consistent Programming Model + +Spring resolves the disadvantages of global and local transactions. It lets +application developers use a consistent programming model in any environment. +You write your code once, and it can benefit from different transaction management +strategies in different environments. The Spring Framework provides both declarative and +programmatic transaction management. Most users prefer declarative transaction +management, which we recommend in most cases. 
+ +With programmatic transaction management, developers work with the Spring Framework +transaction abstraction, which can run over any underlying transaction infrastructure. +With the preferred declarative model, developers typically write little or no code +related to transaction management and, hence, do not depend on the Spring Framework +transaction API or any other transaction API. + +Do you need an application server for transaction management? + +The Spring Framework’s transaction management support changes traditional rules as to +when an enterprise Java application requires an application server. + +In particular, you do not need an application server purely for declarative transactions +through EJBs. In fact, even if your application server has powerful JTA capabilities, +you may decide that the Spring Framework’s declarative transactions offer more power and +a more productive programming model than EJB CMT. + +Typically, you need an application server’s JTA capability only if your application needs +to handle transactions across multiple resources, which is not a requirement for many +applications. Many high-end applications use a single, highly scalable database (such as +Oracle RAC) instead. Stand-alone transaction managers (such as[Atomikos Transactions](https://www.atomikos.com/) and [JOTM](http://jotm.objectweb.org/)) +are other options. Of course, you may need other application server capabilities, such as +Java Message Service (JMS) and Java EE Connector Architecture (JCA). + +The Spring Framework gives you the choice of when to scale your application to a fully +loaded application server. Gone are the days when the only alternative to using EJB +CMT or JTA was to write code with local transactions (such as those on JDBC connections) +and face a hefty rework if you need that code to run within global, container-managed +transactions. 
With the Spring Framework, only some of the bean definitions in your +configuration file need to change (rather than your code). + +### 1.2. Understanding the Spring Framework Transaction Abstraction + +The key to the Spring transaction abstraction is the notion of a transaction strategy. A +transaction strategy is defined by a `TransactionManager`, specifically the`org.springframework.transaction.PlatformTransactionManager` interface for imperative +transaction management and the`org.springframework.transaction.ReactiveTransactionManager` interface for reactive +transaction management. The following listing shows the definition of the`PlatformTransactionManager` API: + +``` +public interface PlatformTransactionManager extends TransactionManager { + + TransactionStatus getTransaction(TransactionDefinition definition) throws TransactionException; + + void commit(TransactionStatus status) throws TransactionException; + + void rollback(TransactionStatus status) throws TransactionException; +} +``` + +This is primarily a service provider interface (SPI), although you can use it[programmatically](#transaction-programmatic-ptm) from your application code. Because`PlatformTransactionManager` is an interface, it can be easily mocked or stubbed as +necessary. It is not tied to a lookup strategy, such as JNDI.`PlatformTransactionManager` implementations are defined like any other object (or bean) +in the Spring Framework IoC container. This benefit alone makes Spring Framework +transactions a worthwhile abstraction, even when you work with JTA. You can test +transactional code much more easily than if it used JTA directly. + +Again, in keeping with Spring’s philosophy, the `TransactionException` that can be thrown +by any of the `PlatformTransactionManager` interface’s methods is unchecked (that +is, it extends the `java.lang.RuntimeException` class). Transaction infrastructure +failures are almost invariably fatal. 
In rare cases where application code can actually +recover from a transaction failure, the application developer can still choose to catch +and handle `TransactionException`. The salient point is that developers are not*forced* to do so. + +The `getTransaction(..)` method returns a `TransactionStatus` object, depending on a`TransactionDefinition` parameter. The returned `TransactionStatus` might represent a +new transaction or can represent an existing transaction, if a matching transaction +exists in the current call stack. The implication in this latter case is that, as with +Java EE transaction contexts, a `TransactionStatus` is associated with a thread of +execution. + +As of Spring Framework 5.2, Spring also provides a transaction management abstraction for +reactive applications that make use of reactive types or Kotlin Coroutines. The following +listing shows the transaction strategy defined by`org.springframework.transaction.ReactiveTransactionManager`: + +``` +public interface ReactiveTransactionManager extends TransactionManager { + + Mono getReactiveTransaction(TransactionDefinition definition) throws TransactionException; + + Mono commit(ReactiveTransaction status) throws TransactionException; + + Mono rollback(ReactiveTransaction status) throws TransactionException; +} +``` + +The reactive transaction manager is primarily a service provider interface (SPI), +although you can use it [programmatically](#transaction-programmatic-rtm) from your +application code. Because `ReactiveTransactionManager` is an interface, it can be easily +mocked or stubbed as necessary. + +The `TransactionDefinition` interface specifies: + +* Propagation: Typically, all code within a transaction scope runs in + that transaction. However, you can specify the behavior if + a transactional method is run when a transaction context already exists. 
For + example, code can continue running in the existing transaction (the common case), or + the existing transaction can be suspended and a new transaction created. Spring + offers all of the transaction propagation options familiar from EJB CMT. To read + about the semantics of transaction propagation in Spring, see [Transaction Propagation](#tx-propagation). + +* Isolation: The degree to which this transaction is isolated from the work of other + transactions. For example, can this transaction see uncommitted writes from other + transactions? + +* Timeout: How long this transaction runs before timing out and being automatically rolled back + by the underlying transaction infrastructure. + +* Read-only status: You can use a read-only transaction when your code reads but + does not modify data. Read-only transactions can be a useful optimization in some + cases, such as when you use Hibernate. + +These settings reflect standard transactional concepts. If necessary, refer to resources +that discuss transaction isolation levels and other core transaction concepts. +Understanding these concepts is essential to using the Spring Framework or any +transaction management solution. + +The `TransactionStatus` interface provides a simple way for transactional code to +control transaction execution and query transaction status. The concepts should be +familiar, as they are common to all transaction APIs. The following listing shows the`TransactionStatus` interface: + +``` +public interface TransactionStatus extends TransactionExecution, SavepointManager, Flushable { + + @Override + boolean isNewTransaction(); + + boolean hasSavepoint(); + + @Override + void setRollbackOnly(); + + @Override + boolean isRollbackOnly(); + + void flush(); + + @Override + boolean isCompleted(); +} +``` + +Regardless of whether you opt for declarative or programmatic transaction management in +Spring, defining the correct `TransactionManager` implementation is absolutely essential. 
+You typically define this implementation through dependency injection. + +`TransactionManager` implementations normally require knowledge of the environment in +which they work: JDBC, JTA, Hibernate, and so on. The following examples show how you can +define a local `PlatformTransactionManager` implementation (in this case, with plain +JDBC.) + +You can define a JDBC `DataSource` by creating a bean similar to the following: + +``` + + + + + + +``` + +The related `PlatformTransactionManager` bean definition then has a reference to the`DataSource` definition. It should resemble the following example: + +``` + + + +``` + +If you use JTA in a Java EE container, then you use a container `DataSource`, obtained +through JNDI, in conjunction with Spring’s `JtaTransactionManager`. The following example +shows what the JTA and JNDI lookup version would look like: + +``` + + + + + + + + + + +``` + +The `JtaTransactionManager` does not need to know about the `DataSource` (or any other +specific resources) because it uses the container’s global transaction management +infrastructure. + +| |The preceding definition of the `dataSource` bean uses the `` tag
from the `jee` namespace. For more information see[The JEE Schema](integration.html#xsd-schemas-jee).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you use JTA, your transaction manager definition should look the same, regardless
of what data access technology you use, be it JDBC, Hibernate JPA, or any other supported
technology. This is because JTA transactions are global transactions, which
can enlist any transactional resource.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In all Spring transaction setups, application code does not need to change. You can change +how transactions are managed merely by changing configuration, even if that change means +moving from local to global transactions or vice versa. + +#### 1.2.1. Hibernate Transaction Setup + +You can also easily use Hibernate local transactions, as shown in the following examples. +In this case, you need to define a Hibernate `LocalSessionFactoryBean`, which your +application code can use to obtain Hibernate `Session` instances. + +The `DataSource` bean definition is similar to the local JDBC example shown previously +and, thus, is not shown in the following example. + +| |If the `DataSource` (used by any non-JTA transaction manager) is looked up through
JNDI and managed by a Java EE container, it should be non-transactional, because the
Spring Framework (rather than the Java EE container) manages the transactions.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `txManager` bean in this case is of the `HibernateTransactionManager` type. In the +same way as the `DataSourceTransactionManager` needs a reference to the `DataSource`, the`HibernateTransactionManager` needs a reference to the `SessionFactory`. The following +example declares `sessionFactory` and `txManager` beans: + +``` + + + + + org/springframework/samples/petclinic/hibernate/petclinic.hbm.xml + + + + + hibernate.dialect=${hibernate.dialect} + + + + + + + +``` + +If you use Hibernate and Java EE container-managed JTA transactions, you should use the +same `JtaTransactionManager` as in the previous JTA example for JDBC, as the following +example shows. Also, it is recommended to make Hibernate aware of JTA through its +transaction coordinator and possibly also its connection release mode configuration: + +``` + + + + + org/springframework/samples/petclinic/hibernate/petclinic.hbm.xml + + + + + hibernate.dialect=${hibernate.dialect} + hibernate.transaction.coordinator_class=jta + hibernate.connection.handling_mode=DELAYED_ACQUISITION_AND_RELEASE_AFTER_STATEMENT + + + + + +``` + +Or alternatively, you may pass the `JtaTransactionManager` into your `LocalSessionFactoryBean`for enforcing the same defaults: + +``` + + + + + org/springframework/samples/petclinic/hibernate/petclinic.hbm.xml + + + + + hibernate.dialect=${hibernate.dialect} + + + + + + +``` + +### 1.3. 
Synchronizing Resources with Transactions + +How to create different transaction managers and how they are linked to related resources +that need to be synchronized to transactions (for example `DataSourceTransactionManager`to a JDBC `DataSource`, `HibernateTransactionManager` to a Hibernate `SessionFactory`, +and so forth) should now be clear. This section describes how the application code +(directly or indirectly, by using a persistence API such as JDBC, Hibernate, or JPA) +ensures that these resources are created, reused, and cleaned up properly. The section +also discusses how transaction synchronization is (optionally) triggered through the +relevant `TransactionManager`. + +#### 1.3.1. High-level Synchronization Approach + +The preferred approach is to use Spring’s highest-level template-based persistence +integration APIs or to use native ORM APIs with transaction-aware factory beans or +proxies for managing the native resource factories. These transaction-aware solutions +internally handle resource creation and reuse, cleanup, optional transaction +synchronization of the resources, and exception mapping. Thus, user data access code does +not have to address these tasks but can focus purely on non-boilerplate +persistence logic. Generally, you use the native ORM API or take a template approach +for JDBC access by using the `JdbcTemplate`. These solutions are detailed in subsequent +sections of this reference documentation. + +#### 1.3.2. Low-level Synchronization Approach + +Classes such as `DataSourceUtils` (for JDBC), `EntityManagerFactoryUtils` (for JPA),`SessionFactoryUtils` (for Hibernate), and so on exist at a lower level. When you want the +application code to deal directly with the resource types of the native persistence APIs, +you use these classes to ensure that proper Spring Framework-managed instances are obtained, +transactions are (optionally) synchronized, and exceptions that occur in the process are +properly mapped to a consistent API. 
+ +For example, in the case of JDBC, instead of the traditional JDBC approach of calling +the `getConnection()` method on the `DataSource`, you can instead use Spring’s`org.springframework.jdbc.datasource.DataSourceUtils` class, as follows: + +``` +Connection conn = DataSourceUtils.getConnection(dataSource); +``` + +If an existing transaction already has a connection synchronized (linked) to it, that +instance is returned. Otherwise, the method call triggers the creation of a new +connection, which is (optionally) synchronized to any existing transaction and made +available for subsequent reuse in that same transaction. As mentioned earlier, any`SQLException` is wrapped in a Spring Framework `CannotGetJdbcConnectionException`, one +of the Spring Framework’s hierarchy of unchecked `DataAccessException` types. This approach +gives you more information than can be obtained easily from the `SQLException` and +ensures portability across databases and even across different persistence technologies. + +This approach also works without Spring transaction management (transaction +synchronization is optional), so you can use it whether or not you use Spring for +transaction management. + +Of course, once you have used Spring’s JDBC support, JPA support, or Hibernate support, +you generally prefer not to use `DataSourceUtils` or the other helper classes, +because you are much happier working through the Spring abstraction than directly +with the relevant APIs. For example, if you use the Spring `JdbcTemplate` or`jdbc.object` package to simplify your use of JDBC, correct connection retrieval occurs +behind the scenes and you need not write any special code. + +#### 1.3.3. `TransactionAwareDataSourceProxy` + +At the very lowest level exists the `TransactionAwareDataSourceProxy` class. This is a +proxy for a target `DataSource`, which wraps the target `DataSource` to add awareness of +Spring-managed transactions. 
In this respect, it is similar to a transactional JNDI`DataSource`, as provided by a Java EE server. + +You should almost never need or want to use this class, except when existing +code must be called and passed a standard JDBC `DataSource` interface implementation. In +that case, it is possible that this code is usable but is participating in Spring-managed +transactions. You can write your new code by using the higher-level +abstractions mentioned earlier. + +### 1.4. Declarative Transaction Management + +| |Most Spring Framework users choose declarative transaction management. This option has
the least impact on application code and, hence, is most consistent with the ideals of a
non-invasive lightweight container.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The Spring Framework’s declarative transaction management is made possible with Spring +aspect-oriented programming (AOP). However, as the transactional aspects code comes +with the Spring Framework distribution and may be used in a boilerplate fashion, AOP +concepts do not generally have to be understood to make effective use of this code. + +The Spring Framework’s declarative transaction management is similar to EJB CMT, in that +you can specify transaction behavior (or lack of it) down to the individual method level. +You can make a `setRollbackOnly()` call within a transaction context, if +necessary. The differences between the two types of transaction management are: + +* Unlike EJB CMT, which is tied to JTA, the Spring Framework’s declarative transaction + management works in any environment. It can work with JTA transactions or local + transactions by using JDBC, JPA, or Hibernate by adjusting the configuration + files. + +* You can apply the Spring Framework declarative transaction management to any class, + not merely special classes such as EJBs. + +* The Spring Framework offers declarative[rollback rules](#transaction-declarative-rolling-back), a feature with no EJB + equivalent. Both programmatic and declarative support for rollback rules is provided. + +* The Spring Framework lets you customize transactional behavior by using AOP. + For example, you can insert custom behavior in the case of transaction rollback. You + can also add arbitrary advice, along with transactional advice. With EJB CMT, you + cannot influence the container’s transaction management, except with`setRollbackOnly()`. 
+ +* The Spring Framework does not support propagation of transaction contexts across + remote calls, as high-end application servers do. If you need this feature, we + recommend that you use EJB. However, consider carefully before using such a feature, + because, normally, one does not want transactions to span remote calls. + +The concept of rollback rules is important. They let you specify which exceptions +(and throwables) should cause automatic rollback. You can specify this declaratively, in +configuration, not in Java code. So, although you can still call `setRollbackOnly()` on +the `TransactionStatus` object to roll back the current transaction back, most often you +can specify a rule that `MyApplicationException` must always result in rollback. The +significant advantage to this option is that business objects do not depend on the +transaction infrastructure. For example, they typically do not need to import Spring +transaction APIs or other Spring APIs. + +Although EJB container default behavior automatically rolls back the transaction on a +system exception (usually a runtime exception), EJB CMT does not roll back the +transaction automatically on an application exception (that is, a checked exception +other than `java.rmi.RemoteException`). While the Spring default behavior for +declarative transaction management follows EJB convention (roll back is automatic only +on unchecked exceptions), it is often useful to customize this behavior. + +#### 1.4.1. Understanding the Spring Framework’s Declarative Transaction Implementation + +It is not sufficient merely to tell you to annotate your classes with the`@Transactional` annotation, add `@EnableTransactionManagement` to your configuration, +and expect you to understand how it all works. To provide a deeper understanding, this +section explains the inner workings of the Spring Framework’s declarative transaction +infrastructure in the context of transaction-related issues. 
+ +The most important concepts to grasp with regard to the Spring Framework’s declarative +transaction support are that this support is enabled[via AOP proxies](core.html#aop-understanding-aop-proxies) and that the transactional +advice is driven by metadata (currently XML- or annotation-based). The combination of AOP +with transactional metadata yields an AOP proxy that uses a `TransactionInterceptor` in +conjunction with an appropriate `TransactionManager` implementation to drive transactions +around method invocations. + +| |Spring AOP is covered in [the AOP section](core.html#aop).| +|---|----------------------------------------------------------| + +Spring Framework’s `TransactionInterceptor` provides transaction management for +imperative and reactive programming models. The interceptor detects the desired flavor of +transaction management by inspecting the method return type. Methods returning a reactive +type such as `Publisher` or Kotlin `Flow` (or a subtype of those) qualify for reactive +transaction management. All other return types including `void` use the code path for +imperative transaction management. + +Transaction management flavors impact which transaction manager is required. Imperative +transactions require a `PlatformTransactionManager`, while reactive transactions use`ReactiveTransactionManager` implementations. + +| |`@Transactional` commonly works with thread-bound transactions managed by`PlatformTransactionManager`, exposing a transaction to all data access operations within
the current execution thread. Note: This does *not* propagate to newly started threads
within the method.

A reactive transaction managed by `ReactiveTransactionManager` uses the Reactor context
instead of thread-local attributes. As a consequence, all participating data access
operations need to execute within the same Reactor context in the same reactive pipeline.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following image shows a conceptual view of calling a method on a transactional proxy: + +![tx](images/tx.png) + +#### 1.4.2. Example of Declarative Transaction Implementation + +Consider the following interface and its attendant implementation. This example uses`Foo` and `Bar` classes as placeholders so that you can concentrate on the transaction +usage without focusing on a particular domain model. For the purposes of this example, +the fact that the `DefaultFooService` class throws `UnsupportedOperationException`instances in the body of each implemented method is good. That behavior lets you see +transactions being created and then rolled back in response to the`UnsupportedOperationException` instance. 
The following listing shows the `FooService`interface: + +Java + +``` +// the service interface that we want to make transactional + +package x.y.service; + +public interface FooService { + + Foo getFoo(String fooName); + + Foo getFoo(String fooName, String barName); + + void insertFoo(Foo foo); + + void updateFoo(Foo foo); + +} +``` + +Kotlin + +``` +// the service interface that we want to make transactional + +package x.y.service + +interface FooService { + + fun getFoo(fooName: String): Foo + + fun getFoo(fooName: String, barName: String): Foo + + fun insertFoo(foo: Foo) + + fun updateFoo(foo: Foo) +} +``` + +The following example shows an implementation of the preceding interface: + +Java + +``` +package x.y.service; + +public class DefaultFooService implements FooService { + + @Override + public Foo getFoo(String fooName) { + // ... + } + + @Override + public Foo getFoo(String fooName, String barName) { + // ... + } + + @Override + public void insertFoo(Foo foo) { + // ... + } + + @Override + public void updateFoo(Foo foo) { + // ... + } +} +``` + +Kotlin + +``` +package x.y.service + +class DefaultFooService : FooService { + + override fun getFoo(fooName: String): Foo { + // ... + } + + override fun getFoo(fooName: String, barName: String): Foo { + // ... + } + + override fun insertFoo(foo: Foo) { + // ... + } + + override fun updateFoo(foo: Foo) { + // ... + } +} +``` + +Assume that the first two methods of the `FooService` interface, `getFoo(String)` and`getFoo(String, String)`, must run in the context of a transaction with read-only +semantics and that the other methods, `insertFoo(Foo)` and `updateFoo(Foo)`, must +run in the context of a transaction with read-write semantics. The following +configuration is explained in detail in the next few paragraphs: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +Examine the preceding configuration. 
It assumes that you want to make a service object, +the `fooService` bean, transactional. The transaction semantics to apply are encapsulated +in the `` definition. The `` definition reads as "all methods +starting with `get` are to run in the context of a read-only transaction, and all +other methods are to run with the default transaction semantics". The`transaction-manager` attribute of the `` tag is set to the name of the`TransactionManager` bean that is going to drive the transactions (in this case, the`txManager` bean). + +| |You can omit the `transaction-manager` attribute in the transactional advice
(`<tx:advice/>`) if the bean name of the `TransactionManager` that you want to
wire in has the name `transactionManager`. If the `TransactionManager` bean that
you want to wire in has any other name, you must use the `transaction-manager`attribute explicitly, as in the preceding example.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `` definition ensures that the transactional advice defined by the`txAdvice` bean runs at the appropriate points in the program. First, you define a +pointcut that matches the execution of any operation defined in the `FooService` interface +(`fooServiceOperation`). Then you associate the pointcut with the `txAdvice` by using an +advisor. The result indicates that, at the execution of a `fooServiceOperation`, +the advice defined by `txAdvice` is run. + +The expression defined within the `` element is an AspectJ pointcut +expression. See [the AOP section](core.html#aop) for more details on pointcut +expressions in Spring. + +A common requirement is to make an entire service layer transactional. The best way to +do this is to change the pointcut expression to match any operation in your +service layer. The following example shows how to do so: + +``` + + + + +``` + +| |In the preceding example, it is assumed that all your service interfaces are defined
in the `x.y.service` package. See [the AOP section](core.html#aop) for more details.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Now that we have analyzed the configuration, you may be asking yourself, +"What does all this configuration actually do?" + +The configuration shown earlier is used to create a transactional proxy around the object +that is created from the `fooService` bean definition. The proxy is configured with +the transactional advice so that, when an appropriate method is invoked on the proxy, +a transaction is started, suspended, marked as read-only, and so on, depending on the +transaction configuration associated with that method. Consider the following program +that test drives the configuration shown earlier: + +Java + +``` +public final class Boot { + + public static void main(final String[] args) throws Exception { + ApplicationContext ctx = new ClassPathXmlApplicationContext("context.xml"); + FooService fooService = ctx.getBean(FooService.class); + fooService.insertFoo(new Foo()); + } +} +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +fun main() { + val ctx = ClassPathXmlApplicationContext("context.xml") + val fooService = ctx.getBean("fooService") + fooService.insertFoo(Foo()) +} +``` + +The output from running the preceding program should resemble the following (the Log4J +output and the stack trace from the `UnsupportedOperationException` thrown by the`insertFoo(..)` method of the `DefaultFooService` class have been truncated for clarity): + +``` + +[AspectJInvocationContextExposingAdvisorAutoProxyCreator] - Creating implicit proxy for bean 'fooService' with 0 common interceptors and 1 specific interceptors + + +[JdkDynamicAopProxy] - Creating JDK dynamic proxy for [x.y.service.DefaultFooService] + + +[TransactionInterceptor] - Getting transaction for 
x.y.service.FooService.insertFoo + + +[DataSourceTransactionManager] - Creating new transaction with name [x.y.service.FooService.insertFoo] +[DataSourceTransactionManager] - Acquired Connection [[email protected]] for JDBC transaction + + +[RuleBasedTransactionAttribute] - Applying rules to determine whether transaction should rollback on java.lang.UnsupportedOperationException +[TransactionInterceptor] - Invoking rollback for transaction on x.y.service.FooService.insertFoo due to throwable [java.lang.UnsupportedOperationException] + + +[DataSourceTransactionManager] - Rolling back JDBC transaction on Connection [[email protected]] +[DataSourceTransactionManager] - Releasing JDBC Connection after transaction +[DataSourceUtils] - Returning JDBC Connection to DataSource + +Exception in thread "main" java.lang.UnsupportedOperationException at x.y.service.DefaultFooService.insertFoo(DefaultFooService.java:14) + +at $Proxy0.insertFoo(Unknown Source) +at Boot.main(Boot.java:11) +``` + +To use reactive transaction management the code has to use reactive types. + +| |Spring Framework uses the `ReactiveAdapterRegistry` to determine whether a method
return type is reactive.| +|---|--------------------------------------------------------------------------------------------------------------| + +The following listing shows a modified version of the previously used `FooService`, but +this time the code uses reactive types: + +Java + +``` +// the reactive service interface that we want to make transactional + +package x.y.service; + +public interface FooService { + + Flux getFoo(String fooName); + + Publisher getFoo(String fooName, String barName); + + Mono insertFoo(Foo foo); + + Mono updateFoo(Foo foo); + +} +``` + +Kotlin + +``` +// the reactive service interface that we want to make transactional + +package x.y.service + +interface FooService { + + fun getFoo(fooName: String): Flow + + fun getFoo(fooName: String, barName: String): Publisher + + fun insertFoo(foo: Foo) : Mono + + fun updateFoo(foo: Foo) : Mono +} +``` + +The following example shows an implementation of the preceding interface: + +Java + +``` +package x.y.service; + +public class DefaultFooService implements FooService { + + @Override + public Flux getFoo(String fooName) { + // ... + } + + @Override + public Publisher getFoo(String fooName, String barName) { + // ... + } + + @Override + public Mono insertFoo(Foo foo) { + // ... + } + + @Override + public Mono updateFoo(Foo foo) { + // ... + } +} +``` + +Kotlin + +``` +package x.y.service + +class DefaultFooService : FooService { + + override fun getFoo(fooName: String): Flow { + // ... + } + + override fun getFoo(fooName: String, barName: String): Publisher { + // ... + } + + override fun insertFoo(foo: Foo): Mono { + // ... + } + + override fun updateFoo(foo: Foo): Mono { + // ... + } +} +``` + +Imperative and reactive transaction management share the same semantics for transaction +boundary and transaction attribute definitions. The main difference between imperative +and reactive transactions is the deferred nature of the latter. 
`TransactionInterceptor`decorates the returned reactive type with a transactional operator to begin and clean up +the transaction. Therefore, calling a transactional reactive method defers the actual +transaction management to a subscription type that activates processing of the reactive +type. + +Another aspect of reactive transaction management relates to data escaping which is a +natural consequence of the programming model. + +Method return values of imperative transactions are returned from transactional methods +upon successful termination of a method so that partially computed results do not escape +the method closure. + +Reactive transaction methods return a reactive wrapper type which represents a +computation sequence along with a promise to begin and complete the computation. + +A `Publisher` can emit data while a transaction is ongoing but not necessarily completed. +Therefore, methods that depend upon successful completion of an entire transaction need +to ensure completion and buffer results in the calling code. + +#### 1.4.3. Rolling Back a Declarative Transaction + +The previous section outlined the basics of how to specify transactional settings for +classes, typically service layer classes, declaratively in your application. This +section describes how you can control the rollback of transactions in a simple, +declarative fashion. + +The recommended way to indicate to the Spring Framework’s transaction infrastructure +that a transaction’s work is to be rolled back is to throw an `Exception` from code that +is currently executing in the context of a transaction. The Spring Framework’s +transaction infrastructure code catches any unhandled `Exception` as it bubbles up +the call stack and makes a determination whether to mark the transaction for rollback. + +In its default configuration, the Spring Framework’s transaction infrastructure code +marks a transaction for rollback only in the case of runtime, unchecked exceptions. 
+That is, when the thrown exception is an instance or subclass of `RuntimeException`. (`Error` instances also, by default, result in a rollback). Checked exceptions that are +thrown from a transactional method do not result in rollback in the default +configuration. + +You can configure exactly which `Exception` types mark a transaction for rollback, +including checked exceptions. The following XML snippet demonstrates how you configure +rollback for a checked, application-specific `Exception` type: + +``` + + + + + + +``` + +If you do not want a transaction rolled +back when an exception is thrown, you can also specify 'no rollback rules'. The following example tells the Spring Framework’s +transaction infrastructure to commit the attendant transaction even in the face of an +unhandled `InstrumentNotFoundException`: + +``` + + + + + + +``` + +When the Spring Framework’s transaction infrastructure catches an exception and it +consults the configured rollback rules to determine whether to mark the transaction for +rollback, the strongest matching rule wins. So, in the case of the following +configuration, any exception other than an `InstrumentNotFoundException` results in a +rollback of the attendant transaction: + +``` + + + + + +``` + +You can also indicate a required rollback programmatically. Although simple, +this process is quite invasive and tightly couples your code to the Spring Framework’s +transaction infrastructure. The following example shows how to programmatically indicate +a required rollback: + +Java + +``` +public void resolvePosition() { + try { + // some business logic... + } catch (NoProductInStockException ex) { + // trigger rollback programmatically + TransactionAspectSupport.currentTransactionStatus().setRollbackOnly(); + } +} +``` + +Kotlin + +``` +fun resolvePosition() { + try { + // some business logic... 
+ } catch (ex: NoProductInStockException) { + // trigger rollback programmatically + TransactionAspectSupport.currentTransactionStatus().setRollbackOnly(); + } +} +``` + +You are strongly encouraged to use the declarative approach to rollback, if at all +possible. Programmatic rollback is available should you absolutely need it, but its +usage flies in the face of achieving a clean POJO-based architecture. + +#### 1.4.4. Configuring Different Transactional Semantics for Different Beans #### + +Consider the scenario where you have a number of service layer objects, and you want to +apply a totally different transactional configuration to each of them. You can do so +by defining distinct `` elements with differing `pointcut` and`advice-ref` attribute values. + +As a point of comparison, first assume that all of your service layer classes are +defined in a root `x.y.service` package. To make all beans that are instances of classes +defined in that package (or in subpackages) and that have names ending in `Service` have +the default transactional configuration, you could write the following: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +The following example shows how to configure two distinct beans with totally different +transactional settings: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +#### 1.4.5. \ Settings + +This section summarizes the various transactional settings that you can specify by using +the `` tag. The default `` settings are: + +* The [propagation setting](#tx-propagation) is `REQUIRED.` + +* The isolation level is `DEFAULT.` + +* The transaction is read-write. + +* The transaction timeout defaults to the default timeout of the underlying transaction + system or none if timeouts are not supported. + +* Any `RuntimeException` triggers rollback, and any checked `Exception` does not. + +You can change these default settings. 
The following table summarizes the various attributes of the `` tags +that are nested within `` and `` tags: + +| Attribute |Required?| Default | Description | +|-----------------|---------|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `name` | Yes | |Method names with which the transaction attributes are to be associated. The
wildcard (\*) character can be used to associate the same transaction attribute
settings with a number of methods (for example, `get*`, `handle*`, `on*Event`, and so
forth).| +| `propagation` | No |`REQUIRED`| Transaction propagation behavior. | +| `isolation` | No |`DEFAULT` | Transaction isolation level. Only applicable to propagation settings of `REQUIRED` or `REQUIRES_NEW`. | +| `timeout` | No | \-1 | Transaction timeout (seconds). Only applicable to propagation `REQUIRED` or `REQUIRES_NEW`. | +| `read-only` | No | false | Read-write versus read-only transaction. Applies only to `REQUIRED` or `REQUIRES_NEW`. | +| `rollback-for` | No | | Comma-delimited list of `Exception` instances that trigger rollback. For example,`com.foo.MyBusinessException,ServletException`. | +|`no-rollback-for`| No | | Comma-delimited list of `Exception` instances that do not trigger rollback. For example,`com.foo.MyBusinessException,ServletException`. | + +#### 1.4.6. Using `@Transactional` + +In addition to the XML-based declarative approach to transaction configuration, you can +use an annotation-based approach. Declaring transaction semantics directly in the Java +source code puts the declarations much closer to the affected code. There is not much +danger of undue coupling, because code that is meant to be used transactionally is +almost always deployed that way anyway. + +| |The standard `javax.transaction.Transactional` annotation is also supported as a
drop-in replacement to Spring’s own annotation. Please refer to JTA 1.2 documentation
for more details.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The ease-of-use afforded by the use of the `@Transactional` annotation is best +illustrated with an example, which is explained in the text that follows. +Consider the following class definition: + +Java + +``` +// the service class that we want to make transactional +@Transactional +public class DefaultFooService implements FooService { + + @Override + public Foo getFoo(String fooName) { + // ... + } + + @Override + public Foo getFoo(String fooName, String barName) { + // ... + } + + @Override + public void insertFoo(Foo foo) { + // ... + } + + @Override + public void updateFoo(Foo foo) { + // ... + } +} +``` + +Kotlin + +``` +// the service class that we want to make transactional +@Transactional +class DefaultFooService : FooService { + + override fun getFoo(fooName: String): Foo { + // ... + } + + override fun getFoo(fooName: String, barName: String): Foo { + // ... + } + + override fun insertFoo(foo: Foo) { + // ... + } + + override fun updateFoo(foo: Foo) { + // ... + } +} +``` + +Used at the class level as above, the annotation indicates a default for all methods of +the declaring class (as well as its subclasses). Alternatively, each method can be +annotated individually. See [Method visibility and `@Transactional`](#transaction-declarative-annotations-method-visibility) for +further details on which methods Spring considers transactional. Note that a class-level +annotation does not apply to ancestor classes up the class hierarchy; in such a scenario, +inherited methods need to be locally redeclared in order to participate in a +subclass-level annotation. 
+ +When a POJO class such as the one above is defined as a bean in a Spring context, +you can make the bean instance transactional through an `@EnableTransactionManagement`annotation in a `@Configuration` class. See the[javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/transaction/annotation/EnableTransactionManagement.html)for full details. + +In XML configuration, the `` tag provides similar convenience: + +``` + + + + + + + + + + (1) + + + + + + + + + +``` + +|**1**|The line that makes the bean instance transactional.| +|-----|----------------------------------------------------| + +| |You can omit the `transaction-manager` attribute in the ``tag if the bean name of the `TransactionManager` that you want to wire in has the name`transactionManager`. If the `TransactionManager` bean that you want to dependency-inject
has any other name, you have to use the `transaction-manager` attribute, as in the
preceding example.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Reactive transactional methods use reactive return types in contrast to imperative +programming arrangements as the following listing shows: + +Java + +``` +// the reactive service class that we want to make transactional +@Transactional +public class DefaultFooService implements FooService { + + @Override + public Publisher getFoo(String fooName) { + // ... + } + + @Override + public Mono getFoo(String fooName, String barName) { + // ... + } + + @Override + public Mono insertFoo(Foo foo) { + // ... + } + + @Override + public Mono updateFoo(Foo foo) { + // ... + } +} +``` + +Kotlin + +``` +// the reactive service class that we want to make transactional +@Transactional +class DefaultFooService : FooService { + + override fun getFoo(fooName: String): Flow { + // ... + } + + override fun getFoo(fooName: String, barName: String): Mono { + // ... + } + + override fun insertFoo(foo: Foo): Mono { + // ... + } + + override fun updateFoo(foo: Foo): Mono { + // ... + } +} +``` + +Note that there are special considerations for the returned `Publisher` with regards to +Reactive Streams cancellation signals. See the [Cancel Signals](#tx-prog-operator-cancel) section under +"Using the TransactionOperator" for more details. + +| |Method visibility and `@Transactional`

When you use transactional proxies with Spring’s standard configuration, you should apply
the `@Transactional` annotation only to methods with `public` visibility. If you do
annotate `protected`, `private`, or package-visible methods with the `@Transactional` annotation, no error is raised, but the annotated method does not exhibit the configured
transactional settings. If you need to annotate non-public methods, consider the tip in
the following paragraph for class-based proxies or consider using AspectJ compile-time or
load-time weaving (described later).

When using `@EnableTransactionManagement` in a `@Configuration` class, `protected` or
package-visible methods can also be made transactional for class-based proxies by
registering a custom `transactionAttributeSource` bean like in the following example.
Note, however, that transactional methods in interface-based proxies must always be `public` and defined in the proxied interface.

```
/**
* Register a custom AnnotationTransactionAttributeSource with the
* publicMethodsOnly flag set to false to enable support for
* protected and package-private @Transactional methods in
* class-based proxies.
*
* @see ProxyTransactionManagementConfiguration#transactionAttributeSource()
*/
@Bean
TransactionAttributeSource transactionAttributeSource() {
return new AnnotationTransactionAttributeSource(false);
}
```

The *Spring TestContext Framework* supports non-private `@Transactional` test methods by
default. See [Transaction Management](testing.html#testcontext-tx) in the testing
chapter for examples.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can apply the `@Transactional` annotation to an interface definition, a method +on an interface, a class definition, or a method on a class. 
However, the +mere presence of the `@Transactional` annotation is not enough to activate the +transactional behavior. The `@Transactional` annotation is merely metadata that can +be consumed by some runtime infrastructure that is `@Transactional`-aware and that +can use the metadata to configure the appropriate beans with transactional behavior. +In the preceding example, the `` element switches on the +transactional behavior. + +| |The Spring team recommends that you annotate only concrete classes (and methods of
concrete classes) with the `@Transactional` annotation, as opposed to annotating interfaces.
You certainly can place the `@Transactional` annotation on an interface (or an interface
method), but this works only as you would expect it to if you use interface-based
proxies. The fact that Java annotations are not inherited from interfaces means that,
if you use class-based proxies (`proxy-target-class="true"`) or the weaving-based
aspect (`mode="aspectj"`), the transaction settings are not recognized by the proxying
and weaving infrastructure, and the object is not wrapped in a transactional proxy.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |In proxy mode (which is the default), only external method calls coming in through
the proxy are intercepted. This means that self-invocation (in effect, a method within
the target object calling another method of the target object) does not lead to an actual
transaction at runtime even if the invoked method is marked with `@Transactional`. Also,
the proxy must be fully initialized to provide the expected behavior, so you should not
rely on this feature in your initialization code — for example, in a `@PostConstruct`method.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Consider using AspectJ mode (see the `mode` attribute in the following table) if you +expect self-invocations to be wrapped with transactions as well. In this case, there is +no proxy in the first place. Instead, the target class is woven (that is, its byte code +is modified) to support `@Transactional` runtime behavior on any kind of method. + +| XML Attribute | Annotation Attribute | Default | Description | +|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`transaction-manager`|N/A 
(see [`TransactionManagementConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/transaction/annotation/TransactionManagementConfigurer.html) javadoc)| `transactionManager` | Name of the transaction manager to use. Required only if the name of the transaction
manager is not `transactionManager`, as in the preceding example. | +| `mode` | `mode` | `proxy` |The default mode (`proxy`) processes annotated beans to be proxied by using Spring’s AOP
framework (following proxy semantics, as discussed earlier, applying to method calls
coming in through the proxy only). The alternative mode (`aspectj`) instead weaves the
affected classes with Spring’s AspectJ transaction aspect, modifying the target class
byte code to apply to any kind of method call. AspectJ weaving requires `spring-aspects.jar` in the classpath as well as having load-time weaving (or compile-time
weaving) enabled. (See [Spring configuration](core.html#aop-aj-ltw-spring) for details on how to set up load-time weaving.)| +|`proxy-target-class` | `proxyTargetClass` | `false` | Applies to `proxy` mode only. Controls what type of transactional proxies are created
for classes annotated with the `@Transactional` annotation. If the `proxy-target-class` attribute is set to `true`, class-based proxies are created.
If `proxy-target-class` is `false` or if the attribute is omitted, then standard JDK
interface-based proxies are created. (See [Proxying Mechanisms](core.html#aop-proxying) for a detailed examination of the different proxy types.) | +| `order` | `order` |`Ordered.LOWEST_PRECEDENCE`| Defines the order of the transaction advice that is applied to beans annotated with `@Transactional`. (For more information about the rules related to ordering of AOP
advice, see [Advice Ordering](core.html#aop-ataspectj-advice-ordering).)
No specified ordering means that the AOP subsystem determines the order of the advice. | + +| |The default advice mode for processing `@Transactional` annotations is `proxy`,
which allows for interception of calls through the proxy only. Local calls within the
same class cannot get intercepted that way. For a more advanced mode of interception,
consider switching to `aspectj` mode in combination with compile-time or load-time weaving.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The `proxy-target-class` attribute controls what type of transactional proxies are
created for classes annotated with the `@Transactional` annotation. If `proxy-target-class` is set to `true`, class-based proxies are created. If `proxy-target-class` is `false` or if the attribute is omitted, standard JDK
interface-based proxies are created. (See [Proxying Mechanisms](core.html#aop-proxying)for a discussion of the different proxy types.)| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`@EnableTransactionManagement` and `` look for`@Transactional` only on beans in the same application context in which they are defined.
This means that, if you put annotation-driven configuration in a `WebApplicationContext` for a `DispatcherServlet`, it checks for `@Transactional` beans only in your controllers
and not in your services. See [MVC](web.html#mvc-servlet) for more information.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The most derived location takes precedence when evaluating the transactional settings +for a method. In the case of the following example, the `DefaultFooService` class is +annotated at the class level with the settings for a read-only transaction, but the`@Transactional` annotation on the `updateFoo(Foo)` method in the same class takes +precedence over the transactional settings defined at the class level. + +Java + +``` +@Transactional(readOnly = true) +public class DefaultFooService implements FooService { + + public Foo getFoo(String fooName) { + // ... + } + + // these settings have precedence for this method + @Transactional(readOnly = false, propagation = Propagation.REQUIRES_NEW) + public void updateFoo(Foo foo) { + // ... + } +} +``` + +Kotlin + +``` +@Transactional(readOnly = true) +class DefaultFooService : FooService { + + override fun getFoo(fooName: String): Foo { + // ... + } + + // these settings have precedence for this method + @Transactional(readOnly = false, propagation = Propagation.REQUIRES_NEW) + override fun updateFoo(foo: Foo) { + // ... + } +} +``` + +##### `@Transactional` Settings + +The `@Transactional` annotation is metadata that specifies that an interface, class, +or method must have transactional semantics (for example, "start a brand new read-only +transaction when this method is invoked, suspending any existing transaction"). 
+The default `@Transactional` settings are as follows: + +* The propagation setting is `PROPAGATION_REQUIRED.` + +* The isolation level is `ISOLATION_DEFAULT.` + +* The transaction is read-write. + +* The transaction timeout defaults to the default timeout of the underlying transaction + system, or to none if timeouts are not supported. + +* Any `RuntimeException` triggers rollback, and any checked `Exception` does not. + +You can change these default settings. The following table summarizes the various +properties of the `@Transactional` annotation: + +| Property | Type | Description | +|--------------------------------------------------|-----------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------| +|[value](#tx-multiple-tx-mgrs-with-attransactional)| `String` | Optional qualifier that specifies the transaction manager to be used. | +| [propagation](#tx-propagation) | `enum`: `Propagation` | Optional propagation setting. | +| `isolation` | `enum`: `Isolation` | Optional isolation level. Applies only to propagation values of `REQUIRED` or `REQUIRES_NEW`. | +| `timeout` | `int` (in seconds of granularity) | Optional transaction timeout. Applies only to propagation values of `REQUIRED` or `REQUIRES_NEW`. | +| `readOnly` | `boolean` | Read-write versus read-only transaction. Only applicable to values of `REQUIRED` or `REQUIRES_NEW`. | +| `rollbackFor` | Array of `Class` objects, which must be derived from `Throwable.` | Optional array of exception classes that must cause rollback. | +| `rollbackForClassName` | Array of class names. The classes must be derived from `Throwable.` | Optional array of names of exception classes that must cause rollback. | +| `noRollbackFor` | Array of `Class` objects, which must be derived from `Throwable.` | Optional array of exception classes that must not cause rollback. 
| +| `noRollbackForClassName` | Array of `String` class names, which must be derived from `Throwable.` | Optional array of names of exception classes that must not cause rollback. | +| `label` |Array of `String` labels to add an expressive description to the transaction.|Labels may be evaluated by transaction managers to associate
implementation-specific behavior with the actual transaction.| + +Currently, you cannot have explicit control over the name of a transaction, where 'name' +means the transaction name that appears in a transaction monitor, if applicable +(for example, WebLogic’s transaction monitor), and in logging output. For declarative +transactions, the transaction name is always the fully-qualified class name + `.`+ the method name of the transactionally advised class. For example, if the`handlePayment(..)` method of the `BusinessService` class started a transaction, the +name of the transaction would be: `com.example.BusinessService.handlePayment`. + +##### Multiple Transaction Managers with `@Transactional` + +Most Spring applications need only a single transaction manager, but there may be +situations where you want multiple independent transaction managers in a single +application. You can use the `value` or `transactionManager` attribute of the`@Transactional` annotation to optionally specify the identity of the`TransactionManager` to be used. This can either be the bean name or the qualifier value +of the transaction manager bean. For example, using the qualifier notation, you can +combine the following Java code with the following transaction manager bean declarations +in the application context: + +Java + +``` +public class TransactionalService { + + @Transactional("order") + public void setSomething(String name) { ... } + + @Transactional("account") + public void doSomething() { ... } + + @Transactional("reactive-account") + public Mono doSomethingReactive() { ... } +} +``` + +Kotlin + +``` +class TransactionalService { + + @Transactional("order") + fun setSomething(name: String) { + // ... + } + + @Transactional("account") + fun doSomething() { + // ... + } + + @Transactional("reactive-account") + fun doSomethingReactive(): Mono { + // ... + } +} +``` + +The following listing shows the bean declarations: + +``` + + + + ... + + + + + ... + + + + + ... 
+ + +``` + +In this case, the individual methods on `TransactionalService` run under separate +transaction managers, differentiated by the `order`, `account`, and `reactive-account`qualifiers. The default `` target bean name, `transactionManager`, +is still used if no specifically qualified `TransactionManager` bean is found. + +##### Custom Composed Annotations + +If you find you repeatedly use the same attributes with `@Transactional` on many different +methods, [Spring’s meta-annotation support](core.html#beans-meta-annotations) lets you +define custom composed annotations for your specific use cases. For example, consider the +following annotation definitions: + +Java + +``` +@Target({ElementType.METHOD, ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@Transactional(transactionManager = "order", label = "causal-consistency") +public @interface OrderTx { +} + +@Target({ElementType.METHOD, ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@Transactional(transactionManager = "account", label = "retryable") +public @interface AccountTx { +} +``` + +Kotlin + +``` +@Target(AnnotationTarget.FUNCTION, AnnotationTarget.TYPE) +@Retention(AnnotationRetention.RUNTIME) +@Transactional(transactionManager = "order", label = ["causal-consistency"]) +annotation class OrderTx + +@Target(AnnotationTarget.FUNCTION, AnnotationTarget.TYPE) +@Retention(AnnotationRetention.RUNTIME) +@Transactional(transactionManager = "account", label = ["retryable"]) +annotation class AccountTx +``` + +The preceding annotations let us write the example from the previous section as follows: + +Java + +``` +public class TransactionalService { + + @OrderTx + public void setSomething(String name) { + // ... + } + + @AccountTx + public void doSomething() { + // ... + } +} +``` + +Kotlin + +``` +class TransactionalService { + + @OrderTx + fun setSomething(name: String) { + // ... + } + + @AccountTx + fun doSomething() { + // ... 
+ } +} +``` + +In the preceding example, we used the syntax to define the transaction manager qualifier +and transactional labels, but we could also have included propagation behavior, +rollback rules, timeouts, and other features. + +#### 1.4.7. Transaction Propagation + +This section describes some semantics of transaction propagation in Spring. Note +that this section is not a proper introduction to transaction propagation. Rather, it +details some of the semantics regarding transaction propagation in Spring. + +In Spring-managed transactions, be aware of the difference between physical and +logical transactions, and how the propagation setting applies to this difference. + +##### Understanding `PROPAGATION_REQUIRED` + +![tx prop required](images/tx_prop_required.png) + +`PROPAGATION_REQUIRED` enforces a physical transaction, either locally for the current +scope if no transaction exists yet or participating in an existing 'outer' transaction +defined for a larger scope. This is a fine default in common call stack arrangements +within the same thread (for example, a service facade that delegates to several repository methods +where all the underlying resources have to participate in the service-level transaction). + +| |By default, a participating transaction joins the characteristics of the outer scope,
silently ignoring the local isolation level, timeout value, or read-only flag (if any).
Consider switching the `validateExistingTransaction` flag to `true` on your transaction
manager if you want isolation level declarations to be rejected when participating in
an existing transaction with a different isolation level. This non-lenient mode also
rejects read-only mismatches (that is, an inner read-write transaction that tries to participate
in a read-only outer scope).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When the propagation setting is `PROPAGATION_REQUIRED`, a logical transaction scope +is created for each method upon which the setting is applied. Each such logical +transaction scope can determine rollback-only status individually, with an outer +transaction scope being logically independent from the inner transaction scope. +In the case of standard `PROPAGATION_REQUIRED` behavior, all these scopes are +mapped to the same physical transaction. So a rollback-only marker set in the inner +transaction scope does affect the outer transaction’s chance to actually commit. + +However, in the case where an inner transaction scope sets the rollback-only marker, the +outer transaction has not decided on the rollback itself, so the rollback (silently +triggered by the inner transaction scope) is unexpected. A corresponding`UnexpectedRollbackException` is thrown at that point. This is expected behavior so +that the caller of a transaction can never be misled to assume that a commit was +performed when it really was not. So, if an inner transaction (of which the outer caller +is not aware) silently marks a transaction as rollback-only, the outer caller still +calls commit. The outer caller needs to receive an `UnexpectedRollbackException` to +indicate clearly that a rollback was performed instead. 
+ +##### Understanding `PROPAGATION_REQUIRES_NEW` + +![tx prop requires new](images/tx_prop_requires_new.png) + +`PROPAGATION_REQUIRES_NEW`, in contrast to `PROPAGATION_REQUIRED`, always uses an +independent physical transaction for each affected transaction scope, never +participating in an existing transaction for an outer scope. In such an arrangement, +the underlying resource transactions are different and, hence, can commit or roll back +independently, with an outer transaction not affected by an inner transaction’s rollback +status and with an inner transaction’s locks released immediately after its completion. +Such an independent inner transaction can also declare its own isolation level, timeout, +and read-only settings and not inherit an outer transaction’s characteristics. + +##### Understanding `PROPAGATION_NESTED` + +`PROPAGATION_NESTED` uses a single physical transaction with multiple savepoints +that it can roll back to. Such partial rollbacks let an inner transaction scope +trigger a rollback for its scope, with the outer transaction being able to continue +the physical transaction despite some operations having been rolled back. This setting +is typically mapped onto JDBC savepoints, so it works only with JDBC resource +transactions. See Spring’s [`DataSourceTransactionManager`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jdbc/datasource/DataSourceTransactionManager.html). + +#### 1.4.8. Advising Transactional Operations + +Suppose you want to run both transactional operations and some basic profiling advice. +How do you effect this in the context of ``? + +When you invoke the `updateFoo(Foo)` method, you want to see the following actions: + +* The configured profiling aspect starts. + +* The transactional advice runs. + +* The method on the advised object runs. + +* The transaction commits. + +* The profiling aspect reports the exact duration of the whole transactional method invocation. 
+ +| |This chapter is not concerned with explaining AOP in any great detail (except as it
applies to transactions). See [AOP](core.html#aop) for detailed coverage of the AOP
configuration and AOP in general.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following code shows the simple profiling aspect discussed earlier: + +Java + +``` +package x.y; + +import org.aspectj.lang.ProceedingJoinPoint; +import org.springframework.util.StopWatch; +import org.springframework.core.Ordered; + +public class SimpleProfiler implements Ordered { + + private int order; + + // allows us to control the ordering of advice + public int getOrder() { + return this.order; + } + + public void setOrder(int order) { + this.order = order; + } + + // this method is the around advice + public Object profile(ProceedingJoinPoint call) throws Throwable { + Object returnValue; + StopWatch clock = new StopWatch(getClass().getName()); + try { + clock.start(call.toShortString()); + returnValue = call.proceed(); + } finally { + clock.stop(); + System.out.println(clock.prettyPrint()); + } + return returnValue; + } +} +``` + +Kotlin + +``` +class SimpleProfiler : Ordered { + + private var order: Int = 0 + + // allows us to control the ordering of advice + override fun getOrder(): Int { + return this.order + } + + fun setOrder(order: Int) { + this.order = order + } + + // this method is the around advice + fun profile(call: ProceedingJoinPoint): Any { + var returnValue: Any + val clock = StopWatch(javaClass.name) + try { + clock.start(call.toShortString()) + returnValue = call.proceed() + } finally { + clock.stop() + println(clock.prettyPrint()) + } + return returnValue + } +} +``` + +The ordering of advice +is controlled through the `Ordered` interface. For full details on advice ordering, see[Advice ordering](core.html#aop-ataspectj-advice-ordering). 
+ +The following configuration creates a `fooService` bean that has profiling and +transactional aspects applied to it in the desired order: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +You can configure any number +of additional aspects in similar fashion. + +The following example creates the same setup as the previous two examples but uses the purely XML +declarative approach: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +The result of the preceding configuration is a `fooService` bean that has profiling and +transactional aspects applied to it in that order. If you want the profiling advice +to run after the transactional advice on the way in and before the +transactional advice on the way out, you can swap the value of the profiling +aspect bean’s `order` property so that it is higher than the transactional advice’s +order value. + +You can configure additional aspects in similar fashion. + +#### 1.4.9. Using `@Transactional` with AspectJ + +You can also use the Spring Framework’s `@Transactional` support outside of a Spring +container by means of an AspectJ aspect. To do so, first annotate your classes +(and optionally your classes' methods) with the `@Transactional` annotation, +and then link (weave) your application with the`org.springframework.transaction.aspectj.AnnotationTransactionAspect` defined in the`spring-aspects.jar` file. You must also configure the aspect with a transaction +manager. You can use the Spring Framework’s IoC container to take care of +dependency-injecting the aspect. The simplest way to configure the transaction +management aspect is to use the `` element and specify the `mode`attribute to `aspectj` as described in [Using `@Transactional`](#transaction-declarative-annotations). Because +we focus here on applications that run outside of a Spring container, we show +you how to do it programmatically. 
+ +| |Prior to continuing, you may want to read [Using `@Transactional`](#transaction-declarative-annotations) and[AOP](core.html#aop) respectively.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to create a transaction manager and configure the`AnnotationTransactionAspect` to use it: + +Java + +``` +// construct an appropriate transaction manager +DataSourceTransactionManager txManager = new DataSourceTransactionManager(getDataSource()); + +// configure the AnnotationTransactionAspect to use it; this must be done before executing any transactional methods +AnnotationTransactionAspect.aspectOf().setTransactionManager(txManager); +``` + +Kotlin + +``` +// construct an appropriate transaction manager +val txManager = DataSourceTransactionManager(getDataSource()) + +// configure the AnnotationTransactionAspect to use it; this must be done before executing any transactional methods +AnnotationTransactionAspect.aspectOf().transactionManager = txManager +``` + +| |When you use this aspect, you must annotate the implementation class (or the methods
within that class or both), not the interface (if any) that the class implements. AspectJ
follows Java’s rule that annotations on interfaces are not inherited.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `@Transactional` annotation on a class specifies the default transaction semantics +for the execution of any public method in the class. + +The `@Transactional` annotation on a method within the class overrides the default +transaction semantics given by the class annotation (if present). You can annotate any method, +regardless of visibility. + +To weave your applications with the `AnnotationTransactionAspect`, you must either build +your application with AspectJ (see the[AspectJ Development +Guide](https://www.eclipse.org/aspectj/doc/released/devguide/index.html)) or use load-time weaving. See [Load-time weaving with +AspectJ in the Spring Framework](core.html#aop-aj-ltw) for a discussion of load-time weaving with AspectJ. + +### 1.5. Programmatic Transaction Management + +The Spring Framework provides two means of programmatic transaction management, by using: + +* The `TransactionTemplate` or `TransactionalOperator`. + +* A `TransactionManager` implementation directly. + +The Spring team generally recommends the `TransactionTemplate` for programmatic +transaction management in imperative flows and `TransactionalOperator` for reactive code. +The second approach is similar to using the JTA `UserTransaction` API, although exception +handling is less cumbersome. + +#### 1.5.1. Using the `TransactionTemplate` + +The `TransactionTemplate` adopts the same approach as other Spring templates, such as +the `JdbcTemplate`. 
It uses a callback approach (to free application code from having to +do the boilerplate acquisition and release transactional resources) and results in +code that is intention driven, in that your code focuses solely on what +you want to do. + +| |As the examples that follow show, using the `TransactionTemplate` absolutely
couples you to Spring’s transaction infrastructure and APIs. Whether or not programmatic
transaction management is suitable for your development needs is a decision that you
have to make yourself.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Application code that must run in a transactional context and that explicitly uses the`TransactionTemplate` resembles the next example. You, as an application +developer, can write a `TransactionCallback` implementation (typically expressed as an +anonymous inner class) that contains the code that you need to run in the context of +a transaction. You can then pass an instance of your custom `TransactionCallback` to the`execute(..)` method exposed on the `TransactionTemplate`. The following example shows how to do so: + +Java + +``` +public class SimpleService implements Service { + + // single TransactionTemplate shared amongst all methods in this instance + private final TransactionTemplate transactionTemplate; + + // use constructor-injection to supply the PlatformTransactionManager + public SimpleService(PlatformTransactionManager transactionManager) { + this.transactionTemplate = new TransactionTemplate(transactionManager); + } + + public Object someServiceMethod() { + return transactionTemplate.execute(new TransactionCallback() { + // the code in this method runs in a transactional context + public Object doInTransaction(TransactionStatus status) { + updateOperation1(); + return resultOfUpdateOperation2(); + } + }); + } +} +``` + +Kotlin + +``` +// use constructor-injection to supply the PlatformTransactionManager +class SimpleService(transactionManager: PlatformTransactionManager) : Service { + + // single TransactionTemplate shared amongst all methods in this instance + private val transactionTemplate = TransactionTemplate(transactionManager) + + fun someServiceMethod() = transactionTemplate.execute { + updateOperation1() + 
resultOfUpdateOperation2() + } +} +``` + +If there is no return value, you can use the convenient `TransactionCallbackWithoutResult` class +with an anonymous class, as follows: + +Java + +``` +transactionTemplate.execute(new TransactionCallbackWithoutResult() { + protected void doInTransactionWithoutResult(TransactionStatus status) { + updateOperation1(); + updateOperation2(); + } +}); +``` + +Kotlin + +``` +transactionTemplate.execute(object : TransactionCallbackWithoutResult() { + override fun doInTransactionWithoutResult(status: TransactionStatus) { + updateOperation1() + updateOperation2() + } +}) +``` + +Code within the callback can roll the transaction back by calling the`setRollbackOnly()` method on the supplied `TransactionStatus` object, as follows: + +Java + +``` +transactionTemplate.execute(new TransactionCallbackWithoutResult() { + + protected void doInTransactionWithoutResult(TransactionStatus status) { + try { + updateOperation1(); + updateOperation2(); + } catch (SomeBusinessException ex) { + status.setRollbackOnly(); + } + } +}); +``` + +Kotlin + +``` +transactionTemplate.execute(object : TransactionCallbackWithoutResult() { + + override fun doInTransactionWithoutResult(status: TransactionStatus) { + try { + updateOperation1() + updateOperation2() + } catch (ex: SomeBusinessException) { + status.setRollbackOnly() + } + } +}) +``` + +##### Specifying Transaction Settings + +You can specify transaction settings (such as the propagation mode, the isolation level, +the timeout, and so forth) on the `TransactionTemplate` either programmatically or in +configuration. By default, `TransactionTemplate` instances have the[default transactional settings](#transaction-declarative-txadvice-settings). 
The +following example shows the programmatic customization of the transactional settings for +a specific `TransactionTemplate:` + +Java + +``` +public class SimpleService implements Service { + + private final TransactionTemplate transactionTemplate; + + public SimpleService(PlatformTransactionManager transactionManager) { + this.transactionTemplate = new TransactionTemplate(transactionManager); + + // the transaction settings can be set here explicitly if so desired + this.transactionTemplate.setIsolationLevel(TransactionDefinition.ISOLATION_READ_UNCOMMITTED); + this.transactionTemplate.setTimeout(30); // 30 seconds + // and so forth... + } +} +``` + +Kotlin + +``` +class SimpleService(transactionManager: PlatformTransactionManager) : Service { + + private val transactionTemplate = TransactionTemplate(transactionManager).apply { + // the transaction settings can be set here explicitly if so desired + isolationLevel = TransactionDefinition.ISOLATION_READ_UNCOMMITTED + timeout = 30 // 30 seconds + // and so forth... + } +} +``` + +The following example defines a `TransactionTemplate` with some custom transactional +settings by using Spring XML configuration: + +``` + + + + +``` + +You can then inject the `sharedTransactionTemplate`into as many services as are required. + +Finally, instances of the `TransactionTemplate` class are thread-safe, in that instances +do not maintain any conversational state. `TransactionTemplate` instances do, however, +maintain configuration state. So, while a number of classes may share a single instance +of a `TransactionTemplate`, if a class needs to use a `TransactionTemplate` with +different settings (for example, a different isolation level), you need to create +two distinct `TransactionTemplate` instances. + +#### 1.5.2. Using the `TransactionOperator` + +The `TransactionOperator` follows an operator design that is similar to other reactive +operators. 
It uses a callback approach (to free application code from having to do the +boilerplate acquisition and release of transactional resources) and results in code that is +intention driven, in that your code focuses solely on what you want to do. + +| |As the examples that follow show, using the `TransactionalOperator` absolutely
couples you to Spring’s transaction infrastructure and APIs. Whether or not programmatic
transaction management is suitable for your development needs is a decision that you have
to make yourself.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Application code that must run in a transactional context and that explicitly uses +the `TransactionOperator` resembles the next example: + +Java + +``` +public class SimpleService implements Service { + + // single TransactionOperator shared amongst all methods in this instance + private final TransactionalOperator transactionalOperator; + + // use constructor-injection to supply the ReactiveTransactionManager + public SimpleService(ReactiveTransactionManager transactionManager) { + this.transactionOperator = TransactionalOperator.create(transactionManager); + } + + public Mono someServiceMethod() { + + // the code in this method runs in a transactional context + + Mono update = updateOperation1(); + + return update.then(resultOfUpdateOperation2).as(transactionalOperator::transactional); + } +} +``` + +Kotlin + +``` +// use constructor-injection to supply the ReactiveTransactionManager +class SimpleService(transactionManager: ReactiveTransactionManager) : Service { + + // single TransactionalOperator shared amongst all methods in this instance + private val transactionalOperator = TransactionalOperator.create(transactionManager) + + suspend fun someServiceMethod() = transactionalOperator.executeAndAwait { + updateOperation1() + resultOfUpdateOperation2() + } +} +``` + +`TransactionalOperator` can be used in two ways: + +* Operator-style using Project Reactor types (`mono.as(transactionalOperator::transactional)`) + +* Callback-style for every other case (`transactionalOperator.execute(TransactionCallback)`) + +Code within the callback can roll the transaction back by calling the `setRollbackOnly()`method on the supplied `ReactiveTransaction` object, as 
follows: + +Java + +``` +transactionalOperator.execute(new TransactionCallback<>() { + + public Mono doInTransaction(ReactiveTransaction status) { + return updateOperation1().then(updateOperation2) + .doOnError(SomeBusinessException.class, e -> status.setRollbackOnly()); + } + } +}); +``` + +Kotlin + +``` +transactionalOperator.execute(object : TransactionCallback() { + + override fun doInTransactionWithoutResult(status: ReactiveTransaction) { + updateOperation1().then(updateOperation2) + .doOnError(SomeBusinessException.class, e -> status.setRollbackOnly()) + } +}) +``` + +##### Cancel Signals + +In Reactive Streams, a `Subscriber` can cancel its `Subscription` and stop its`Publisher`. Operators in Project Reactor, as well as in other libraries, such as `next()`,`take(long)`, `timeout(Duration)`, and others can issue cancellations. There is no way to +know the reason for the cancellation, whether it is due to an error or a simply lack of +interest to consume further. Since version 5.3 cancel signals lead to a roll back. +As a result it is important to consider the operators used downstream from a transaction`Publisher`. In particular in the case of a `Flux` or other multi-value `Publisher`, +the full output must be consumed to allow the transaction to complete. + +##### Specifying Transaction Settings + +You can specify transaction settings (such as the propagation mode, the isolation level, +the timeout, and so forth) for the `TransactionalOperator`. By default,`TransactionalOperator` instances have[default transactional settings](#transaction-declarative-txadvice-settings). 
The +following example shows customization of the transactional settings for a specific`TransactionalOperator:` + +Java + +``` +public class SimpleService implements Service { + + private final TransactionalOperator transactionalOperator; + + public SimpleService(ReactiveTransactionManager transactionManager) { + DefaultTransactionDefinition definition = new DefaultTransactionDefinition(); + + // the transaction settings can be set here explicitly if so desired + definition.setIsolationLevel(TransactionDefinition.ISOLATION_READ_UNCOMMITTED); + definition.setTimeout(30); // 30 seconds + // and so forth... + + this.transactionalOperator = TransactionalOperator.create(transactionManager, definition); + } +} +``` + +Kotlin + +``` +class SimpleService(transactionManager: ReactiveTransactionManager) : Service { + + private val definition = DefaultTransactionDefinition().apply { + // the transaction settings can be set here explicitly if so desired + isolationLevel = TransactionDefinition.ISOLATION_READ_UNCOMMITTED + timeout = 30 // 30 seconds + // and so forth... + } + private val transactionalOperator = TransactionalOperator(transactionManager, definition) +} +``` + +#### 1.5.3. Using the `TransactionManager` + +The following sections explain programmatic usage of imperative and reactive transaction +managers. + +##### Using the `PlatformTransactionManager` + +For imperative transactions, you can use a`org.springframework.transaction.PlatformTransactionManager` directly to manage your +transaction. To do so, pass the implementation of the `PlatformTransactionManager` you +use to your bean through a bean reference. Then, by using the `TransactionDefinition` and`TransactionStatus` objects, you can initiate transactions, roll back, and commit. 
The +following example shows how to do so: + +Java + +``` +DefaultTransactionDefinition def = new DefaultTransactionDefinition(); +// explicitly setting the transaction name is something that can be done only programmatically +def.setName("SomeTxName"); +def.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRED); + +TransactionStatus status = txManager.getTransaction(def); +try { + // put your business logic here +} catch (MyException ex) { + txManager.rollback(status); + throw ex; +} +txManager.commit(status); +``` + +Kotlin + +``` +val def = DefaultTransactionDefinition() +// explicitly setting the transaction name is something that can be done only programmatically +def.setName("SomeTxName") +def.propagationBehavior = TransactionDefinition.PROPAGATION_REQUIRED + +val status = txManager.getTransaction(def) +try { + // put your business logic here +} catch (ex: MyException) { + txManager.rollback(status) + throw ex +} + +txManager.commit(status) +``` + +##### Using the `ReactiveTransactionManager` + +When working with reactive transactions, you can use a`org.springframework.transaction.ReactiveTransactionManager` directly to manage your +transaction. To do so, pass the implementation of the `ReactiveTransactionManager` you +use to your bean through a bean reference. Then, by using the `TransactionDefinition` and`ReactiveTransaction` objects, you can initiate transactions, roll back, and commit. 
The +following example shows how to do so: + +Java + +``` +DefaultTransactionDefinition def = new DefaultTransactionDefinition(); +// explicitly setting the transaction name is something that can be done only programmatically +def.setName("SomeTxName"); +def.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRED); + +Mono reactiveTx = txManager.getReactiveTransaction(def); + +reactiveTx.flatMap(status -> { + + Mono tx = ...; // put your business logic here + + return tx.then(txManager.commit(status)) + .onErrorResume(ex -> txManager.rollback(status).then(Mono.error(ex))); +}); +``` + +Kotlin + +``` +val def = DefaultTransactionDefinition() +// explicitly setting the transaction name is something that can be done only programmatically +def.setName("SomeTxName") +def.propagationBehavior = TransactionDefinition.PROPAGATION_REQUIRED + +val reactiveTx = txManager.getReactiveTransaction(def) +reactiveTx.flatMap { status -> + + val tx = ... // put your business logic here + + tx.then(txManager.commit(status)) + .onErrorResume { ex -> txManager.rollback(status).then(Mono.error(ex)) } +} +``` + +### 1.6. Choosing Between Programmatic and Declarative Transaction Management + +Programmatic transaction management is usually a good idea only if you have a small +number of transactional operations. For example, if you have a web application that +requires transactions only for certain update operations, you may not want to set up +transactional proxies by using Spring or any other technology. In this case, using the`TransactionTemplate` may be a good approach. Being able to set the transaction name +explicitly is also something that can be done only by using the programmatic approach +to transaction management. + +On the other hand, if your application has numerous transactional operations, +declarative transaction management is usually worthwhile. It keeps transaction +management out of business logic and is not difficult to configure. 
When using the +Spring Framework, rather than EJB CMT, the configuration cost of declarative transaction +management is greatly reduced. + +### 1.7. Transaction-bound Events + +As of Spring 4.2, the listener of an event can be bound to a phase of the transaction. +The typical example is to handle the event when the transaction has completed successfully. +Doing so lets events be used with more flexibility when the outcome of the current +transaction actually matters to the listener. + +You can register a regular event listener by using the `@EventListener` annotation. +If you need to bind it to the transaction, use `@TransactionalEventListener`. +When you do so, the listener is bound to the commit phase of the transaction by default. + +The next example shows this concept. Assume that a component publishes an order-created +event and that we want to define a listener that should only handle that event once the +transaction in which it has been published has committed successfully. The following +example sets up such an event listener: + +Java + +``` +@Component +public class MyComponent { + + @TransactionalEventListener + public void handleOrderCreatedEvent(CreationEvent creationEvent) { + // ... + } +} +``` + +Kotlin + +``` +@Component +class MyComponent { + + @TransactionalEventListener + fun handleOrderCreatedEvent(creationEvent: CreationEvent) { + // ... + } +} +``` + +The `@TransactionalEventListener` annotation exposes a `phase` attribute that lets you +customize the phase of the transaction to which the listener should be bound. +The valid phases are `BEFORE_COMMIT`, `AFTER_COMMIT` (default), `AFTER_ROLLBACK`, as well as`AFTER_COMPLETION` which aggregates the transaction completion (be it a commit or a rollback). + +If no transaction is running, the listener is not invoked at all, since we cannot honor the +required semantics. You can, however, override that behavior by setting the `fallbackExecution`attribute of the annotation to `true`. 
+ +| |`@TransactionalEventListener` only works with thread-bound transactions managed by `PlatformTransactionManager`. A reactive transaction managed by `ReactiveTransactionManager` uses the Reactor context instead of thread-local attributes, so from the perspective of
an event listener, there is no compatible active transaction that it can participate in.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.8. Application server-specific integration + +Spring’s transaction abstraction is generally application server-agnostic. Additionally, +Spring’s `JtaTransactionManager` class (which can optionally perform a JNDI lookup for +the JTA `UserTransaction` and `TransactionManager` objects) autodetects the location for +the latter object, which varies by application server. Having access to the JTA`TransactionManager` allows for enhanced transaction semantics — in particular, +supporting transaction suspension. See the[`JtaTransactionManager`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/transaction/jta/JtaTransactionManager.html)javadoc for details. + +Spring’s `JtaTransactionManager` is the standard choice to run on Java EE application +servers and is known to work on all common servers. Advanced functionality, such as +transaction suspension, works on many servers as well (including GlassFish, JBoss and +Geronimo) without any special configuration required. However, for fully supported +transaction suspension and further advanced integration, Spring includes special adapters +for WebLogic Server and WebSphere. These adapters are discussed in the following +sections. + +For standard scenarios, including WebLogic Server and WebSphere, consider using the +convenient `` configuration element. When configured, +this element automatically detects the underlying server and chooses the best +transaction manager available for the platform. 
This means that you need not explicitly +configure server-specific adapter classes (as discussed in the following sections). +Rather, they are chosen automatically, with the standard`JtaTransactionManager` as the default fallback. + +#### 1.8.1. IBM WebSphere + +On WebSphere 6.1.0.9 and above, the recommended Spring JTA transaction manager to use is`WebSphereUowTransactionManager`. This special adapter uses IBM’s `UOWManager` API, +which is available in WebSphere Application Server 6.1.0.9 and later. With this adapter, +Spring-driven transaction suspension (suspend and resume as initiated by`PROPAGATION_REQUIRES_NEW`) is officially supported by IBM. + +#### 1.8.2. Oracle WebLogic Server + +On WebLogic Server 9.0 or above, you would typically use the`WebLogicJtaTransactionManager` instead of the stock `JtaTransactionManager` class. This +special WebLogic-specific subclass of the normal `JtaTransactionManager` supports the +full power of Spring’s transaction definitions in a WebLogic-managed transaction +environment, beyond standard JTA semantics. Features include transaction names, +per-transaction isolation levels, and proper resuming of transactions in all cases. + +### 1.9. Solutions to Common Problems + +This section describes solutions to some common problems. + +#### 1.9.1. Using the Wrong Transaction Manager for a Specific `DataSource` #### + +Use the correct `PlatformTransactionManager` implementation based on your choice of +transactional technologies and requirements. Used properly, the Spring Framework merely +provides a straightforward and portable abstraction. If you use global +transactions, you must use the`org.springframework.transaction.jta.JtaTransactionManager` class (or an[application server-specific subclass](#transaction-application-server-integration) of +it) for all your transactional operations. Otherwise, the transaction infrastructure +tries to perform local transactions on such resources as container `DataSource`instances. 
Such local transactions do not make sense, and a good application server +treats them as errors. + +### 1.10. Further Resources + +For more information about the Spring Framework’s transaction support, see: + +* [Distributed + transactions in Spring, with and without XA](https://www.javaworld.com/javaworld/jw-01-2009/jw-01-spring-transactions.html) is a JavaWorld presentation in which + Spring’s David Syer guides you through seven patterns for distributed + transactions in Spring applications, three of them with XA and four without. + +* [*Java Transaction Design Strategies*](https://www.infoq.com/minibooks/JTDS) is a book + available from [InfoQ](https://www.infoq.com/) that provides a well-paced introduction + to transactions in Java. It also includes side-by-side examples of how to configure + and use transactions with both the Spring Framework and EJB3. + +## 2. DAO Support + +The Data Access Object (DAO) support in Spring is aimed at making it easy to work with +data access technologies (such as JDBC, Hibernate, or JPA) in a consistent way. This +lets you switch between the aforementioned persistence technologies fairly easily, +and it also lets you code without worrying about catching exceptions that are +specific to each technology. + +### 2.1. Consistent Exception Hierarchy + +Spring provides a convenient translation from technology-specific exceptions, such as`SQLException` to its own exception class hierarchy, which has `DataAccessException` as +the root exception. These exceptions wrap the original exception so that there is never +any risk that you might lose any information about what might have gone wrong. + +In addition to JDBC exceptions, Spring can also wrap JPA- and Hibernate-specific exceptions, +converting them to a set of focused runtime exceptions. This lets you handle most +non-recoverable persistence exceptions in only the appropriate layers, without having +annoying boilerplate catch-and-throw blocks and exception declarations in your DAOs. 
+(You can still trap and handle exceptions anywhere you need to though.) As mentioned above, +JDBC exceptions (including database-specific dialects) are also converted to the same +hierarchy, meaning that you can perform some operations with JDBC within a consistent +programming model. + +The preceding discussion holds true for the various template classes in Spring’s support +for various ORM frameworks. If you use the interceptor-based classes, the application must +care about handling `HibernateExceptions` and `PersistenceExceptions` itself, preferably by +delegating to the `convertHibernateAccessException(..)` or `convertJpaAccessException(..)`methods, respectively, of `SessionFactoryUtils`. These methods convert the exceptions +to exceptions that are compatible with the exceptions in the `org.springframework.dao`exception hierarchy. As `PersistenceExceptions` are unchecked, they can get thrown, too +(sacrificing generic DAO abstraction in terms of exceptions, though). + +The following image shows the exception hierarchy that Spring provides. +(Note that the class hierarchy detailed in the image shows only a subset of the entire`DataAccessException` hierarchy.) + +![DataAccessException](images/DataAccessException.png) + +### 2.2. Annotations Used to Configure DAO or Repository Classes + +The best way to guarantee that your Data Access Objects (DAOs) or repositories provide +exception translation is to use the `@Repository` annotation. This annotation also +lets the component scanning support find and configure your DAOs and repositories +without having to provide XML configuration entries for them. The following example shows +how to use the `@Repository` annotation: + +Java + +``` +@Repository (1) +public class SomeMovieFinder implements MovieFinder { + // ... +} +``` + +|**1**|The `@Repository` annotation.| +|-----|-----------------------------| + +Kotlin + +``` +@Repository (1) +class SomeMovieFinder : MovieFinder { + // ... 
+} +``` + +|**1**|The `@Repository` annotation.| +|-----|-----------------------------| + +Any DAO or repository implementation needs access to a persistence resource, +depending on the persistence technology used. For example, a JDBC-based repository +needs access to a JDBC `DataSource`, and a JPA-based repository needs access to an`EntityManager`. The easiest way to accomplish this is to have this resource dependency +injected by using one of the `@Autowired`, `@Inject`, `@Resource` or `@PersistenceContext`annotations. The following example works for a JPA repository: + +Java + +``` +@Repository +public class JpaMovieFinder implements MovieFinder { + + @PersistenceContext + private EntityManager entityManager; + + // ... +} +``` + +Kotlin + +``` +@Repository +class JpaMovieFinder : MovieFinder { + + @PersistenceContext + private lateinit var entityManager: EntityManager + + // ... +} +``` + +If you use the classic Hibernate APIs, you can inject `SessionFactory`, as the following +example shows: + +Java + +``` +@Repository +public class HibernateMovieFinder implements MovieFinder { + + private SessionFactory sessionFactory; + + @Autowired + public void setSessionFactory(SessionFactory sessionFactory) { + this.sessionFactory = sessionFactory; + } + + // ... +} +``` + +Kotlin + +``` +@Repository +class HibernateMovieFinder(private val sessionFactory: SessionFactory) : MovieFinder { + // ... +} +``` + +The last example we show here is for typical JDBC support. You could have the`DataSource` injected into an initialization method or a constructor, where you would create a`JdbcTemplate` and other data access support classes (such as `SimpleJdbcCall` and others) by using +this `DataSource`. 
The following example autowires a `DataSource`: + +Java + +``` +@Repository +public class JdbcMovieFinder implements MovieFinder { + + private JdbcTemplate jdbcTemplate; + + @Autowired + public void init(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + // ... +} +``` + +Kotlin + +``` +@Repository +class JdbcMovieFinder(dataSource: DataSource) : MovieFinder { + + private val jdbcTemplate = JdbcTemplate(dataSource) + + // ... +} +``` + +| |See the specific coverage of each persistence technology for details on how to
configure the application context to take advantage of these annotations.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 3. Data Access with JDBC + +The value provided by the Spring Framework JDBC abstraction is perhaps best shown by +the sequence of actions outlined in the following table below. The table shows which actions Spring +takes care of and which actions are your responsibility. + +| Action |Spring|You| +|--------------------------------------------------------|------|---| +| Define connection parameters. | | X | +| Open the connection. | X | | +| Specify the SQL statement. | | X | +| Declare parameters and provide parameter values | | X | +| Prepare and run the statement. | X | | +|Set up the loop to iterate through the results (if any).| X | | +| Do the work for each iteration. | | X | +| Process any exception. | X | | +| Handle transactions. | X | | +|Close the connection, the statement, and the resultset. | X | | + +The Spring Framework takes care of all the low-level details that can make JDBC such a +tedious API. + +### 3.1. Choosing an Approach for JDBC Database Access + +You can choose among several approaches to form the basis for your JDBC database access. +In addition to three flavors of `JdbcTemplate`, a new `SimpleJdbcInsert` and`SimpleJdbcCall` approach optimizes database metadata, and the RDBMS Object style takes a +more object-oriented approach similar to that of JDO Query design. Once you start using +one of these approaches, you can still mix and match to include a feature from a +different approach. All approaches require a JDBC 2.0-compliant driver, and some +advanced features require a JDBC 3.0 driver. + +* `JdbcTemplate` is the classic and most popular Spring JDBC approach. This + “lowest-level” approach and all others use a JdbcTemplate under the covers. 
+ +* `NamedParameterJdbcTemplate` wraps a `JdbcTemplate` to provide named parameters + instead of the traditional JDBC `?` placeholders. This approach provides better + documentation and ease of use when you have multiple parameters for an SQL statement. + +* `SimpleJdbcInsert` and `SimpleJdbcCall` optimize database metadata to limit the amount + of necessary configuration. This approach simplifies coding so that you need to + provide only the name of the table or procedure and provide a map of parameters matching + the column names. This works only if the database provides adequate metadata. If the + database does not provide this metadata, you have to provide explicit + configuration of the parameters. + +* RDBMS objects — including `MappingSqlQuery`, `SqlUpdate`, and `StoredProcedure` — + require you to create reusable and thread-safe objects during initialization of your + data-access layer. This approach is modeled after JDO Query, wherein you define your + query string, declare parameters, and compile the query. Once you do that,`execute(…​)`, `update(…​)`, and `findObject(…​)` methods can be called multiple + times with various parameter values. + +### 3.2. Package Hierarchy + +The Spring Framework’s JDBC abstraction framework consists of four different packages: + +* `core`: The `org.springframework.jdbc.core` package contains the `JdbcTemplate` class and its + various callback interfaces, plus a variety of related classes. A subpackage named`org.springframework.jdbc.core.simple` contains the `SimpleJdbcInsert` and`SimpleJdbcCall` classes. Another subpackage named`org.springframework.jdbc.core.namedparam` contains the `NamedParameterJdbcTemplate`class and the related support classes. See [Using the JDBC Core Classes to Control Basic JDBC Processing and Error Handling](#jdbc-core), [JDBC Batch Operations](#jdbc-advanced-jdbc), and[Simplifying JDBC Operations with the `SimpleJdbc` Classes](#jdbc-simple-jdbc). 
+ +* `datasource`: The `org.springframework.jdbc.datasource` package contains a utility class for easy`DataSource` access and various simple `DataSource` implementations that you can use for + testing and running unmodified JDBC code outside of a Java EE container. A subpackage + named `org.springfamework.jdbc.datasource.embedded` provides support for creating + embedded databases by using Java database engines, such as HSQL, H2, and Derby. See[Controlling Database Connections](#jdbc-connections) and [Embedded Database Support](#jdbc-embedded-database-support). + +* `object`: The `org.springframework.jdbc.object` package contains classes that represent RDBMS + queries, updates, and stored procedures as thread-safe, reusable objects. See[Modeling JDBC Operations as Java Objects](#jdbc-object). This approach is modeled by JDO, although objects returned by queries + are naturally disconnected from the database. This higher-level of JDBC abstraction + depends on the lower-level abstraction in the `org.springframework.jdbc.core` package. + +* `support`: The `org.springframework.jdbc.support` package provides `SQLException` translation + functionality and some utility classes. Exceptions thrown during JDBC processing are + translated to exceptions defined in the `org.springframework.dao` package. This means + that code using the Spring JDBC abstraction layer does not need to implement JDBC or + RDBMS-specific error handling. All translated exceptions are unchecked, which gives you + the option of catching the exceptions from which you can recover while letting other + exceptions be propagated to the caller. See [Using `SQLExceptionTranslator`](#jdbc-SQLExceptionTranslator). + +### 3.3. Using the JDBC Core Classes to Control Basic JDBC Processing and Error Handling + +This section covers how to use the JDBC core classes to control basic JDBC processing, +including error handling. 
It includes the following topics: + +* [Using `JdbcTemplate`](#jdbc-JdbcTemplate) + +* [Using `NamedParameterJdbcTemplate`](#jdbc-NamedParameterJdbcTemplate) + +* [Using `SQLExceptionTranslator`](#jdbc-SQLExceptionTranslator) + +* [Running Statements](#jdbc-statements-executing) + +* [Running Queries](#jdbc-statements-querying) + +* [Updating the Database](#jdbc-updates) + +* [Retrieving Auto-generated Keys](#jdbc-auto-generated-keys) + +#### 3.3.1. Using `JdbcTemplate` + +`JdbcTemplate` is the central class in the JDBC core package. It handles the +creation and release of resources, which helps you avoid common errors, such as +forgetting to close the connection. It performs the basic tasks of the core JDBC +workflow (such as statement creation and execution), leaving application code to provide +SQL and extract results. The `JdbcTemplate` class: + +* Runs SQL queries + +* Updates statements and stored procedure calls + +* Performs iteration over `ResultSet` instances and extraction of returned parameter values. + +* Catches JDBC exceptions and translates them to the generic, more informative, exception + hierarchy defined in the `org.springframework.dao` package. (See [Consistent Exception Hierarchy](#dao-exceptions).) + +When you use the `JdbcTemplate` for your code, you need only to implement callback +interfaces, giving them a clearly defined contract. Given a `Connection` provided by the`JdbcTemplate` class, the `PreparedStatementCreator` callback interface creates a prepared +statement, providing SQL and any necessary parameters. The same is true for the`CallableStatementCreator` interface, which creates callable statements. The`RowCallbackHandler` interface extracts values from each row of a `ResultSet`. + +You can use `JdbcTemplate` within a DAO implementation through direct instantiation +with a `DataSource` reference, or you can configure it in a Spring IoC container and give it to +DAOs as a bean reference. 
+ +| |The `DataSource` should always be configured as a bean in the Spring IoC container. In
the first case the bean is given to the service directly; in the second case it is given
to the prepared template.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +All SQL issued by this class is logged at the `DEBUG` level under the category +corresponding to the fully qualified class name of the template instance (typically`JdbcTemplate`, but it may be different if you use a custom subclass of the`JdbcTemplate` class). + +The following sections provide some examples of `JdbcTemplate` usage. These examples +are not an exhaustive list of all of the functionality exposed by the `JdbcTemplate`. +See the attendant [javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jdbc/core/JdbcTemplate.html) for that. + +##### Querying (`SELECT`) + +The following query gets the number of rows in a relation: + +Java + +``` +int rowCount = this.jdbcTemplate.queryForObject("select count(*) from t_actor", Integer.class); +``` + +Kotlin + +``` +val rowCount = jdbcTemplate.queryForObject("select count(*) from t_actor")!! +``` + +The following query uses a bind variable: + +Java + +``` +int countOfActorsNamedJoe = this.jdbcTemplate.queryForObject( + "select count(*) from t_actor where first_name = ?", Integer.class, "Joe"); +``` + +Kotlin + +``` +val countOfActorsNamedJoe = jdbcTemplate.queryForObject( + "select count(*) from t_actor where first_name = ?", arrayOf("Joe"))!! +``` + +The following query looks for a `String`: + +Java + +``` +String lastName = this.jdbcTemplate.queryForObject( + "select last_name from t_actor where id = ?", + String.class, 1212L); +``` + +Kotlin + +``` +val lastName = this.jdbcTemplate.queryForObject( + "select last_name from t_actor where id = ?", + arrayOf(1212L))!! 
+``` + +The following query finds and populates a single domain object: + +Java + +``` +Actor actor = jdbcTemplate.queryForObject( + "select first_name, last_name from t_actor where id = ?", + (resultSet, rowNum) -> { + Actor newActor = new Actor(); + newActor.setFirstName(resultSet.getString("first_name")); + newActor.setLastName(resultSet.getString("last_name")); + return newActor; + }, + 1212L); +``` + +Kotlin + +``` +val actor = jdbcTemplate.queryForObject( + "select first_name, last_name from t_actor where id = ?", + arrayOf(1212L)) { rs, _ -> + Actor(rs.getString("first_name"), rs.getString("last_name")) + } +``` + +The following query finds and populates a list of domain objects: + +Java + +``` +List actors = this.jdbcTemplate.query( + "select first_name, last_name from t_actor", + (resultSet, rowNum) -> { + Actor actor = new Actor(); + actor.setFirstName(resultSet.getString("first_name")); + actor.setLastName(resultSet.getString("last_name")); + return actor; + }); +``` + +Kotlin + +``` +val actors = jdbcTemplate.query("select first_name, last_name from t_actor") { rs, _ -> + Actor(rs.getString("first_name"), rs.getString("last_name")) +``` + +If the last two snippets of code actually existed in the same application, it would make +sense to remove the duplication present in the two `RowMapper` lambda expressions and +extract them out into a single field that could then be referenced by DAO methods as needed. 
+For example, it may be better to write the preceding code snippet as follows: + +Java + +``` +private final RowMapper actorRowMapper = (resultSet, rowNum) -> { + Actor actor = new Actor(); + actor.setFirstName(resultSet.getString("first_name")); + actor.setLastName(resultSet.getString("last_name")); + return actor; +}; + +public List findAllActors() { + return this.jdbcTemplate.query("select first_name, last_name from t_actor", actorRowMapper); +} +``` + +Kotlin + +``` +val actorMapper = RowMapper { rs: ResultSet, rowNum: Int -> + Actor(rs.getString("first_name"), rs.getString("last_name")) +} + +fun findAllActors(): List { + return jdbcTemplate.query("select first_name, last_name from t_actor", actorMapper) +} +``` + +##### Updating (`INSERT`, `UPDATE`, and `DELETE`) with `JdbcTemplate` + +You can use the `update(..)` method to perform insert, update, and delete operations. +Parameter values are usually provided as variable arguments or, alternatively, as an object array. + +The following example inserts a new entry: + +Java + +``` +this.jdbcTemplate.update( + "insert into t_actor (first_name, last_name) values (?, ?)", + "Leonor", "Watling"); +``` + +Kotlin + +``` +jdbcTemplate.update( + "insert into t_actor (first_name, last_name) values (?, ?)", + "Leonor", "Watling") +``` + +The following example updates an existing entry: + +Java + +``` +this.jdbcTemplate.update( + "update t_actor set last_name = ? where id = ?", + "Banjo", 5276L); +``` + +Kotlin + +``` +jdbcTemplate.update( + "update t_actor set last_name = ? where id = ?", + "Banjo", 5276L) +``` + +The following example deletes an entry: + +Java + +``` +this.jdbcTemplate.update( + "delete from t_actor where id = ?", + Long.valueOf(actorId)); +``` + +Kotlin + +``` +jdbcTemplate.update("delete from t_actor where id = ?", actorId.toLong()) +``` + +##### Other `JdbcTemplate` Operations + +You can use the `execute(..)` method to run any arbitrary SQL. Consequently, the +method is often used for DDL statements. 
It is heavily overloaded with variants that take +callback interfaces, binding variable arrays, and so on. The following example creates a +table: + +Java + +``` +this.jdbcTemplate.execute("create table mytable (id integer, name varchar(100))"); +``` + +Kotlin + +``` +jdbcTemplate.execute("create table mytable (id integer, name varchar(100))") +``` + +The following example invokes a stored procedure: + +Java + +``` +this.jdbcTemplate.update( + "call SUPPORT.REFRESH_ACTORS_SUMMARY(?)", + Long.valueOf(unionId)); +``` + +Kotlin + +``` +jdbcTemplate.update( + "call SUPPORT.REFRESH_ACTORS_SUMMARY(?)", + unionId.toLong()) +``` + +More sophisticated stored procedure support is [covered later](#jdbc-StoredProcedure). + +##### `JdbcTemplate` Best Practices + +Instances of the `JdbcTemplate` class are thread-safe, once configured. This is +important because it means that you can configure a single instance of a `JdbcTemplate`and then safely inject this shared reference into multiple DAOs (or repositories). +The `JdbcTemplate` is stateful, in that it maintains a reference to a `DataSource`, but +this state is not conversational state. + +A common practice when using the `JdbcTemplate` class (and the associated[`NamedParameterJdbcTemplate`](#jdbc-NamedParameterJdbcTemplate) class) is to +configure a `DataSource` in your Spring configuration file and then dependency-inject +that shared `DataSource` bean into your DAO classes. The `JdbcTemplate` is created in +the setter for the `DataSource`. This leads to DAOs that resemble the following: + +Java + +``` +public class JdbcCorporateEventDao implements CorporateEventDao { + + private JdbcTemplate jdbcTemplate; + + public void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + // JDBC-backed implementations of the methods on the CorporateEventDao follow... 
+} +``` + +Kotlin + +``` +class JdbcCorporateEventDao(dataSource: DataSource) : CorporateEventDao { + + private val jdbcTemplate = JdbcTemplate(dataSource) + + // JDBC-backed implementations of the methods on the CorporateEventDao follow... +} +``` + +The following example shows the corresponding XML configuration: + +``` + + + + + + + + + + + + + + + + + +``` + +An alternative to explicit configuration is to use component-scanning and annotation +support for dependency injection. In this case, you can annotate the class with `@Repository`(which makes it a candidate for component-scanning) and annotate the `DataSource` setter +method with `@Autowired`. The following example shows how to do so: + +Java + +``` +@Repository (1) +public class JdbcCorporateEventDao implements CorporateEventDao { + + private JdbcTemplate jdbcTemplate; + + @Autowired (2) + public void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); (3) + } + + // JDBC-backed implementations of the methods on the CorporateEventDao follow... +} +``` + +|**1**| Annotate the class with `@Repository`. | +|-----|----------------------------------------------------------| +|**2**|Annotate the `DataSource` setter method with `@Autowired`.| +|**3**| Create a new `JdbcTemplate` with the `DataSource`. | + +Kotlin + +``` +@Repository (1) +class JdbcCorporateEventDao(dataSource: DataSource) : CorporateEventDao { (2) + + private val jdbcTemplate = JdbcTemplate(dataSource) (3) + + // JDBC-backed implementations of the methods on the CorporateEventDao follow... +} +``` + +|**1**| Annotate the class with `@Repository`. | +|-----|--------------------------------------------------| +|**2**| Constructor injection of the `DataSource`. 
| +|**3**|Create a new `JdbcTemplate` with the `DataSource`.| + +The following example shows the corresponding XML configuration: + +``` + + + + + + + + + + + + + + + + +``` + +If you use Spring’s `JdbcDaoSupport` class and your various JDBC-backed DAO classes +extend from it, your sub-class inherits a `setDataSource(..)` method from the`JdbcDaoSupport` class. You can choose whether to inherit from this class. The`JdbcDaoSupport` class is provided as a convenience only. + +Regardless of which of the above template initialization styles you choose to use (or +not), it is seldom necessary to create a new instance of a `JdbcTemplate` class each +time you want to run SQL. Once configured, a `JdbcTemplate` instance is thread-safe. +If your application accesses multiple +databases, you may want multiple `JdbcTemplate` instances, which requires multiple `DataSources` and, subsequently, multiple differently +configured `JdbcTemplate` instances. + +#### 3.3.2. Using `NamedParameterJdbcTemplate` + +The `NamedParameterJdbcTemplate` class adds support for programming JDBC statements +by using named parameters, as opposed to programming JDBC statements using only classic +placeholder ( `'?'`) arguments. The `NamedParameterJdbcTemplate` class wraps a`JdbcTemplate` and delegates to the wrapped `JdbcTemplate` to do much of its work. This +section describes only those areas of the `NamedParameterJdbcTemplate` class that differ +from the `JdbcTemplate` itself — namely, programming JDBC statements by using named +parameters. The following example shows how to use `NamedParameterJdbcTemplate`: + +Java + +``` +// some JDBC-backed DAO class... 
+private NamedParameterJdbcTemplate namedParameterJdbcTemplate; + +public void setDataSource(DataSource dataSource) { + this.namedParameterJdbcTemplate = new NamedParameterJdbcTemplate(dataSource); +} + +public int countOfActorsByFirstName(String firstName) { + + String sql = "select count(*) from T_ACTOR where first_name = :first_name"; + + SqlParameterSource namedParameters = new MapSqlParameterSource("first_name", firstName); + + return this.namedParameterJdbcTemplate.queryForObject(sql, namedParameters, Integer.class); +} +``` + +Kotlin + +``` +private val namedParameterJdbcTemplate = NamedParameterJdbcTemplate(dataSource) + +fun countOfActorsByFirstName(firstName: String): Int { + val sql = "select count(*) from T_ACTOR where first_name = :first_name" + val namedParameters = MapSqlParameterSource("first_name", firstName) + return namedParameterJdbcTemplate.queryForObject(sql, namedParameters, Int::class.java)!! +} +``` + +Notice the use of the named parameter notation in the value assigned to the `sql`variable and the corresponding value that is plugged into the `namedParameters`variable (of type `MapSqlParameterSource`). + +Alternatively, you can pass along named parameters and their corresponding values to a`NamedParameterJdbcTemplate` instance by using the `Map`-based style. The remaining +methods exposed by the `NamedParameterJdbcOperations` and implemented by the`NamedParameterJdbcTemplate` class follow a similar pattern and are not covered here. + +The following example shows the use of the `Map`-based style: + +Java + +``` +// some JDBC-backed DAO class... 
+private NamedParameterJdbcTemplate namedParameterJdbcTemplate; + +public void setDataSource(DataSource dataSource) { + this.namedParameterJdbcTemplate = new NamedParameterJdbcTemplate(dataSource); +} + +public int countOfActorsByFirstName(String firstName) { + + String sql = "select count(*) from T_ACTOR where first_name = :first_name"; + + Map namedParameters = Collections.singletonMap("first_name", firstName); + + return this.namedParameterJdbcTemplate.queryForObject(sql, namedParameters, Integer.class); +} +``` + +Kotlin + +``` +// some JDBC-backed DAO class... +private val namedParameterJdbcTemplate = NamedParameterJdbcTemplate(dataSource) + +fun countOfActorsByFirstName(firstName: String): Int { + val sql = "select count(*) from T_ACTOR where first_name = :first_name" + val namedParameters = mapOf("first_name" to firstName) + return namedParameterJdbcTemplate.queryForObject(sql, namedParameters, Int::class.java)!! +} +``` + +One nice feature related to the `NamedParameterJdbcTemplate` (and existing in the same +Java package) is the `SqlParameterSource` interface. You have already seen an example of +an implementation of this interface in one of the previous code snippets (the`MapSqlParameterSource` class). An `SqlParameterSource` is a source of named parameter +values to a `NamedParameterJdbcTemplate`. The `MapSqlParameterSource` class is a +simple implementation that is an adapter around a `java.util.Map`, where the keys +are the parameter names and the values are the parameter values. + +Another `SqlParameterSource` implementation is the `BeanPropertySqlParameterSource`class. This class wraps an arbitrary JavaBean (that is, an instance of a class that +adheres to [the +JavaBean conventions](https://www.oracle.com/technetwork/java/javase/documentation/spec-136004.html)) and uses the properties of the wrapped JavaBean as the source +of named parameter values. 
+
+The following example shows a typical JavaBean:
+
+Java
+
+```
+public class Actor {
+
+    private Long id;
+    private String firstName;
+    private String lastName;
+
+    public String getFirstName() {
+        return this.firstName;
+    }
+
+    public String getLastName() {
+        return this.lastName;
+    }
+
+    public Long getId() {
+        return this.id;
+    }
+
+    // setters omitted...
+
+}
+```
+
+Kotlin
+
+```
+data class Actor(val id: Long, val firstName: String, val lastName: String)
+```
+
+The following example uses a `NamedParameterJdbcTemplate` to return the count of the
+members of the class shown in the preceding example:
+
+Java
+
+```
+// some JDBC-backed DAO class...
+private NamedParameterJdbcTemplate namedParameterJdbcTemplate;
+
+public void setDataSource(DataSource dataSource) {
+    this.namedParameterJdbcTemplate = new NamedParameterJdbcTemplate(dataSource);
+}
+
+public int countOfActors(Actor exampleActor) {
+
+    // notice how the named parameters match the properties of the above 'Actor' class
+    String sql = "select count(*) from T_ACTOR where first_name = :firstName and last_name = :lastName";
+
+    SqlParameterSource namedParameters = new BeanPropertySqlParameterSource(exampleActor);
+
+    return this.namedParameterJdbcTemplate.queryForObject(sql, namedParameters, Integer.class);
+}
+```
+
+Kotlin
+
+```
+// some JDBC-backed DAO class...
+private val namedParameterJdbcTemplate = NamedParameterJdbcTemplate(dataSource)
+
+fun countOfActors(exampleActor: Actor): Int {
+    // notice how the named parameters match the properties of the above 'Actor' class
+    val sql = "select count(*) from T_ACTOR where first_name = :firstName and last_name = :lastName"
+    val namedParameters = BeanPropertySqlParameterSource(exampleActor)
+    return namedParameterJdbcTemplate.queryForObject(sql, namedParameters, Int::class.java)!!
+} +``` + +Remember that the `NamedParameterJdbcTemplate` class wraps a classic `JdbcTemplate`template. If you need access to the wrapped `JdbcTemplate` instance to access +functionality that is present only in the `JdbcTemplate` class, you can use the`getJdbcOperations()` method to access the wrapped `JdbcTemplate` through the`JdbcOperations` interface. + +See also [`JdbcTemplate` Best Practices](#jdbc-JdbcTemplate-idioms) for guidelines on using the`NamedParameterJdbcTemplate` class in the context of an application. + +#### 3.3.3. Using `SQLExceptionTranslator` + +`SQLExceptionTranslator` is an interface to be implemented by classes that can translate +between `SQLException`s and Spring’s own `org.springframework.dao.DataAccessException`, +which is agnostic in regard to data access strategy. Implementations can be generic (for +example, using SQLState codes for JDBC) or proprietary (for example, using Oracle error +codes) for greater precision. + +`SQLErrorCodeSQLExceptionTranslator` is the implementation of `SQLExceptionTranslator`that is used by default. This implementation uses specific vendor codes. It is more +precise than the `SQLState` implementation. The error code translations are based on +codes held in a JavaBean type class called `SQLErrorCodes`. This class is created and +populated by an `SQLErrorCodesFactory`, which (as the name suggests) is a factory for +creating `SQLErrorCodes` based on the contents of a configuration file named`sql-error-codes.xml`. This file is populated with vendor codes and based on the`DatabaseProductName` taken from `DatabaseMetaData`. The codes for the actual +database you are using are used. + +The `SQLErrorCodeSQLExceptionTranslator` applies matching rules in the following sequence: + +1. Any custom translation implemented by a subclass. Normally, the provided concrete`SQLErrorCodeSQLExceptionTranslator` is used, so this rule does not apply. It + applies only if you have actually provided a subclass implementation. + +2. 
Any custom implementation of the `SQLExceptionTranslator` interface that is provided + as the `customSqlExceptionTranslator` property of the `SQLErrorCodes` class. + +3. The list of instances of the `CustomSQLErrorCodesTranslation` class (provided for the`customTranslations` property of the `SQLErrorCodes` class) are searched for a match. + +4. Error code matching is applied. + +5. Use the fallback translator. `SQLExceptionSubclassTranslator` is the default fallback + translator. If this translation is not available, the next fallback translator is + the `SQLStateSQLExceptionTranslator`. + +| |The `SQLErrorCodesFactory` is used by default to define `Error` codes and custom exception
translations. They are looked up in a file named `sql-error-codes.xml` from the
classpath, and the matching `SQLErrorCodes` instance is located based on the database
name from the database metadata of the database in use.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can extend `SQLErrorCodeSQLExceptionTranslator`, as the following example shows: + +Java + +``` +public class CustomSQLErrorCodesTranslator extends SQLErrorCodeSQLExceptionTranslator { + + protected DataAccessException customTranslate(String task, String sql, SQLException sqlEx) { + if (sqlEx.getErrorCode() == -12345) { + return new DeadlockLoserDataAccessException(task, sqlEx); + } + return null; + } +} +``` + +Kotlin + +``` +class CustomSQLErrorCodesTranslator : SQLErrorCodeSQLExceptionTranslator() { + + override fun customTranslate(task: String, sql: String?, sqlEx: SQLException): DataAccessException? { + if (sqlEx.errorCode == -12345) { + return DeadlockLoserDataAccessException(task, sqlEx) + } + return null + } +} +``` + +In the preceding example, the specific error code (`-12345`) is translated, while other errors are +left to be translated by the default translator implementation. To use this custom +translator, you must pass it to the `JdbcTemplate` through the method`setExceptionTranslator`, and you must use this `JdbcTemplate` for all of the data access +processing where this translator is needed. 
The following example shows how you can use this custom +translator: + +Java + +``` +private JdbcTemplate jdbcTemplate; + +public void setDataSource(DataSource dataSource) { + + // create a JdbcTemplate and set data source + this.jdbcTemplate = new JdbcTemplate(); + this.jdbcTemplate.setDataSource(dataSource); + + // create a custom translator and set the DataSource for the default translation lookup + CustomSQLErrorCodesTranslator tr = new CustomSQLErrorCodesTranslator(); + tr.setDataSource(dataSource); + this.jdbcTemplate.setExceptionTranslator(tr); + +} + +public void updateShippingCharge(long orderId, long pct) { + // use the prepared JdbcTemplate for this update + this.jdbcTemplate.update("update orders" + + " set shipping_charge = shipping_charge * ? / 100" + + " where id = ?", pct, orderId); +} +``` + +Kotlin + +``` +// create a JdbcTemplate and set data source +private val jdbcTemplate = JdbcTemplate(dataSource).apply { + // create a custom translator and set the DataSource for the default translation lookup + exceptionTranslator = CustomSQLErrorCodesTranslator().apply { + this.dataSource = dataSource + } +} + +fun updateShippingCharge(orderId: Long, pct: Long) { + // use the prepared JdbcTemplate for this update + this.jdbcTemplate!!.update("update orders" + + " set shipping_charge = shipping_charge * ? / 100" + + " where id = ?", pct, orderId) +} +``` + +The custom translator is passed a data source in order to look up the error codes in`sql-error-codes.xml`. + +#### 3.3.4. Running Statements + +Running an SQL statement requires very little code. You need a `DataSource` and a`JdbcTemplate`, including the convenience methods that are provided with the`JdbcTemplate`. 
The following example shows what you need to include for a minimal but +fully functional class that creates a new table: + +Java + +``` +import javax.sql.DataSource; +import org.springframework.jdbc.core.JdbcTemplate; + +public class ExecuteAStatement { + + private JdbcTemplate jdbcTemplate; + + public void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + public void doExecute() { + this.jdbcTemplate.execute("create table mytable (id integer, name varchar(100))"); + } +} +``` + +Kotlin + +``` +import javax.sql.DataSource +import org.springframework.jdbc.core.JdbcTemplate + +class ExecuteAStatement(dataSource: DataSource) { + + private val jdbcTemplate = JdbcTemplate(dataSource) + + fun doExecute() { + jdbcTemplate.execute("create table mytable (id integer, name varchar(100))") + } +} +``` + +#### 3.3.5. Running Queries + +Some query methods return a single value. To retrieve a count or a specific value from +one row, use `queryForObject(..)`. The latter converts the returned JDBC `Type` to the +Java class that is passed in as an argument. If the type conversion is invalid, an`InvalidDataAccessApiUsageException` is thrown. 
The following example contains two +query methods, one for an `int` and one that queries for a `String`: + +Java + +``` +import javax.sql.DataSource; +import org.springframework.jdbc.core.JdbcTemplate; + +public class RunAQuery { + + private JdbcTemplate jdbcTemplate; + + public void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + public int getCount() { + return this.jdbcTemplate.queryForObject("select count(*) from mytable", Integer.class); + } + + public String getName() { + return this.jdbcTemplate.queryForObject("select name from mytable", String.class); + } +} +``` + +Kotlin + +``` +import javax.sql.DataSource +import org.springframework.jdbc.core.JdbcTemplate + +class RunAQuery(dataSource: DataSource) { + + private val jdbcTemplate = JdbcTemplate(dataSource) + + val count: Int + get() = jdbcTemplate.queryForObject("select count(*) from mytable")!! + + val name: String? + get() = jdbcTemplate.queryForObject("select name from mytable") +} +``` + +In addition to the single result query methods, several methods return a list with an +entry for each row that the query returned. The most generic method is `queryForList(..)`, +which returns a `List` where each element is a `Map` containing one entry for each column, +using the column name as the key. If you add a method to the preceding example to retrieve a +list of all the rows, it might be as follows: + +Java + +``` +private JdbcTemplate jdbcTemplate; + +public void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); +} + +public List> getList() { + return this.jdbcTemplate.queryForList("select * from mytable"); +} +``` + +Kotlin + +``` +private val jdbcTemplate = JdbcTemplate(dataSource) + +fun getList(): List> { + return jdbcTemplate.queryForList("select * from mytable") +} +``` + +The returned list would resemble the following: + +``` +[{name=Bob, id=1}, {name=Mary, id=2}] +``` + +#### 3.3.6. 
Updating the Database + +The following example updates a column for a certain primary key: + +Java + +``` +import javax.sql.DataSource; +import org.springframework.jdbc.core.JdbcTemplate; + +public class ExecuteAnUpdate { + + private JdbcTemplate jdbcTemplate; + + public void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + public void setName(int id, String name) { + this.jdbcTemplate.update("update mytable set name = ? where id = ?", name, id); + } +} +``` + +Kotlin + +``` +import javax.sql.DataSource +import org.springframework.jdbc.core.JdbcTemplate + +class ExecuteAnUpdate(dataSource: DataSource) { + + private val jdbcTemplate = JdbcTemplate(dataSource) + + fun setName(id: Int, name: String) { + jdbcTemplate.update("update mytable set name = ? where id = ?", name, id) + } +} +``` + +In the preceding example, +an SQL statement has placeholders for row parameters. You can pass the parameter values +in as varargs or, alternatively, as an array of objects. Thus, you should explicitly wrap primitives +in the primitive wrapper classes, or you should use auto-boxing. + +#### 3.3.7. Retrieving Auto-generated Keys + +An `update()` convenience method supports the retrieval of primary keys generated by the +database. This support is part of the JDBC 3.0 standard. See Chapter 13.6 of the +specification for details. The method takes a `PreparedStatementCreator` as its first +argument, and this is the way the required insert statement is specified. The other +argument is a `KeyHolder`, which contains the generated key on successful return from the +update. There is no standard single way to create an appropriate `PreparedStatement`(which explains why the method signature is the way it is). 
The following example works +on Oracle but may not work on other platforms: + +Java + +``` +final String INSERT_SQL = "insert into my_test (name) values(?)"; +final String name = "Rob"; + +KeyHolder keyHolder = new GeneratedKeyHolder(); +jdbcTemplate.update(connection -> { + PreparedStatement ps = connection.prepareStatement(INSERT_SQL, new String[] { "id" }); + ps.setString(1, name); + return ps; +}, keyHolder); + +// keyHolder.getKey() now contains the generated key +``` + +Kotlin + +``` +val INSERT_SQL = "insert into my_test (name) values(?)" +val name = "Rob" + +val keyHolder = GeneratedKeyHolder() +jdbcTemplate.update({ + it.prepareStatement (INSERT_SQL, arrayOf("id")).apply { setString(1, name) } +}, keyHolder) + +// keyHolder.getKey() now contains the generated key +``` + +### 3.4. Controlling Database Connections + +This section covers: + +* [Using `DataSource`](#jdbc-datasource) + +* [Using `DataSourceUtils`](#jdbc-DataSourceUtils) + +* [Implementing `SmartDataSource`](#jdbc-SmartDataSource) + +* [Extending `AbstractDataSource`](#jdbc-AbstractDataSource) + +* [Using `SingleConnectionDataSource`](#jdbc-SingleConnectionDataSource) + +* [Using `DriverManagerDataSource`](#jdbc-DriverManagerDataSource) + +* [Using `TransactionAwareDataSourceProxy`](#jdbc-TransactionAwareDataSourceProxy) + +* [Using `DataSourceTransactionManager`](#jdbc-DataSourceTransactionManager) + +#### 3.4.1. Using `DataSource` + +Spring obtains a connection to the database through a `DataSource`. A `DataSource` is +part of the JDBC specification and is a generalized connection factory. It lets a +container or a framework hide connection pooling and transaction management issues +from the application code. As a developer, you need not know details about how to +connect to the database. That is the responsibility of the administrator who sets up +the datasource. 
You most likely fill both roles as you develop and test code, but you +do not necessarily have to know how the production data source is configured. + +When you use Spring’s JDBC layer, you can obtain a data source from JNDI, or you can +configure your own with a connection pool implementation provided by a third party. +Traditional choices are Apache Commons DBCP and C3P0 with bean-style `DataSource` classes; +for a modern JDBC connection pool, consider HikariCP with its builder-style API instead. + +| |You should use the `DriverManagerDataSource` and `SimpleDriverDataSource` classes
(as included in the Spring distribution) only for testing purposes! Those variants do not
provide pooling and perform poorly when multiple requests for a connection are made.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following section uses Spring’s `DriverManagerDataSource` implementation. +Several other `DataSource` variants are covered later. + +To configure a `DriverManagerDataSource`: + +1. Obtain a connection with `DriverManagerDataSource` as you typically obtain a JDBC + connection. + +2. Specify the fully qualified classname of the JDBC driver so that the `DriverManager`can load the driver class. + +3. Provide a URL that varies between JDBC drivers. (See the documentation for your driver + for the correct value.) + +4. Provide a username and a password to connect to the database. + +The following example shows how to configure a `DriverManagerDataSource` in Java: + +Java + +``` +DriverManagerDataSource dataSource = new DriverManagerDataSource(); +dataSource.setDriverClassName("org.hsqldb.jdbcDriver"); +dataSource.setUrl("jdbc:hsqldb:hsql://localhost:"); +dataSource.setUsername("sa"); +dataSource.setPassword(""); +``` + +Kotlin + +``` +val dataSource = DriverManagerDataSource().apply { + setDriverClassName("org.hsqldb.jdbcDriver") + url = "jdbc:hsqldb:hsql://localhost:" + username = "sa" + password = "" +} +``` + +The following example shows the corresponding XML configuration: + +``` + + + + + + + + +``` + +The next two examples show the basic connectivity and configuration for DBCP and C3P0. +To learn about more options that help control the pooling features, see the product +documentation for the respective connection pooling implementations. 
+ +The following example shows DBCP configuration: + +``` + + + + + + + + +``` + +The following example shows C3P0 configuration: + +``` + + + + + + + + +``` + +#### 3.4.2. Using `DataSourceUtils` + +The `DataSourceUtils` class is a convenient and powerful helper class that provides`static` methods to obtain connections from JNDI and close connections if necessary. It +supports thread-bound connections with, for example, `DataSourceTransactionManager`. + +#### 3.4.3. Implementing `SmartDataSource` + +The `SmartDataSource` interface should be implemented by classes that can provide a +connection to a relational database. It extends the `DataSource` interface to let +classes that use it query whether the connection should be closed after a given +operation. This usage is efficient when you know that you need to reuse a connection. + +#### 3.4.4. Extending `AbstractDataSource` + +`AbstractDataSource` is an `abstract` base class for Spring’s `DataSource`implementations. It implements code that is common to all `DataSource` implementations. +You should extend the `AbstractDataSource` class if you write your own `DataSource`implementation. + +#### 3.4.5. Using `SingleConnectionDataSource` + +The `SingleConnectionDataSource` class is an implementation of the `SmartDataSource`interface that wraps a single `Connection` that is not closed after each use. +This is not multi-threading capable. + +If any client code calls `close` on the assumption of a pooled connection (as when using +persistence tools), you should set the `suppressClose` property to `true`. This setting +returns a close-suppressing proxy that wraps the physical connection. Note that you can +no longer cast this to a native Oracle `Connection` or a similar object. + +`SingleConnectionDataSource` is primarily a test class. It typically enables easy testing +of code outside an application server, in conjunction with a simple JNDI environment. 
+In contrast to `DriverManagerDataSource`, it reuses the same connection all the time, +avoiding excessive creation of physical connections. + +#### 3.4.6. Using `DriverManagerDataSource` + +The `DriverManagerDataSource` class is an implementation of the standard `DataSource`interface that configures a plain JDBC driver through bean properties and returns a new`Connection` every time. + +This implementation is useful for test and stand-alone environments outside of a Java EE +container, either as a `DataSource` bean in a Spring IoC container or in conjunction +with a simple JNDI environment. Pool-assuming `Connection.close()` calls +close the connection, so any `DataSource`-aware persistence code should work. However, +using JavaBean-style connection pools (such as `commons-dbcp`) is so easy, even in a test +environment, that it is almost always preferable to use such a connection pool over`DriverManagerDataSource`. + +#### 3.4.7. Using `TransactionAwareDataSourceProxy` + +`TransactionAwareDataSourceProxy` is a proxy for a target `DataSource`. The proxy wraps that +target `DataSource` to add awareness of Spring-managed transactions. In this respect, it +is similar to a transactional JNDI `DataSource`, as provided by a Java EE server. + +| |It is rarely desirable to use this class, except when already existing code must be
called and passed a standard JDBC `DataSource` interface implementation. In this case,
you can still have this code be usable and, at the same time, have this code
participating in Spring-managed transactions. It is generally preferable to write your
own new code by using the higher level abstractions for resource management, such as`JdbcTemplate` or `DataSourceUtils`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See the [`TransactionAwareDataSourceProxy`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jdbc/datasource/TransactionAwareDataSourceProxy.html)javadoc for more details. + +#### 3.4.8. Using `DataSourceTransactionManager` + +The `DataSourceTransactionManager` class is a `PlatformTransactionManager`implementation for single JDBC datasources. It binds a JDBC connection from the +specified data source to the currently executing thread, potentially allowing for one +thread connection per data source. + +Application code is required to retrieve the JDBC connection through`DataSourceUtils.getConnection(DataSource)` instead of Java EE’s standard`DataSource.getConnection`. It throws unchecked `org.springframework.dao` exceptions +instead of checked `SQLExceptions`. All framework classes (such as `JdbcTemplate`) use this +strategy implicitly. If not used with this transaction manager, the lookup strategy +behaves exactly like the common one. Thus, it can be used in any case. + +The `DataSourceTransactionManager` class supports custom isolation levels and timeouts +that get applied as appropriate JDBC statement query timeouts. To support the latter, +application code must either use `JdbcTemplate` or call the`DataSourceUtils.applyTransactionTimeout(..)` method for each created statement. 
+ +You can use this implementation instead of `JtaTransactionManager` in the single-resource +case, as it does not require the container to support JTA. Switching between +both is just a matter of configuration, provided you stick to the required connection lookup +pattern. JTA does not support custom isolation levels. + +### 3.5. JDBC Batch Operations + +Most JDBC drivers provide improved performance if you batch multiple calls to the same +prepared statement. By grouping updates into batches, you limit the number of round trips +to the database. + +#### 3.5.1. Basic Batch Operations with `JdbcTemplate` + +You accomplish `JdbcTemplate` batch processing by implementing two methods of a special +interface, `BatchPreparedStatementSetter`, and passing that implementation in as the second parameter +in your `batchUpdate` method call. You can use the `getBatchSize` method to provide the size of +the current batch. You can use the `setValues` method to set the values for the parameters of +the prepared statement. This method is called the number of times that you +specified in the `getBatchSize` call. The following example updates the `t_actor` table +based on entries in a list, and the entire list is used as the batch: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private JdbcTemplate jdbcTemplate; + + public void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + public int[] batchUpdate(final List actors) { + return this.jdbcTemplate.batchUpdate( + "update t_actor set first_name = ?, last_name = ? where id = ?", + new BatchPreparedStatementSetter() { + public void setValues(PreparedStatement ps, int i) throws SQLException { + Actor actor = actors.get(i); + ps.setString(1, actor.getFirstName()); + ps.setString(2, actor.getLastName()); + ps.setLong(3, actor.getId().longValue()); + } + public int getBatchSize() { + return actors.size(); + } + }); + } + + // ... 
additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val jdbcTemplate = JdbcTemplate(dataSource) + + fun batchUpdate(actors: List): IntArray { + return jdbcTemplate.batchUpdate( + "update t_actor set first_name = ?, last_name = ? where id = ?", + object: BatchPreparedStatementSetter { + override fun setValues(ps: PreparedStatement, i: Int) { + ps.setString(1, actors[i].firstName) + ps.setString(2, actors[i].lastName) + ps.setLong(3, actors[i].id) + } + + override fun getBatchSize() = actors.size + }) + } + + // ... additional methods +} +``` + +If you process a stream of updates or reading from a file, you might have a +preferred batch size, but the last batch might not have that number of entries. In this +case, you can use the `InterruptibleBatchPreparedStatementSetter` interface, which lets +you interrupt a batch once the input source is exhausted. The `isBatchExhausted` method +lets you signal the end of the batch. + +#### 3.5.2. Batch Operations with a List of Objects + +Both the `JdbcTemplate` and the `NamedParameterJdbcTemplate` provides an alternate way +of providing the batch update. Instead of implementing a special batch interface, you +provide all parameter values in the call as a list. The framework loops over these +values and uses an internal prepared statement setter. The API varies, depending on +whether you use named parameters. For the named parameters, you provide an array of`SqlParameterSource`, one entry for each member of the batch. You can use the`SqlParameterSourceUtils.createBatch` convenience methods to create this array, passing +in an array of bean-style objects (with getter methods corresponding to parameters),`String`-keyed `Map` instances (containing the corresponding parameters as values), or a mix of both. 
+ +The following example shows a batch update using named parameters: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private NamedParameterTemplate namedParameterJdbcTemplate; + + public void setDataSource(DataSource dataSource) { + this.namedParameterJdbcTemplate = new NamedParameterJdbcTemplate(dataSource); + } + + public int[] batchUpdate(List actors) { + return this.namedParameterJdbcTemplate.batchUpdate( + "update t_actor set first_name = :firstName, last_name = :lastName where id = :id", + SqlParameterSourceUtils.createBatch(actors)); + } + + // ... additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val namedParameterJdbcTemplate = NamedParameterJdbcTemplate(dataSource) + + fun batchUpdate(actors: List): IntArray { + return this.namedParameterJdbcTemplate.batchUpdate( + "update t_actor set first_name = :firstName, last_name = :lastName where id = :id", + SqlParameterSourceUtils.createBatch(actors)); + } + + // ... additional methods +} +``` + +For an SQL statement that uses the classic `?` placeholders, you pass in a list +containing an object array with the update values. This object array must have one entry +for each placeholder in the SQL statement, and they must be in the same order as they are +defined in the SQL statement. + +The following example is the same as the preceding example, except that it uses classic +JDBC `?` placeholders: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private JdbcTemplate jdbcTemplate; + + public void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + public int[] batchUpdate(final List actors) { + List batch = new ArrayList(); + for (Actor actor : actors) { + Object[] values = new Object[] { + actor.getFirstName(), actor.getLastName(), actor.getId()}; + batch.add(values); + } + return this.jdbcTemplate.batchUpdate( + "update t_actor set first_name = ?, last_name = ? 
where id = ?", + batch); + } + + // ... additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val jdbcTemplate = JdbcTemplate(dataSource) + + fun batchUpdate(actors: List): IntArray { + val batch = mutableListOf>() + for (actor in actors) { + batch.add(arrayOf(actor.firstName, actor.lastName, actor.id)) + } + return jdbcTemplate.batchUpdate( + "update t_actor set first_name = ?, last_name = ? where id = ?", batch) + } + + // ... additional methods +} +``` + +All of the batch update methods that we described earlier return an `int` array +containing the number of affected rows for each batch entry. This count is reported by +the JDBC driver. If the count is not available, the JDBC driver returns a value of `-2`. + +| |In such a scenario, with automatic setting of values on an underlying `PreparedStatement`,
the corresponding JDBC type for each value needs to be derived from the given Java type.
While this usually works well, there is a potential for issues (for example, with Map-contained `null` values). Spring, by default, calls `ParameterMetaData.getParameterType` in such a
case, which can be expensive with your JDBC driver. You should use a recent driver
version and consider setting the `spring.jdbc.getParameterType.ignore` property to `true` (as a JVM system property or via the [`SpringProperties`](appendix.html#appendix-spring-properties) mechanism) if you encounter
a performance issue (as reported on Oracle 12c, JBoss, and PostgreSQL).

Alternatively, you might consider specifying the corresponding JDBC types explicitly,
either through a `BatchPreparedStatementSetter` (as shown earlier), through an explicit type
array given to a `List`-based call, through `registerSqlType` calls on a
custom `MapSqlParameterSource` instance, or through a `BeanPropertySqlParameterSource`that derives the SQL type from the Java-declared property type even for a null value.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.5.3. Batch Operations with Multiple Batches + +The preceding example of a batch update deals with batches that are so large that you want to +break them up into several smaller batches. You can do this with the methods +mentioned earlier by making multiple calls to the `batchUpdate` method, but there is now a +more convenient method. This method takes, in addition to the SQL statement, a`Collection` of objects that contain the parameters, the number of updates to make for each +batch, and a `ParameterizedPreparedStatementSetter` to set the values for the parameters +of the prepared statement. 
The framework loops over the provided values and breaks the +update calls into batches of the size specified. + +The following example shows a batch update that uses a batch size of 100: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private JdbcTemplate jdbcTemplate; + + public void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + public int[][] batchUpdate(final Collection actors) { + int[][] updateCounts = jdbcTemplate.batchUpdate( + "update t_actor set first_name = ?, last_name = ? where id = ?", + actors, + 100, + (PreparedStatement ps, Actor actor) -> { + ps.setString(1, actor.getFirstName()); + ps.setString(2, actor.getLastName()); + ps.setLong(3, actor.getId().longValue()); + }); + return updateCounts; + } + + // ... additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val jdbcTemplate = JdbcTemplate(dataSource) + + fun batchUpdate(actors: List): Array { + return jdbcTemplate.batchUpdate( + "update t_actor set first_name = ?, last_name = ? where id = ?", + actors, 100) { ps, argument -> + ps.setString(1, argument.firstName) + ps.setString(2, argument.lastName) + ps.setLong(3, argument.id) + } + } + + // ... additional methods +} +``` + +The batch update method for this call returns an array of `int` arrays that contains an +array entry for each batch with an array of the number of affected rows for each update. +The top-level array’s length indicates the number of batches run, and the second level +array’s length indicates the number of updates in that batch. The number of updates in +each batch should be the batch size provided for all batches (except that the last one +that might be less), depending on the total number of update objects provided. The update +count for each update statement is the one reported by the JDBC driver. If the count is +not available, the JDBC driver returns a value of `-2`. + +### 3.6. 
Simplifying JDBC Operations with the `SimpleJdbc` Classes + +The `SimpleJdbcInsert` and `SimpleJdbcCall` classes provide a simplified configuration +by taking advantage of database metadata that can be retrieved through the JDBC driver. +This means that you have less to configure up front, although you can override or turn off +the metadata processing if you prefer to provide all the details in your code. + +#### 3.6.1. Inserting Data by Using `SimpleJdbcInsert` + +We start by looking at the `SimpleJdbcInsert` class with the minimal amount of +configuration options. You should instantiate the `SimpleJdbcInsert` in the data access +layer’s initialization method. For this example, the initializing method is the`setDataSource` method. You do not need to subclass the `SimpleJdbcInsert` class. Instead, +you can create a new instance and set the table name by using the `withTableName` method. +Configuration methods for this class follow the `fluid` style that returns the instance +of the `SimpleJdbcInsert`, which lets you chain all configuration methods. The following +example uses only one configuration method (we show examples of multiple methods later): + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcInsert insertActor; + + public void setDataSource(DataSource dataSource) { + this.insertActor = new SimpleJdbcInsert(dataSource).withTableName("t_actor"); + } + + public void add(Actor actor) { + Map parameters = new HashMap(3); + parameters.put("id", actor.getId()); + parameters.put("first_name", actor.getFirstName()); + parameters.put("last_name", actor.getLastName()); + insertActor.execute(parameters); + } + + // ... 
additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val insertActor = SimpleJdbcInsert(dataSource).withTableName("t_actor") + + fun add(actor: Actor) { + val parameters = mutableMapOf() + parameters["id"] = actor.id + parameters["first_name"] = actor.firstName + parameters["last_name"] = actor.lastName + insertActor.execute(parameters) + } + + // ... additional methods +} +``` + +The `execute` method used here takes a plain `java.util.Map` as its only parameter. The +important thing to note here is that the keys used for the `Map` must match the column +names of the table, as defined in the database. This is because we read the metadata +to construct the actual insert statement. + +#### 3.6.2. Retrieving Auto-generated Keys by Using `SimpleJdbcInsert` + +The next example uses the same insert as the preceding example, but, instead of passing in the `id`, it +retrieves the auto-generated key and sets it on the new `Actor` object. When it creates +the `SimpleJdbcInsert`, in addition to specifying the table name, it specifies the name +of the generated key column with the `usingGeneratedKeyColumns` method. The following +listing shows how it works: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcInsert insertActor; + + public void setDataSource(DataSource dataSource) { + this.insertActor = new SimpleJdbcInsert(dataSource) + .withTableName("t_actor") + .usingGeneratedKeyColumns("id"); + } + + public void add(Actor actor) { + Map parameters = new HashMap(2); + parameters.put("first_name", actor.getFirstName()); + parameters.put("last_name", actor.getLastName()); + Number newId = insertActor.executeAndReturnKey(parameters); + actor.setId(newId.longValue()); + } + + // ... 
additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val insertActor = SimpleJdbcInsert(dataSource) + .withTableName("t_actor").usingGeneratedKeyColumns("id") + + fun add(actor: Actor): Actor { + val parameters = mapOf( + "first_name" to actor.firstName, + "last_name" to actor.lastName) + val newId = insertActor.executeAndReturnKey(parameters); + return actor.copy(id = newId.toLong()) + } + + // ... additional methods +} +``` + +The main difference when you run the insert by using this second approach is that you do not +add the `id` to the `Map`, and you call the `executeAndReturnKey` method. This returns a`java.lang.Number` object with which you can create an instance of the numerical type that +is used in your domain class. You cannot rely on all databases to return a specific Java +class here. `java.lang.Number` is the base class that you can rely on. If you have +multiple auto-generated columns or the generated values are non-numeric, you can +use a `KeyHolder` that is returned from the `executeAndReturnKeyHolder` method. + +#### 3.6.3. Specifying Columns for a `SimpleJdbcInsert` + +You can limit the columns for an insert by specifying a list of column names with the`usingColumns` method, as the following example shows: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcInsert insertActor; + + public void setDataSource(DataSource dataSource) { + this.insertActor = new SimpleJdbcInsert(dataSource) + .withTableName("t_actor") + .usingColumns("first_name", "last_name") + .usingGeneratedKeyColumns("id"); + } + + public void add(Actor actor) { + Map parameters = new HashMap(2); + parameters.put("first_name", actor.getFirstName()); + parameters.put("last_name", actor.getLastName()); + Number newId = insertActor.executeAndReturnKey(parameters); + actor.setId(newId.longValue()); + } + + // ... 
additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val insertActor = SimpleJdbcInsert(dataSource) + .withTableName("t_actor") + .usingColumns("first_name", "last_name") + .usingGeneratedKeyColumns("id") + + fun add(actor: Actor): Actor { + val parameters = mapOf( + "first_name" to actor.firstName, + "last_name" to actor.lastName) + val newId = insertActor.executeAndReturnKey(parameters); + return actor.copy(id = newId.toLong()) + } + + // ... additional methods +} +``` + +The execution of the insert is the same as if you had relied on the metadata to determine +which columns to use. + +#### 3.6.4. Using `SqlParameterSource` to Provide Parameter Values + +Using a `Map` to provide parameter values works fine, but it is not the most convenient +class to use. Spring provides a couple of implementations of the `SqlParameterSource`interface that you can use instead. The first one is `BeanPropertySqlParameterSource`, +which is a very convenient class if you have a JavaBean-compliant class that contains +your values. It uses the corresponding getter method to extract the parameter +values. The following example shows how to use `BeanPropertySqlParameterSource`: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcInsert insertActor; + + public void setDataSource(DataSource dataSource) { + this.insertActor = new SimpleJdbcInsert(dataSource) + .withTableName("t_actor") + .usingGeneratedKeyColumns("id"); + } + + public void add(Actor actor) { + SqlParameterSource parameters = new BeanPropertySqlParameterSource(actor); + Number newId = insertActor.executeAndReturnKey(parameters); + actor.setId(newId.longValue()); + } + + // ... 
additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val insertActor = SimpleJdbcInsert(dataSource) + .withTableName("t_actor") + .usingGeneratedKeyColumns("id") + + fun add(actor: Actor): Actor { + val parameters = BeanPropertySqlParameterSource(actor) + val newId = insertActor.executeAndReturnKey(parameters) + return actor.copy(id = newId.toLong()) + } + + // ... additional methods +} +``` + +Another option is the `MapSqlParameterSource` that resembles a `Map` but provides a more +convenient `addValue` method that can be chained. The following example shows how to use it: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcInsert insertActor; + + public void setDataSource(DataSource dataSource) { + this.insertActor = new SimpleJdbcInsert(dataSource) + .withTableName("t_actor") + .usingGeneratedKeyColumns("id"); + } + + public void add(Actor actor) { + SqlParameterSource parameters = new MapSqlParameterSource() + .addValue("first_name", actor.getFirstName()) + .addValue("last_name", actor.getLastName()); + Number newId = insertActor.executeAndReturnKey(parameters); + actor.setId(newId.longValue()); + } + + // ... additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val insertActor = SimpleJdbcInsert(dataSource) + .withTableName("t_actor") + .usingGeneratedKeyColumns("id") + + fun add(actor: Actor): Actor { + val parameters = MapSqlParameterSource() + .addValue("first_name", actor.firstName) + .addValue("last_name", actor.lastName) + val newId = insertActor.executeAndReturnKey(parameters) + return actor.copy(id = newId.toLong()) + } + + // ... additional methods +} +``` + +As you can see, the configuration is the same. Only the executing code has to change to +use these alternative input classes. + +#### 3.6.5. 
Calling a Stored Procedure with `SimpleJdbcCall` + +The `SimpleJdbcCall` class uses metadata in the database to look up names of `in`and `out` parameters so that you do not have to explicitly declare them. You can +declare parameters if you prefer to do that or if you have parameters (such as `ARRAY`or `STRUCT`) that do not have an automatic mapping to a Java class. The first example +shows a simple procedure that returns only scalar values in `VARCHAR` and `DATE` format +from a MySQL database. The example procedure reads a specified actor entry and returns`first_name`, `last_name`, and `birth_date` columns in the form of `out` parameters. +The following listing shows the first example: + +``` +CREATE PROCEDURE read_actor ( + IN in_id INTEGER, + OUT out_first_name VARCHAR(100), + OUT out_last_name VARCHAR(100), + OUT out_birth_date DATE) +BEGIN + SELECT first_name, last_name, birth_date + INTO out_first_name, out_last_name, out_birth_date + FROM t_actor where id = in_id; +END; +``` + +The `in_id` parameter contains the `id` of the actor that you are looking up. The `out`parameters return the data read from the table. + +You can declare `SimpleJdbcCall` in a manner similar to declaring `SimpleJdbcInsert`. You +should instantiate and configure the class in the initialization method of your data-access +layer. Compared to the `StoredProcedure` class, you need not create a subclass +and you need not to declare parameters that can be looked up in the database metadata. 
+The following example of a `SimpleJdbcCall` configuration uses the preceding stored +procedure (the only configuration option, in addition to the `DataSource`, is the name +of the stored procedure): + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcCall procReadActor; + + public void setDataSource(DataSource dataSource) { + this.procReadActor = new SimpleJdbcCall(dataSource) + .withProcedureName("read_actor"); + } + + public Actor readActor(Long id) { + SqlParameterSource in = new MapSqlParameterSource() + .addValue("in_id", id); + Map out = procReadActor.execute(in); + Actor actor = new Actor(); + actor.setId(id); + actor.setFirstName((String) out.get("out_first_name")); + actor.setLastName((String) out.get("out_last_name")); + actor.setBirthDate((Date) out.get("out_birth_date")); + return actor; + } + + // ... additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val procReadActor = SimpleJdbcCall(dataSource) + .withProcedureName("read_actor") + + fun readActor(id: Long): Actor { + val source = MapSqlParameterSource().addValue("in_id", id) + val output = procReadActor.execute(source) + return Actor( + id, + output["out_first_name"] as String, + output["out_last_name"] as String, + output["out_birth_date"] as Date) + } + + // ... additional methods +} +``` + +The code you write for the execution of the call involves creating an `SqlParameterSource`containing the IN parameter. You must match the name provided for the input value +with that of the parameter name declared in the stored procedure. The case does not have +to match because you use metadata to determine how database objects should be referred to +in a stored procedure. What is specified in the source for the stored procedure is not +necessarily the way it is stored in the database. Some databases transform names to all +upper case, while others use lower case or use the case as specified. 
+ +The `execute` method takes the IN parameters and returns a `Map` that contains any `out`parameters keyed by the name, as specified in the stored procedure. In this case, they are`out_first_name`, `out_last_name`, and `out_birth_date`. + +The last part of the `execute` method creates an `Actor` instance to use to return the +data retrieved. Again, it is important to use the names of the `out` parameters as they +are declared in the stored procedure. Also, the case in the names of the `out`parameters stored in the results map matches that of the `out` parameter names in the +database, which could vary between databases. To make your code more portable, you should +do a case-insensitive lookup or instruct Spring to use a `LinkedCaseInsensitiveMap`. +To do the latter, you can create your own `JdbcTemplate` and set the `setResultsMapCaseInsensitive`property to `true`. Then you can pass this customized `JdbcTemplate` instance into +the constructor of your `SimpleJdbcCall`. The following example shows this configuration: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcCall procReadActor; + + public void setDataSource(DataSource dataSource) { + JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); + jdbcTemplate.setResultsMapCaseInsensitive(true); + this.procReadActor = new SimpleJdbcCall(jdbcTemplate) + .withProcedureName("read_actor"); + } + + // ... additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private var procReadActor = SimpleJdbcCall(JdbcTemplate(dataSource).apply { + isResultsMapCaseInsensitive = true + }).withProcedureName("read_actor") + + // ... additional methods +} +``` + +By taking this action, you avoid conflicts in the case used for the names of your +returned `out` parameters. + +#### 3.6.6. 
Explicitly Declaring Parameters to Use for a `SimpleJdbcCall` + +Earlier in this chapter, we described how parameters are deduced from metadata, but you can declare them +explicitly if you wish. You can do so by creating and configuring `SimpleJdbcCall` with +the `declareParameters` method, which takes a variable number of `SqlParameter` objects +as input. See the [next section](#jdbc-params) for details on how to define an `SqlParameter`. + +| |Explicit declarations are necessary if the database you use is not a Spring-supported
database. Currently, Spring supports metadata lookup of stored procedure calls for the
following databases: Apache Derby, DB2, MySQL, Microsoft SQL Server, Oracle, and Sybase.
We also support metadata lookup of stored functions for MySQL, Microsoft SQL Server,
and Oracle.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can opt to explicitly declare one, some, or all of the parameters. The parameter +metadata is still used where you do not explicitly declare parameters. To bypass all +processing of metadata lookups for potential parameters and use only the declared +parameters, you can call the method `withoutProcedureColumnMetaDataAccess` as part of the +declaration. Suppose that you have two or more different call signatures declared for a +database function. In this case, you call `useInParameterNames` to specify the list +of IN parameter names to include for a given signature. + +The following example shows a fully declared procedure call and uses the information from +the preceding example: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcCall procReadActor; + + public void setDataSource(DataSource dataSource) { + JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); + jdbcTemplate.setResultsMapCaseInsensitive(true); + this.procReadActor = new SimpleJdbcCall(jdbcTemplate) + .withProcedureName("read_actor") + .withoutProcedureColumnMetaDataAccess() + .useInParameterNames("in_id") + .declareParameters( + new SqlParameter("in_id", Types.NUMERIC), + new SqlOutParameter("out_first_name", Types.VARCHAR), + new SqlOutParameter("out_last_name", Types.VARCHAR), + new SqlOutParameter("out_birth_date", Types.DATE) + ); + } + + // ... 
additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val procReadActor = SimpleJdbcCall(JdbcTemplate(dataSource).apply { + isResultsMapCaseInsensitive = true + }).withProcedureName("read_actor") + .withoutProcedureColumnMetaDataAccess() + .useInParameterNames("in_id") + .declareParameters( + SqlParameter("in_id", Types.NUMERIC), + SqlOutParameter("out_first_name", Types.VARCHAR), + SqlOutParameter("out_last_name", Types.VARCHAR), + SqlOutParameter("out_birth_date", Types.DATE) + ) + + // ... additional methods +} +``` + +The execution and end results of the two examples are the same. The second example specifies all +details explicitly rather than relying on metadata. + +#### 3.6.7. How to Define `SqlParameters` + +To define a parameter for the `SimpleJdbc` classes and also for the RDBMS operations +classes (covered in [Modeling JDBC Operations as Java Objects](#jdbc-object)) you can use `SqlParameter` or one of its subclasses. +To do so, you typically specify the parameter name and SQL type in the constructor. The SQL type +is specified by using the `java.sql.Types` constants. Earlier in this chapter, we saw declarations +similar to the following: + +Java + +``` +new SqlParameter("in_id", Types.NUMERIC), +new SqlOutParameter("out_first_name", Types.VARCHAR), +``` + +Kotlin + +``` +SqlParameter("in_id", Types.NUMERIC), +SqlOutParameter("out_first_name", Types.VARCHAR), +``` + +The first line with the `SqlParameter` declares an IN parameter. You can use IN parameters +for both stored procedure calls and for queries by using the `SqlQuery` and its +subclasses (covered in [Understanding `SqlQuery`](#jdbc-SqlQuery)). + +The second line (with the `SqlOutParameter`) declares an `out` parameter to be used in a +stored procedure call. There is also an `SqlInOutParameter` for `InOut` parameters +(parameters that provide an IN value to the procedure and that also return a value). 
+ +| |Only parameters declared as `SqlParameter` and `SqlInOutParameter` are used to
provide input values. This is different from the `StoredProcedure` class, which (for
backwards compatibility reasons) lets input values be provided for parameters
declared as `SqlOutParameter`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For IN parameters, in addition to the name and the SQL type, you can specify a scale for +numeric data or a type name for custom database types. For `out` parameters, you can +provide a `RowMapper` to handle mapping of rows returned from a `REF` cursor. Another +option is to specify an `SqlReturnType` that provides an opportunity to define +customized handling of the return values. + +#### 3.6.8. Calling a Stored Function by Using `SimpleJdbcCall` + +You can call a stored function in almost the same way as you call a stored procedure, except +that you provide a function name rather than a procedure name. You use the`withFunctionName` method as part of the configuration to indicate that you want to make +a call to a function, and the corresponding string for a function call is generated. A +specialized call (`executeFunction`) is used to run the function, and it +returns the function return value as an object of a specified type, which means you do +not have to retrieve the return value from the results map. A similar convenience method +(named `executeObject`) is also available for stored procedures that have only one `out`parameter. 
The following example (for MySQL) is based on a stored function named `get_actor_name`that returns an actor’s full name: + +``` +CREATE FUNCTION get_actor_name (in_id INTEGER) +RETURNS VARCHAR(200) READS SQL DATA +BEGIN + DECLARE out_name VARCHAR(200); + SELECT concat(first_name, ' ', last_name) + INTO out_name + FROM t_actor where id = in_id; + RETURN out_name; +END; +``` + +To call this function, we again create a `SimpleJdbcCall` in the initialization method, +as the following example shows: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcCall funcGetActorName; + + public void setDataSource(DataSource dataSource) { + JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); + jdbcTemplate.setResultsMapCaseInsensitive(true); + this.funcGetActorName = new SimpleJdbcCall(jdbcTemplate) + .withFunctionName("get_actor_name"); + } + + public String getActorName(Long id) { + SqlParameterSource in = new MapSqlParameterSource() + .addValue("in_id", id); + String name = funcGetActorName.executeFunction(String.class, in); + return name; + } + + // ... additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val jdbcTemplate = JdbcTemplate(dataSource).apply { + isResultsMapCaseInsensitive = true + } + private val funcGetActorName = SimpleJdbcCall(jdbcTemplate) + .withFunctionName("get_actor_name") + + fun getActorName(id: Long): String { + val source = MapSqlParameterSource().addValue("in_id", id) + return funcGetActorName.executeFunction(String::class.java, source) + } + + // ... additional methods +} +``` + +The `executeFunction` method used returns a `String` that contains the return value from the +function call. + +#### 3.6.9. Returning a `ResultSet` or REF Cursor from a `SimpleJdbcCall` + +Calling a stored procedure or function that returns a result set is a bit tricky. 
Some +databases return result sets during the JDBC results processing, while others require an +explicitly registered `out` parameter of a specific type. Both approaches need +additional processing to loop over the result set and process the returned rows. With +the `SimpleJdbcCall`, you can use the `returningResultSet` method and declare a `RowMapper`implementation to be used for a specific parameter. If the result set is +returned during the results processing, there are no names defined, so the returned +results must match the order in which you declare the `RowMapper`implementations. The name specified is still used to store the processed list of results +in the results map that is returned from the `execute` statement. + +The next example (for MySQL) uses a stored procedure that takes no IN parameters and returns +all rows from the `t_actor` table: + +``` +CREATE PROCEDURE read_all_actors() +BEGIN + SELECT a.id, a.first_name, a.last_name, a.birth_date FROM t_actor a; +END; +``` + +To call this procedure, you can declare the `RowMapper`. Because the class to which you want +to map follows the JavaBean rules, you can use a `BeanPropertyRowMapper` that is created by +passing in the required class to map to in the `newInstance` method. +The following example shows how to do so: + +Java + +``` +public class JdbcActorDao implements ActorDao { + + private SimpleJdbcCall procReadAllActors; + + public void setDataSource(DataSource dataSource) { + JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); + jdbcTemplate.setResultsMapCaseInsensitive(true); + this.procReadAllActors = new SimpleJdbcCall(jdbcTemplate) + .withProcedureName("read_all_actors") + .returningResultSet("actors", + BeanPropertyRowMapper.newInstance(Actor.class)); + } + + public List getActorsList() { + Map m = procReadAllActors.execute(new HashMap(0)); + return (List) m.get("actors"); + } + + // ... 
additional methods +} +``` + +Kotlin + +``` +class JdbcActorDao(dataSource: DataSource) : ActorDao { + + private val procReadAllActors = SimpleJdbcCall(JdbcTemplate(dataSource).apply { + isResultsMapCaseInsensitive = true + }).withProcedureName("read_all_actors") + .returningResultSet("actors", + BeanPropertyRowMapper.newInstance(Actor::class.java)) + + fun getActorsList(): List { + val m = procReadAllActors.execute(mapOf()) + return m["actors"] as List + } + + // ... additional methods +} +``` + +The `execute` call passes in an empty `Map`, because this call does not take any parameters. +The list of actors is then retrieved from the results map and returned to the caller. + +### 3.7. Modeling JDBC Operations as Java Objects + +The `org.springframework.jdbc.object` package contains classes that let you access +the database in a more object-oriented manner. As an example, you can run queries +and get the results back as a list that contains business objects with the relational +column data mapped to the properties of the business object. You can also run stored +procedures and run update, delete, and insert statements. + +| |Many Spring developers believe that the various RDBMS operation classes described below
(with the exception of the [`StoredProcedure`](#jdbc-StoredProcedure) class) can often
be replaced with straight `JdbcTemplate` calls. Often, it is simpler to write a DAO
method that calls a method on a `JdbcTemplate` directly (as opposed to
encapsulating a query as a full-blown class).

However, if you are getting measurable value from using the RDBMS operation classes,
you should continue to use these classes.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.7.1. Understanding `SqlQuery` + +`SqlQuery` is a reusable, thread-safe class that encapsulates an SQL query. Subclasses +must implement the `newRowMapper(..)` method to provide a `RowMapper` instance that can +create one object per row obtained from iterating over the `ResultSet` that is created +during the execution of the query. The `SqlQuery` class is rarely used directly, because +the `MappingSqlQuery` subclass provides a much more convenient implementation for +mapping rows to Java classes. Other implementations that extend `SqlQuery` are`MappingSqlQueryWithParameters` and `UpdatableSqlQuery`. + +#### 3.7.2. Using `MappingSqlQuery` + +`MappingSqlQuery` is a reusable query in which concrete subclasses must implement the +abstract `mapRow(..)` method to convert each row of the supplied `ResultSet` into an +object of the type specified. 
The following example shows a custom query that maps the +data from the `t_actor` relation to an instance of the `Actor` class: + +Java + +``` +public class ActorMappingQuery extends MappingSqlQuery { + + public ActorMappingQuery(DataSource ds) { + super(ds, "select id, first_name, last_name from t_actor where id = ?"); + declareParameter(new SqlParameter("id", Types.INTEGER)); + compile(); + } + + @Override + protected Actor mapRow(ResultSet rs, int rowNumber) throws SQLException { + Actor actor = new Actor(); + actor.setId(rs.getLong("id")); + actor.setFirstName(rs.getString("first_name")); + actor.setLastName(rs.getString("last_name")); + return actor; + } +} +``` + +Kotlin + +``` +class ActorMappingQuery(ds: DataSource) : MappingSqlQuery(ds, "select id, first_name, last_name from t_actor where id = ?") { + + init { + declareParameter(SqlParameter("id", Types.INTEGER)) + compile() + } + + override fun mapRow(rs: ResultSet, rowNumber: Int) = Actor( + rs.getLong("id"), + rs.getString("first_name"), + rs.getString("last_name") + ) +} +``` + +The class extends `MappingSqlQuery` parameterized with the `Actor` type. The constructor +for this customer query takes a `DataSource` as the only parameter. In this +constructor, you can call the constructor on the superclass with the `DataSource` and the SQL +that should be run to retrieve the rows for this query. This SQL is used to +create a `PreparedStatement`, so it may contain placeholders for any parameters to be +passed in during execution. You must declare each parameter by using the `declareParameter`method passing in an `SqlParameter`. The `SqlParameter` takes a name, and the JDBC type +as defined in `java.sql.Types`. After you define all parameters, you can call the`compile()` method so that the statement can be prepared and later run. This class is +thread-safe after it is compiled, so, as long as these instances are created when the DAO +is initialized, they can be kept as instance variables and be reused. 
The following +example shows how to define such a class: + +Java + +``` +private ActorMappingQuery actorMappingQuery; + +@Autowired +public void setDataSource(DataSource dataSource) { + this.actorMappingQuery = new ActorMappingQuery(dataSource); +} + +public Customer getCustomer(Long id) { + return actorMappingQuery.findObject(id); +} +``` + +Kotlin + +``` +private val actorMappingQuery = ActorMappingQuery(dataSource) + +fun getCustomer(id: Long) = actorMappingQuery.findObject(id) +``` + +The method in the preceding example retrieves the customer with the `id` that is passed in as the +only parameter. Since we want only one object to be returned, we call the `findObject` convenience +method with the `id` as the parameter. If we had instead a query that returned a +list of objects and took additional parameters, we would use one of the `execute`methods that takes an array of parameter values passed in as varargs. The following +example shows such a method: + +Java + +``` +public List searchForActors(int age, String namePattern) { + List actors = actorSearchMappingQuery.execute(age, namePattern); + return actors; +} +``` + +Kotlin + +``` +fun searchForActors(age: Int, namePattern: String) = + actorSearchMappingQuery.execute(age, namePattern) +``` + +#### 3.7.3. Using `SqlUpdate` + +The `SqlUpdate` class encapsulates an SQL update. As with a query, an update object is +reusable, and, as with all `RdbmsOperation` classes, an update can have parameters and is +defined in SQL. This class provides a number of `update(..)` methods analogous to the`execute(..)` methods of query objects. The `SqlUpdate` class is concrete. It can be +subclassed — for example, to add a custom update method. +However, you do not have to subclass the `SqlUpdate`class, since it can easily be parameterized by setting SQL and declaring parameters. 
+The following example creates a custom update method named `execute`: + +Java + +``` +import java.sql.Types; +import javax.sql.DataSource; +import org.springframework.jdbc.core.SqlParameter; +import org.springframework.jdbc.object.SqlUpdate; + +public class UpdateCreditRating extends SqlUpdate { + + public UpdateCreditRating(DataSource ds) { + setDataSource(ds); + setSql("update customer set credit_rating = ? where id = ?"); + declareParameter(new SqlParameter("creditRating", Types.NUMERIC)); + declareParameter(new SqlParameter("id", Types.NUMERIC)); + compile(); + } + + /** + * @param id for the Customer to be updated + * @param rating the new value for credit rating + * @return number of rows updated + */ + public int execute(int id, int rating) { + return update(rating, id); + } +} +``` + +Kotlin + +``` +import java.sql.Types +import javax.sql.DataSource +import org.springframework.jdbc.core.SqlParameter +import org.springframework.jdbc.object.SqlUpdate + +class UpdateCreditRating(ds: DataSource) : SqlUpdate() { + + init { + setDataSource(ds) + sql = "update customer set credit_rating = ? where id = ?" + declareParameter(SqlParameter("creditRating", Types.NUMERIC)) + declareParameter(SqlParameter("id", Types.NUMERIC)) + compile() + } + + /** + * @param id for the Customer to be updated + * @param rating the new value for credit rating + * @return number of rows updated + */ + fun execute(id: Int, rating: Int): Int { + return update(rating, id) + } +} +``` + +#### 3.7.4. Using `StoredProcedure` + +The `StoredProcedure` class is an `abstract` superclass for object abstractions of RDBMS +stored procedures. + +The inherited `sql` property is the name of the stored procedure in the RDBMS. + +To define a parameter for the `StoredProcedure` class, you can use an `SqlParameter` or one +of its subclasses. 
You must specify the parameter name and SQL type in the constructor, +as the following code snippet shows: + +Java + +``` +new SqlParameter("in_id", Types.NUMERIC), +new SqlOutParameter("out_first_name", Types.VARCHAR), +``` + +Kotlin + +``` +SqlParameter("in_id", Types.NUMERIC), +SqlOutParameter("out_first_name", Types.VARCHAR), +``` + +The SQL type is specified using the `java.sql.Types` constants. + +The first line (with the `SqlParameter`) declares an IN parameter. You can use IN parameters +both for stored procedure calls and for queries using the `SqlQuery` and its +subclasses (covered in [Understanding `SqlQuery`](#jdbc-SqlQuery)). + +The second line (with the `SqlOutParameter`) declares an `out` parameter to be used in the +stored procedure call. There is also an `SqlInOutParameter` for `InOut` parameters +(parameters that provide an `in` value to the procedure and that also return a value). + +For `in` parameters, in addition to the name and the SQL type, you can specify a +scale for numeric data or a type name for custom database types. For `out` parameters, +you can provide a `RowMapper` to handle mapping of rows returned from a `REF` cursor. +Another option is to specify an `SqlReturnType` that lets you define customized +handling of the return values. + +The next example of a simple DAO uses a `StoredProcedure` to call a function +(`sysdate()`), which comes with any Oracle database. To use the stored procedure +functionality, you have to create a class that extends `StoredProcedure`. In this +example, the `StoredProcedure` class is an inner class. However, if you need to reuse the`StoredProcedure`, you can declare it as a top-level class. This example has no input +parameters, but an output parameter is declared as a date type by using the`SqlOutParameter` class. The `execute()` method runs the procedure and extracts the +returned date from the results `Map`. 
The results `Map` has an entry for each declared +output parameter (in this case, only one) by using the parameter name as the key. +The following listing shows our custom StoredProcedure class: + +Java + +``` +import java.sql.Types; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; +import javax.sql.DataSource; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.jdbc.core.SqlOutParameter; +import org.springframework.jdbc.object.StoredProcedure; + +public class StoredProcedureDao { + + private GetSysdateProcedure getSysdate; + + @Autowired + public void init(DataSource dataSource) { + this.getSysdate = new GetSysdateProcedure(dataSource); + } + + public Date getSysdate() { + return getSysdate.execute(); + } + + private class GetSysdateProcedure extends StoredProcedure { + + private static final String SQL = "sysdate"; + + public GetSysdateProcedure(DataSource dataSource) { + setDataSource(dataSource); + setFunction(true); + setSql(SQL); + declareParameter(new SqlOutParameter("date", Types.DATE)); + compile(); + } + + public Date execute() { + // the 'sysdate' sproc has no input parameters, so an empty Map is supplied... 
+ Map results = execute(new HashMap()); + Date sysdate = (Date) results.get("date"); + return sysdate; + } + } + +} +``` + +Kotlin + +``` +import java.sql.Types +import java.util.Date +import java.util.Map +import javax.sql.DataSource +import org.springframework.jdbc.core.SqlOutParameter +import org.springframework.jdbc.object.StoredProcedure + +class StoredProcedureDao(dataSource: DataSource) { + + private val SQL = "sysdate" + + private val getSysdate = GetSysdateProcedure(dataSource) + + val sysdate: Date + get() = getSysdate.execute() + + private inner class GetSysdateProcedure(dataSource: DataSource) : StoredProcedure() { + + init { + setDataSource(dataSource) + isFunction = true + sql = SQL + declareParameter(SqlOutParameter("date", Types.DATE)) + compile() + } + + fun execute(): Date { + // the 'sysdate' sproc has no input parameters, so an empty Map is supplied... + val results = execute(mutableMapOf()) + return results["date"] as Date + } + } +} +``` + +The following example of a `StoredProcedure` has two output parameters (in this case, +Oracle REF cursors): + +Java + +``` +import java.util.HashMap; +import java.util.Map; +import javax.sql.DataSource; +import oracle.jdbc.OracleTypes; +import org.springframework.jdbc.core.SqlOutParameter; +import org.springframework.jdbc.object.StoredProcedure; + +public class TitlesAndGenresStoredProcedure extends StoredProcedure { + + private static final String SPROC_NAME = "AllTitlesAndGenres"; + + public TitlesAndGenresStoredProcedure(DataSource dataSource) { + super(dataSource, SPROC_NAME); + declareParameter(new SqlOutParameter("titles", OracleTypes.CURSOR, new TitleMapper())); + declareParameter(new SqlOutParameter("genres", OracleTypes.CURSOR, new GenreMapper())); + compile(); + } + + public Map execute() { + // again, this sproc has no input parameters, so an empty Map is supplied + return super.execute(new HashMap()); + } +} +``` + +Kotlin + +``` +import java.util.HashMap +import javax.sql.DataSource +import 
oracle.jdbc.OracleTypes +import org.springframework.jdbc.core.SqlOutParameter +import org.springframework.jdbc.object.StoredProcedure + +class TitlesAndGenresStoredProcedure(dataSource: DataSource) : StoredProcedure(dataSource, SPROC_NAME) { + + companion object { + private const val SPROC_NAME = "AllTitlesAndGenres" + } + + init { + declareParameter(SqlOutParameter("titles", OracleTypes.CURSOR, TitleMapper())) + declareParameter(SqlOutParameter("genres", OracleTypes.CURSOR, GenreMapper())) + compile() + } + + fun execute(): Map { + // again, this sproc has no input parameters, so an empty Map is supplied + return super.execute(HashMap()) + } +} +``` + +Notice how the overloaded variants of the `declareParameter(..)` method that have been +used in the `TitlesAndGenresStoredProcedure` constructor are passed `RowMapper`implementation instances. This is a very convenient and powerful way to reuse existing +functionality. The next two examples provide code for the two `RowMapper` implementations. 
+ +The `TitleMapper` class maps a `ResultSet` to a `Title` domain object for each row in +the supplied `ResultSet`, as follows: + +Java + +``` +import java.sql.ResultSet; +import java.sql.SQLException; +import com.foo.domain.Title; +import org.springframework.jdbc.core.RowMapper; + +public final class TitleMapper implements RowMapper { + + public Title mapRow(ResultSet rs, int rowNum) throws SQLException { + Title title = new Title(); + title.setId(rs.getLong("id")); + title.setName(rs.getString("name")); + return title; + } +} +``` + +Kotlin + +``` +import java.sql.ResultSet +import com.foo.domain.Title +import org.springframework.jdbc.core.RowMapper + +class TitleMapper : RowMapper<Title> { + + override fun mapRow(rs: ResultSet, rowNum: Int) = + Title(rs.getLong("id"), rs.getString("name")) +} +``` + +The `GenreMapper` class maps a `ResultSet` to a `Genre` domain object for each row in +the supplied `ResultSet`, as follows: + +Java + +``` +import java.sql.ResultSet; +import java.sql.SQLException; +import com.foo.domain.Genre; +import org.springframework.jdbc.core.RowMapper; + +public final class GenreMapper implements RowMapper<Genre> { + + public Genre mapRow(ResultSet rs, int rowNum) throws SQLException { + return new Genre(rs.getString("name")); + } +} +``` + +Kotlin + +``` +import java.sql.ResultSet +import com.foo.domain.Genre +import org.springframework.jdbc.core.RowMapper + +class GenreMapper : RowMapper<Genre> { + + override fun mapRow(rs: ResultSet, rowNum: Int): Genre { + return Genre(rs.getString("name")) + } +} +``` + +To pass parameters to a stored procedure that has one or more input parameters in its +definition in the RDBMS, you can code a strongly typed `execute(..)` method that would +delegate to the untyped `execute(Map)` method in the superclass, as the following example shows: + +Java + +``` +import java.sql.Types; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; +import javax.sql.DataSource; +import 
oracle.jdbc.OracleTypes; +import org.springframework.jdbc.core.SqlOutParameter; +import org.springframework.jdbc.core.SqlParameter; +import org.springframework.jdbc.object.StoredProcedure; + +public class TitlesAfterDateStoredProcedure extends StoredProcedure { + + private static final String SPROC_NAME = "TitlesAfterDate"; + private static final String CUTOFF_DATE_PARAM = "cutoffDate"; + + public TitlesAfterDateStoredProcedure(DataSource dataSource) { + super(dataSource, SPROC_NAME); + declareParameter(new SqlParameter(CUTOFF_DATE_PARAM, Types.DATE)); + declareParameter(new SqlOutParameter("titles", OracleTypes.CURSOR, new TitleMapper())); + compile(); + } + + public Map<String, Object> execute(Date cutoffDate) { + Map<String, Object> inputs = new HashMap<String, Object>(); + inputs.put(CUTOFF_DATE_PARAM, cutoffDate); + return super.execute(inputs); + } +} +``` + +Kotlin + +``` +import java.sql.Types +import java.util.Date +import javax.sql.DataSource +import oracle.jdbc.OracleTypes +import org.springframework.jdbc.core.SqlOutParameter +import org.springframework.jdbc.core.SqlParameter +import org.springframework.jdbc.object.StoredProcedure + +class TitlesAfterDateStoredProcedure(dataSource: DataSource) : StoredProcedure(dataSource, SPROC_NAME) { + + companion object { + private const val SPROC_NAME = "TitlesAfterDate" + private const val CUTOFF_DATE_PARAM = "cutoffDate" + } + + init { + declareParameter(SqlParameter(CUTOFF_DATE_PARAM, Types.DATE)) + declareParameter(SqlOutParameter("titles", OracleTypes.CURSOR, TitleMapper())) + compile() + } + + fun execute(cutoffDate: Date) = super.execute( + mapOf<String, Any>(CUTOFF_DATE_PARAM to cutoffDate)) +} +``` + +### 3.8. Common Problems with Parameter and Data Value Handling + +Common problems with parameters and data values exist in the different approaches +provided by Spring Framework’s JDBC support. This section covers how to address them. + +#### 3.8.1. 
Providing SQL Type Information for Parameters + +Usually, Spring determines the SQL type of the parameters based on the type of parameter +passed in. It is possible to explicitly provide the SQL type to be used when setting +parameter values. This is sometimes necessary to correctly set `NULL` values. + +You can provide SQL type information in several ways: + +* Many update and query methods of the `JdbcTemplate` take an additional parameter in + the form of an `int` array. This array is used to indicate the SQL type of the + corresponding parameter by using constant values from the `java.sql.Types` class. Provide + one entry for each parameter. + +* You can use the `SqlParameterValue` class to wrap the parameter value that needs this + additional information. To do so, create a new instance for each value and pass in the SQL type + and the parameter value in the constructor. You can also provide an optional scale + parameter for numeric values. + +* For methods that work with named parameters, you can use the `SqlParameterSource` classes,`BeanPropertySqlParameterSource` or `MapSqlParameterSource`. They both have methods + for registering the SQL type for any of the named parameter values. + +#### 3.8.2. Handling BLOB and CLOB objects + +You can store images, other binary data, and large chunks of text in the database. These +large objects are called BLOBs (Binary Large OBject) for binary data and CLOBs (Character +Large OBject) for character data. In Spring, you can handle these large objects by using +the `JdbcTemplate` directly and also when using the higher abstractions provided by RDBMS +Objects and the `SimpleJdbc` classes. All of these approaches use an implementation of +the `LobHandler` interface for the actual management of the LOB (Large OBject) data.`LobHandler` provides access to a `LobCreator` class, through the `getLobCreator` method, +that is used for creating new LOB objects to be inserted. 
+ +`LobCreator` and `LobHandler` provide the following support for LOB input and output: + +* BLOB + + * `byte[]`: `getBlobAsBytes` and `setBlobAsBytes` + + * `InputStream`: `getBlobAsBinaryStream` and `setBlobAsBinaryStream` + +* CLOB + + * `String`: `getClobAsString` and `setClobAsString` + + * `InputStream`: `getClobAsAsciiStream` and `setClobAsAsciiStream` + + * `Reader`: `getClobAsCharacterStream` and `setClobAsCharacterStream` + +The next example shows how to create and insert a BLOB. Later we show how to read +it back from the database. + +This example uses a `JdbcTemplate` and an implementation of the`AbstractLobCreatingPreparedStatementCallback`. It implements one method,`setValues`. This method provides a `LobCreator` that we use to set the values for the +LOB columns in your SQL insert statement. + +For this example, we assume that there is a variable, `lobHandler`, that is already +set to an instance of a `DefaultLobHandler`. You typically set this value through +dependency injection. + +The following example shows how to create and insert a BLOB: + +Java + +``` +final File blobIn = new File("spring2004.jpg"); +final InputStream blobIs = new FileInputStream(blobIn); +final File clobIn = new File("large.txt"); +final InputStream clobIs = new FileInputStream(clobIn); +final InputStreamReader clobReader = new InputStreamReader(clobIs); + +jdbcTemplate.execute( + "INSERT INTO lob_table (id, a_clob, a_blob) VALUES (?, ?, ?)", + new AbstractLobCreatingPreparedStatementCallback(lobHandler) { (1) + protected void setValues(PreparedStatement ps, LobCreator lobCreator) throws SQLException { + ps.setLong(1, 1L); + lobCreator.setClobAsCharacterStream(ps, 2, clobReader, (int)clobIn.length()); (2) + lobCreator.setBlobAsBinaryStream(ps, 3, blobIs, (int)blobIn.length()); (3) + } + } +); + +blobIs.close(); +clobReader.close(); +``` + +|**1**|Pass in the `lobHandler` that (in this example) is a plain `DefaultLobHandler`. 
| +|-----|--------------------------------------------------------------------------------| +|**2**|Using the method `setClobAsCharacterStream` to pass in the contents of the CLOB.| +|**3**| Using the method `setBlobAsBinaryStream` to pass in the contents of the BLOB. | + +Kotlin + +``` +val blobIn = File("spring2004.jpg") +val blobIs = FileInputStream(blobIn) +val clobIn = File("large.txt") +val clobIs = FileInputStream(clobIn) +val clobReader = InputStreamReader(clobIs) + +jdbcTemplate.execute( + "INSERT INTO lob_table (id, a_clob, a_blob) VALUES (?, ?, ?)", + object: AbstractLobCreatingPreparedStatementCallback(lobHandler) { (1) + override fun setValues(ps: PreparedStatement, lobCreator: LobCreator) { + ps.setLong(1, 1L) + lobCreator.setClobAsCharacterStream(ps, 2, clobReader, clobIn.length().toInt()) (2) + lobCreator.setBlobAsBinaryStream(ps, 3, blobIs, blobIn.length().toInt()) (3) + } + } +) +blobIs.close() +clobReader.close() +``` + +|**1**|Pass in the `lobHandler` that (in this example) is a plain `DefaultLobHandler`. | +|-----|--------------------------------------------------------------------------------| +|**2**|Using the method `setClobAsCharacterStream` to pass in the contents of the CLOB.| +|**3**| Using the method `setBlobAsBinaryStream` to pass in the contents of the BLOB. | + +| |If you invoke the `setBlobAsBinaryStream`, `setClobAsAsciiStream`, or`setClobAsCharacterStream` method on the `LobCreator` returned from`DefaultLobHandler.getLobCreator()`, you can optionally specify a negative value for the`contentLength` argument. If the specified content length is negative, the`DefaultLobHandler` uses the JDBC 4.0 variants of the set-stream methods without a<br/>length parameter. 
Otherwise, it passes the specified length on to the driver.<br/><br/>See the documentation for the JDBC driver you use to verify that it supports streaming a<br/>LOB without providing the content length.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Now it is time to read the LOB data from the database. Again, you use a `JdbcTemplate`with the same instance variable `lobHandler` and a reference to a `DefaultLobHandler`. +The following example shows how to do so: + +Java + +``` +List<Map<String, Object>> l = jdbcTemplate.query("select id, a_clob, a_blob from lob_table", + new RowMapper<Map<String, Object>>() { + public Map<String, Object> mapRow(ResultSet rs, int i) throws SQLException { + Map<String, Object> results = new HashMap<String, Object>(); + String clobText = lobHandler.getClobAsString(rs, "a_clob"); (1) + results.put("CLOB", clobText); + byte[] blobBytes = lobHandler.getBlobAsBytes(rs, "a_blob"); (2) + results.put("BLOB", blobBytes); + return results; + } + }); +``` + +|**1**|Using the method `getClobAsString` to retrieve the contents of the CLOB.| +|-----|------------------------------------------------------------------------| +|**2**|Using the method `getBlobAsBytes` to retrieve the contents of the BLOB. 
| + +Kotlin + +``` +val l = jdbcTemplate.query("select id, a_clob, a_blob from lob_table") { rs, _ -> + val clobText = lobHandler.getClobAsString(rs, "a_clob") (1) + val blobBytes = lobHandler.getBlobAsBytes(rs, "a_blob") (2) + mapOf("CLOB" to clobText, "BLOB" to blobBytes) +} +``` + +|**1**|Using the method `getClobAsString` to retrieve the contents of the CLOB.| +|-----|------------------------------------------------------------------------| +|**2**|Using the method `getBlobAsBytes` to retrieve the contents of the BLOB. | + +#### 3.8.3. Passing in Lists of Values for IN Clause + +The SQL standard allows for selecting rows based on an expression that includes a +variable list of values. A typical example would be `select * from T_ACTOR where id in +(1, 2, 3)`. This variable list is not directly supported for prepared statements by the +JDBC standard. You cannot declare a variable number of placeholders. You need a number +of variations with the desired number of placeholders prepared, or you need to generate +the SQL string dynamically once you know how many placeholders are required. The named +parameter support provided in the `NamedParameterJdbcTemplate` and `JdbcTemplate` takes +the latter approach. You can pass in the values as a `java.util.List` of primitive objects. This +list is used to insert the required placeholders and pass in the values during +statement execution. + +| |Be careful when passing in many values. The JDBC standard does not guarantee that you<br/>can use more than 100 values for an `in` expression list. Various databases exceed this<br/>number, but they usually have a hard limit for how many values are allowed. 
For example, Oracle’s<br/>limit is 1000.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In addition to the primitive values in the value list, you can create a `java.util.List`of object arrays. This list can support multiple expressions being defined for the `in`clause, such as `select * from T_ACTOR where (id, last_name) in ((1, 'Johnson'), (2, +'Harrop'))`. This, of course, requires that your database supports this syntax. + +#### 3.8.4. Handling Complex Types for Stored Procedure Calls + +When you call stored procedures, you can sometimes use complex types specific to the +database. To accommodate these types, Spring provides a `SqlReturnType` for handling +them when they are returned from the stored procedure call and `SqlTypeValue` when they +are passed in as a parameter to the stored procedure. + +The `SqlReturnType` interface has a single method (named `getTypeValue`) that must be +implemented. This interface is used as part of the declaration of an `SqlOutParameter`. +The following example shows returning the value of an Oracle `STRUCT` object of the user +declared type `ITEM_TYPE`: + +Java + +``` +public class TestItemStoredProcedure extends StoredProcedure { + + public TestItemStoredProcedure(DataSource dataSource) { + // ... + declareParameter(new SqlOutParameter("item", OracleTypes.STRUCT, "ITEM_TYPE", + (CallableStatement cs, int colIndx, int sqlType, String typeName) -> { + STRUCT struct = (STRUCT) cs.getObject(colIndx); + Object[] attr = struct.getAttributes(); + TestItem item = new TestItem(); + item.setId(((Number) attr[0]).longValue()); + item.setDescription((String) attr[1]); + item.setExpirationDate((java.util.Date) attr[2]); + return item; + })); + // ... 
+    }
+```
+
+Kotlin
+
+```
+class TestItemStoredProcedure(dataSource: DataSource) : StoredProcedure() {
+
+    init {
+        // ...
+        declareParameter(SqlOutParameter("item", OracleTypes.STRUCT, "ITEM_TYPE") { cs, colIndx, sqlType, typeName ->
+            val struct = cs.getObject(colIndx) as STRUCT
+            val attr = struct.getAttributes()
+            TestItem(attr[0] as Long, attr[1] as String, attr[2] as Date)
+        })
+        // ...
+    }
+}
+```
+
+You can use `SqlTypeValue` to pass the value of a Java object (such as `TestItem`) to a
+stored procedure. The `SqlTypeValue` interface has a single method (named `createTypeValue`) that you must implement. The active connection is passed in, and you
+can use it to create database-specific objects, such as `StructDescriptor` instances
+or `ArrayDescriptor` instances. The following example creates a `StructDescriptor` instance:
+
+Java
+
+```
+final TestItem testItem = new TestItem(123L, "A test item",
+        new SimpleDateFormat("yyyy-M-d").parse("2010-12-31"));
+
+SqlTypeValue value = new AbstractSqlTypeValue() {
+    protected Object createTypeValue(Connection conn, int sqlType, String typeName) throws SQLException {
+        StructDescriptor itemDescriptor = new StructDescriptor(typeName, conn);
+        Struct item = new STRUCT(itemDescriptor, conn,
+                new Object[] {
+                    testItem.getId(),
+                    testItem.getDescription(),
+                    new java.sql.Date(testItem.getExpirationDate().getTime())
+                });
+        return item;
+    }
+};
+```
+
+Kotlin
+
+```
+val (id, description, expirationDate) = TestItem(123L, "A test item",
+        SimpleDateFormat("yyyy-M-d").parse("2010-12-31"))
+
+val value = object : AbstractSqlTypeValue() {
+    override fun createTypeValue(conn: Connection, sqlType: Int, typeName: String?): Any {
+        val itemDescriptor = StructDescriptor(typeName, conn)
+        return STRUCT(itemDescriptor, conn,
+                arrayOf(id, description, java.sql.Date(expirationDate.time)))
+    }
+}
+```
+
+You can now add this `SqlTypeValue` to the `Map` that contains the input parameters for the `execute` call of the stored procedure.
+ +Another use for the `SqlTypeValue` is passing in an array of values to an Oracle stored +procedure. Oracle has its own internal `ARRAY` class that must be used in this case, and +you can use the `SqlTypeValue` to create an instance of the Oracle `ARRAY` and populate +it with values from the Java `ARRAY`, as the following example shows: + +Java + +``` +final Long[] ids = new Long[] {1L, 2L}; + +SqlTypeValue value = new AbstractSqlTypeValue() { + protected Object createTypeValue(Connection conn, int sqlType, String typeName) throws SQLException { + ArrayDescriptor arrayDescriptor = new ArrayDescriptor(typeName, conn); + ARRAY idArray = new ARRAY(arrayDescriptor, conn, ids); + return idArray; + } +}; +``` + +Kotlin + +``` +class TestItemStoredProcedure(dataSource: DataSource) : StoredProcedure() { + + init { + val ids = arrayOf(1L, 2L) + val value = object : AbstractSqlTypeValue() { + override fun createTypeValue(conn: Connection, sqlType: Int, typeName: String?): Any { + val arrayDescriptor = ArrayDescriptor(typeName, conn) + return ARRAY(arrayDescriptor, conn, ids) + } + } + } +} +``` + +### 3.9. Embedded Database Support + +The `org.springframework.jdbc.datasource.embedded` package provides support for embedded +Java database engines. Support for [HSQL](http://www.hsqldb.org),[H2](https://www.h2database.com), and [Derby](https://db.apache.org/derby) is provided +natively. You can also use an extensible API to plug in new embedded database types and`DataSource` implementations. + +#### 3.9.1. Why Use an Embedded Database? + +An embedded database can be useful during the development phase of a project because of its +lightweight nature. Benefits include ease of configuration, quick startup time, +testability, and the ability to rapidly evolve your SQL during development. + +#### 3.9.2. 
Creating an Embedded Database by Using Spring XML + +If you want to expose an embedded database instance as a bean in a Spring`ApplicationContext`, you can use the `embedded-database` tag in the `spring-jdbc` namespace: + +``` +<jdbc:embedded-database id="dataSource" generate-name="true"> + <jdbc:script location="classpath:schema.sql"/> + <jdbc:script location="classpath:test-data.sql"/> +</jdbc:embedded-database> +``` + +The preceding configuration creates an embedded HSQL database that is populated with SQL from +the `schema.sql` and `test-data.sql` resources in the root of the classpath. In addition, as +a best practice, the embedded database is assigned a uniquely generated name. The +embedded database is made available to the Spring container as a bean of type`javax.sql.DataSource` that can then be injected into data access objects as needed. + +#### 3.9.3. Creating an Embedded Database Programmatically + +The `EmbeddedDatabaseBuilder` class provides a fluent API for constructing an embedded +database programmatically. 
You can use this when you need to create an embedded database in a +stand-alone environment or in a stand-alone integration test, as in the following example: + +Java + +``` +EmbeddedDatabase db = new EmbeddedDatabaseBuilder() + .generateUniqueName(true) + .setType(H2) + .setScriptEncoding("UTF-8") + .ignoreFailedDrops(true) + .addScript("schema.sql") + .addScripts("user_data.sql", "country_data.sql") + .build(); + +// perform actions against the db (EmbeddedDatabase extends javax.sql.DataSource) + +db.shutdown() +``` + +Kotlin + +``` +val db = EmbeddedDatabaseBuilder() + .generateUniqueName(true) + .setType(H2) + .setScriptEncoding("UTF-8") + .ignoreFailedDrops(true) + .addScript("schema.sql") + .addScripts("user_data.sql", "country_data.sql") + .build() + +// perform actions against the db (EmbeddedDatabase extends javax.sql.DataSource) + +db.shutdown() +``` + +See the [javadoc for `EmbeddedDatabaseBuilder`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jdbc/datasource/embedded/EmbeddedDatabaseBuilder.html)for further details on all supported options. + +You can also use the `EmbeddedDatabaseBuilder` to create an embedded database by using Java +configuration, as the following example shows: + +Java + +``` +@Configuration +public class DataSourceConfig { + + @Bean + public DataSource dataSource() { + return new EmbeddedDatabaseBuilder() + .generateUniqueName(true) + .setType(H2) + .setScriptEncoding("UTF-8") + .ignoreFailedDrops(true) + .addScript("schema.sql") + .addScripts("user_data.sql", "country_data.sql") + .build(); + } +} +``` + +Kotlin + +``` +@Configuration +class DataSourceConfig { + + @Bean + fun dataSource(): DataSource { + return EmbeddedDatabaseBuilder() + .generateUniqueName(true) + .setType(H2) + .setScriptEncoding("UTF-8") + .ignoreFailedDrops(true) + .addScript("schema.sql") + .addScripts("user_data.sql", "country_data.sql") + .build() + } +} +``` + +#### 3.9.4. 
Selecting the Embedded Database Type + +This section covers how to select one of the three embedded databases that Spring +supports. It includes the following topics: + +* [Using HSQL](#jdbc-embedded-database-using-HSQL) + +* [Using H2](#jdbc-embedded-database-using-H2) + +* [Using Derby](#jdbc-embedded-database-using-Derby) + +##### Using HSQL + +Spring supports HSQL 1.8.0 and above. HSQL is the default embedded database if no type is +explicitly specified. To specify HSQL explicitly, set the `type` attribute of the`embedded-database` tag to `HSQL`. If you use the builder API, call the`setType(EmbeddedDatabaseType)` method with `EmbeddedDatabaseType.HSQL`. + +##### Using H2 + +Spring supports the H2 database. To enable H2, set the `type` attribute of the`embedded-database` tag to `H2`. If you use the builder API, call the`setType(EmbeddedDatabaseType)` method with `EmbeddedDatabaseType.H2`. + +##### Using Derby + +Spring supports Apache Derby 10.5 and above. To enable Derby, set the `type`attribute of the `embedded-database` tag to `DERBY`. If you use the builder API, +call the `setType(EmbeddedDatabaseType)` method with `EmbeddedDatabaseType.DERBY`. + +#### 3.9.5. Testing Data Access Logic with an Embedded Database + +Embedded databases provide a lightweight way to test data access code. The next example is a +data access integration test template that uses an embedded database. Using such a template +can be useful for one-offs when the embedded database does not need to be reused across test +classes. However, if you wish to create an embedded database that is shared within a test suite, +consider using the [Spring TestContext Framework](testing.html#testcontext-framework) and +configuring the embedded database as a bean in the Spring `ApplicationContext` as described +in [Creating an Embedded Database by Using Spring XML](#jdbc-embedded-database-xml) and [Creating an Embedded Database Programmatically](#jdbc-embedded-database-java). 
The following listing +shows the test template: + +Java + +``` +public class DataAccessIntegrationTestTemplate { + + private EmbeddedDatabase db; + + @BeforeEach + public void setUp() { + // creates an HSQL in-memory database populated from default scripts + // classpath:schema.sql and classpath:data.sql + db = new EmbeddedDatabaseBuilder() + .generateUniqueName(true) + .addDefaultScripts() + .build(); + } + + @Test + public void testDataAccess() { + JdbcTemplate template = new JdbcTemplate(db); + template.query( /* ... */ ); + } + + @AfterEach + public void tearDown() { + db.shutdown(); + } + +} +``` + +Kotlin + +``` +class DataAccessIntegrationTestTemplate { + + private lateinit var db: EmbeddedDatabase + + @BeforeEach + fun setUp() { + // creates an HSQL in-memory database populated from default scripts + // classpath:schema.sql and classpath:data.sql + db = EmbeddedDatabaseBuilder() + .generateUniqueName(true) + .addDefaultScripts() + .build() + } + + @Test + fun testDataAccess() { + val template = JdbcTemplate(db) + template.query( /* ... */) + } + + @AfterEach + fun tearDown() { + db.shutdown() + } +} +``` + +#### 3.9.6. Generating Unique Names for Embedded Databases + +Development teams often encounter errors with embedded databases if their test suite +inadvertently attempts to recreate additional instances of the same database. This can +happen quite easily if an XML configuration file or `@Configuration` class is responsible +for creating an embedded database and the corresponding configuration is then reused +across multiple testing scenarios within the same test suite (that is, within the same JVM +process) — for example, integration tests against embedded databases whose`ApplicationContext` configuration differs only with regard to which bean definition +profiles are active. 
+ +The root cause of such errors is the fact that Spring’s `EmbeddedDatabaseFactory` (used +internally by both the `<jdbc:embedded-database>` XML namespace element and the`EmbeddedDatabaseBuilder` for Java configuration) sets the name of the embedded database to`testdb` if not otherwise specified. For the case of `<jdbc:embedded-database>`, the +embedded database is typically assigned a name equal to the bean’s `id` (often, +something like `dataSource`). Thus, subsequent attempts to create an embedded database +do not result in a new database. Instead, the same JDBC connection URL is reused, +and attempts to create a new embedded database actually point to an existing +embedded database created from the same configuration. + +To address this common issue, Spring Framework 4.2 provides support for generating +unique names for embedded databases. To enable the use of generated names, use one of +the following options. + +* `EmbeddedDatabaseFactory.setGenerateUniqueDatabaseName()` + +* `EmbeddedDatabaseBuilder.generateUniqueName()` + +* `<jdbc:embedded-database generate-name="true" …​ >` + +#### 3.9.7. Extending the Embedded Database Support + +You can extend Spring JDBC embedded database support in two ways: + +* Implement `EmbeddedDatabaseConfigurer` to support a new embedded database type. + +* Implement `DataSourceFactory` to support a new `DataSource` implementation, such as a + connection pool to manage embedded database connections. + +We encourage you to contribute extensions to the Spring community at[GitHub Issues](https://github.com/spring-projects/spring-framework/issues). + +### 3.10. Initializing a `DataSource` + +The `org.springframework.jdbc.datasource.init` package provides support for initializing +an existing `DataSource`. The embedded database support provides one option for creating +and initializing a `DataSource` for an application. However, you may sometimes need to initialize +an instance that runs on a server somewhere. + +#### 3.10.1. 
Initializing a Database by Using Spring XML + +If you want to initialize a database and you can provide a reference to a `DataSource`bean, you can use the `initialize-database` tag in the `spring-jdbc` namespace: + +``` +<jdbc:initialize-database data-source="dataSource"> + <jdbc:script location="classpath:com/foo/sql/db-schema.sql"/> + <jdbc:script location="classpath:com/foo/sql/db-test-data.sql"/> +</jdbc:initialize-database> +``` + +The preceding example runs the two specified scripts against the database. The first +script creates a schema, and the second populates tables with a test data set. The script +locations can also be patterns with wildcards in the usual Ant style used for resources +in Spring (for example,`classpath*:/com/foo/**/sql/*-data.sql`). If you use a +pattern, the scripts are run in the lexical order of their URL or filename. + +The default behavior of the database initializer is to unconditionally run the provided +scripts. This may not always be what you want — for instance, if you run +the scripts against a database that already has test data in it. The likelihood +of accidentally deleting data is reduced by following the common pattern (shown earlier) +of creating the tables first and then inserting the data. The first step fails if +the tables already exist. + +However, to gain more control over the creation and deletion of existing data, the XML +namespace provides a few additional options. The first is a flag to switch the +initialization on and off. You can set this according to the environment (such as pulling a +boolean value from system properties or from an environment bean). 
The following example gets a value from a system property: + +``` +<jdbc:initialize-database data-source="dataSource" + enabled="#{systemProperties.INITIALIZE_DATABASE}"> (1) + <jdbc:script location="..."/> +</jdbc:initialize-database> +``` + +|**1**|Get the value for `enabled` from a system property called `INITIALIZE_DATABASE`.| +|-----|--------------------------------------------------------------------------------| + +The second option to control what happens with existing data is to be more tolerant of +failures. To this end, you can control the ability of the initializer to ignore certain +errors in the SQL it runs from the scripts, as the following example shows: + +``` +<jdbc:initialize-database data-source="dataSource" ignore-failures="DROPS"> + <jdbc:script location="..."/> +</jdbc:initialize-database> +``` + +In the preceding example, we are saying that we expect that, sometimes, the scripts are run +against an empty database, and there are some `DROP` statements in the scripts that +would, therefore, fail. So failed SQL `DROP` statements will be ignored, but other failures +will cause an exception. This is useful if your SQL dialect doesn’t support `DROP …​ IF +EXISTS` (or similar) but you want to unconditionally remove all test data before +re-creating it. In that case the first script is usually a set of `DROP` statements, +followed by a set of `CREATE` statements. + +The `ignore-failures` option can be set to `NONE` (the default), `DROPS` (ignore failed +drops), or `ALL` (ignore all failures). + +Each statement should be separated by `;` or a new line if the `;` character is not +present at all in the script. 
You can control that globally or script by script, as the +following example shows: + +``` +<jdbc:initialize-database data-source="dataSource" separator="@@"> (1) + <jdbc:script location="classpath:com/myapp/sql/db-schema.sql" separator=";"/> (2) + <jdbc:script location="classpath:com/myapp/sql/db-test-data-1.sql"/> + <jdbc:script location="classpath:com/myapp/sql/db-test-data-2.sql"/> +</jdbc:initialize-database> +``` + +|**1**| Set the separator scripts to `@@`. | +|-----|---------------------------------------------| +|**2**|Set the separator for `db-schema.sql` to `;`.| + +In this example, the two `test-data` scripts use `@@` as statement separator and only +the `db-schema.sql` uses `;`. This configuration specifies that the default separator +is `@@` and overrides that default for the `db-schema` script. + +If you need more control than you get from the XML namespace, you can use the`DataSourceInitializer` directly and define it as a component in your application. + +##### Initialization of Other Components that Depend on the Database + +A large class of applications (those that do not use the database until after the Spring context has +started) can use the database initializer with no further +complications. If your application is not one of those, you might need to read the rest +of this section. + +The database initializer depends on a `DataSource` instance and runs the scripts +provided in its initialization callback (analogous to an `init-method` in an XML bean +definition, a `@PostConstruct` method in a component, or the `afterPropertiesSet()`method in a component that implements `InitializingBean`). If other beans depend on the +same data source and use the data source in an initialization callback, there +might be a problem because the data has not yet been initialized. A common example of +this is a cache that initializes eagerly and loads data from the database on application +startup. 
+ +To get around this issue, you have two options: change your cache initialization strategy +to a later phase or ensure that the database initializer is initialized first. + +Changing your cache initialization strategy might be easy if the application is in your control and not otherwise. +Some suggestions for how to implement this include: + +* Make the cache initialize lazily on first usage, which improves application startup + time. + +* Have your cache or a separate component that initializes the cache implement`Lifecycle` or `SmartLifecycle`. When the application context starts, you can + automatically start a `SmartLifecycle` by setting its `autoStartup` flag, and you can + manually start a `Lifecycle` by calling `ConfigurableApplicationContext.start()`on the enclosing context. + +* Use a Spring `ApplicationEvent` or similar custom observer mechanism to trigger the + cache initialization. `ContextRefreshedEvent` is always published by the context when + it is ready for use (after all beans have been initialized), so that is often a useful + hook (this is how the `SmartLifecycle` works by default). + +Ensuring that the database initializer is initialized first can also be easy. Some suggestions on how to implement this include: + +* Rely on the default behavior of the Spring `BeanFactory`, which is that beans are + initialized in registration order. You can easily arrange that by adopting the common + practice of a set of `<import/>` elements in XML configuration that order your + application modules and ensuring that the database and database initialization are + listed first. + +* Separate the `DataSource` and the business components that use it and control their + startup order by putting them in separate `ApplicationContext` instances (for example, the + parent context contains the `DataSource`, and the child context contains the business + components). This structure is common in Spring web applications but can be more + generally applied. + +## 4. 
Data Access with R2DBC + +[R2DBC](https://r2dbc.io) ("Reactive Relational Database Connectivity") is a community-driven +specification effort to standardize access to SQL databases using reactive patterns. + +### 4.1. Package Hierarchy + +The Spring Framework’s R2DBC abstraction framework consists of two different packages: + +* `core`: The `org.springframework.r2dbc.core` package contains the `DatabaseClient`class plus a variety of related classes. See [Using the R2DBC Core Classes to Control Basic R2DBC Processing and Error Handling](#r2dbc-core). + +* `connection`: The `org.springframework.r2dbc.connection` package contains a utility class + for easy `ConnectionFactory` access and various simple `ConnectionFactory` implementations + that you can use for testing and running unmodified R2DBC. See [Controlling Database Connections](#r2dbc-connections). + +### 4.2. Using the R2DBC Core Classes to Control Basic R2DBC Processing and Error Handling + +This section covers how to use the R2DBC core classes to control basic R2DBC processing, +including error handling. It includes the following topics: + +* [Using `DatabaseClient`](#r2dbc-DatabaseClient) + +* [Executing Statements](#r2dbc-DatabaseClient-examples-statement) + +* [Querying (`SELECT`)](#r2dbc-DatabaseClient-examples-query) + +* [Updating (`INSERT`, `UPDATE`, and `DELETE`) with `DatabaseClient`](#r2dbc-DatabaseClient-examples-update) + +* [Statement Filters](#r2dbc-DatabaseClient-filter) + +* [Retrieving Auto-generated Keys](#r2dbc-auto-generated-keys) + +#### 4.2.1. Using `DatabaseClient` + +`DatabaseClient` is the central class in the R2DBC core package. It handles the +creation and release of resources, which helps to avoid common errors, such as +forgetting to close the connection. It performs the basic tasks of the core R2DBC +workflow (such as statement creation and execution), leaving application code to provide +SQL and extract results. 
The `DatabaseClient` class: + +* Runs SQL queries + +* Update statements and stored procedure calls + +* Performs iteration over `Result` instances + +* Catches R2DBC exceptions and translates them to the generic, more informative, exception + hierarchy defined in the `org.springframework.dao` package. (See [Consistent Exception Hierarchy](#dao-exceptions).) + +The client has a functional, fluent API using reactive types for declarative composition. + +When you use the `DatabaseClient` for your code, you need only to implement`java.util.function` interfaces, giving them a clearly defined contract. +Given a `Connection` provided by the `DatabaseClient` class, a `Function`callback creates a `Publisher`. The same is true for mapping functions that +extract a `Row` result. + +You can use `DatabaseClient` within a DAO implementation through direct instantiation +with a `ConnectionFactory` reference, or you can configure it in a Spring IoC container +and give it to DAOs as a bean reference. + +The simplest way to create a `DatabaseClient` object is through a static factory method, as follows: + +Java + +``` +DatabaseClient client = DatabaseClient.create(connectionFactory); +``` + +Kotlin + +``` +val client = DatabaseClient.create(connectionFactory) +``` + +| |The `ConnectionFactory` should always be configured as a bean in the Spring IoC<br/>container.| +|---|----------------------------------------------------------------------------------------------| + +The preceding method creates a `DatabaseClient` with default settings. + +You can also obtain a `Builder` instance from `DatabaseClient.builder()`. +You can customize the client by calling the following methods: + +* `….bindMarkers(…)`: Supply a specific `BindMarkersFactory` to configure named + parameter to database bind marker translation. + +* `….executeFunction(…)`: Set the `ExecuteFunction` how `Statement` objects get + run. + +* `….namedParameters(false)`: Disable named parameter expansion. Enabled by default. 
+ +| |Dialects are resolved by [`BindMarkersFactoryResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/r2dbc/core/binding/BindMarkersFactoryResolver.html)from a `ConnectionFactory`, typically by inspecting `ConnectionFactoryMetadata`. <br/>You can let Spring auto-discover your `BindMarkersFactory` by registering a<br/>class that implements `org.springframework.r2dbc.core.binding.BindMarkersFactoryResolver$BindMarkerFactoryProvider`through `META-INF/spring.factories`.`BindMarkersFactoryResolver` discovers bind marker provider implementations from<br/>the class path using Spring’s `SpringFactoriesLoader`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Currently supported databases are: + +* H2 + +* MariaDB + +* Microsoft SQL Server + +* MySQL + +* Postgres + +All SQL issued by this class is logged at the `DEBUG` level under the category +corresponding to the fully qualified class name of the client instance (typically`DefaultDatabaseClient`). Additionally, each execution registers a checkpoint in +the reactive sequence to aid debugging. + +The following sections provide some examples of `DatabaseClient` usage. These examples +are not an exhaustive list of all of the functionality exposed by the `DatabaseClient`. 
+See the attendant [javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/r2dbc/core/DatabaseClient.html) for that. + +##### Executing Statements + +`DatabaseClient` provides the basic functionality of running a statement. +The following example shows what you need to include for minimal but fully functional +code that creates a new table: + +Java + +``` +Mono<Void> completion = client.sql("CREATE TABLE person (id VARCHAR(255) PRIMARY KEY, name VARCHAR(255), age INTEGER);") + .then(); +``` + +Kotlin + +``` +client.sql("CREATE TABLE person (id VARCHAR(255) PRIMARY KEY, name VARCHAR(255), age INTEGER);") + .await() +``` + +`DatabaseClient` is designed for convenient, fluent usage. +It exposes intermediate, continuation, and terminal methods at each stage of the +execution specification. The preceding example above uses `then()` to return a completion`Publisher` that completes as soon as the query (or queries, if the SQL query contains +multiple statements) completes. + +| |`execute(…)` accepts either the SQL query string or a query `Supplier<String>`to defer the actual query creation until execution.| +|---|---------------------------------------------------------------------------------------------------------------------------------| + +##### Querying (`SELECT`) + +SQL queries can return values through `Row` objects or the number of affected rows.`DatabaseClient` can return the number of updated rows or the rows themselves, +depending on the issued query. 
+
+The following query gets the `id` and `name` columns from a table:
+
+Java
+
+```
+Mono<Map<String, Object>> first = client.sql("SELECT id, name FROM person")
+        .fetch().first();
+```
+
+Kotlin
+
+```
+val first = client.sql("SELECT id, name FROM person")
+        .fetch().awaitSingle()
+```
+
+The following query uses a bind variable:
+
+Java
+
+```
+Mono<Map<String, Object>> first = client.sql("SELECT id, name FROM person WHERE first_name = :fn")
+        .bind("fn", "Joe")
+        .fetch().first();
+```
+
+Kotlin
+
+```
+val first = client.sql("SELECT id, name FROM person WHERE first_name = :fn")
+        .bind("fn", "Joe")
+        .fetch().awaitSingle()
+```
+
+You might have noticed the use of `fetch()` in the example above. `fetch()` is a
+continuation operator that lets you specify how much data you want to consume.
+
+Calling `first()` returns the first row from the result and discards remaining rows.
+You can consume data with the following operators:
+
+* `first()` returns the first row of the entire result. Its Kotlin Coroutine variant
+  is named `awaitSingle()` for non-nullable return values and `awaitSingleOrNull()` if the value is optional.
+
+* `one()` returns exactly one result and fails if the result contains more rows.
+  Using Kotlin Coroutines, `awaitOne()` for exactly one value or `awaitOneOrNull()` if the value may be `null`.
+
+* `all()` returns all rows of the result. When using Kotlin Coroutines, use `flow()`.
+
+* `rowsUpdated()` returns the number of affected rows (`INSERT`/`UPDATE`/`DELETE` count). Its Kotlin Coroutine variant is named `awaitRowsUpdated()`.
+
+Without specifying further mapping details, queries return tabular results
+as `Map` whose keys are case-insensitive column names that map to their column value.
+
+You can take control over result mapping by supplying a `Function<Row, T>` that gets
+called for each `Row` so it can return arbitrary values (singular values,
+collections and maps, and objects).
+
+The following example extracts the `name` column and emits its value:
+
+Java
+
+```
+Flux<String> names = client.sql("SELECT name FROM person")
+        .map(row -> row.get("name", String.class))
+        .all();
+```
+
+Kotlin
+
+```
+val names = client.sql("SELECT name FROM person")
+        .map { row: Row -> row.get("name", String::class.java) }
+        .flow()
+```
+
+What about `null`?
+
+Relational database results can contain `null` values.
+The Reactive Streams specification forbids the emission of `null` values.
+That requirement mandates proper `null` handling in the extractor function.
+While you can obtain `null` values from a `Row`, you must not emit a `null` value. You must wrap any `null` values in an object (for example, `Optional` for singular values) to make sure a `null` value is never returned directly
+by your extractor function.
+
+##### Updating (`INSERT`, `UPDATE`, and `DELETE`) with `DatabaseClient` #####
+
+The only difference of modifying statements is that these statements typically
+do not return tabular data so you use `rowsUpdated()` to consume results.
+
+The following example shows an `UPDATE` statement that returns the number
+of updated rows:
+
+Java
+
+```
+Mono<Integer> affectedRows = client.sql("UPDATE person SET first_name = :fn")
+        .bind("fn", "Joe")
+        .fetch().rowsUpdated();
+```
+
+Kotlin
+
+```
+val affectedRows = client.sql("UPDATE person SET first_name = :fn")
+        .bind("fn", "Joe")
+        .fetch().awaitRowsUpdated()
+```
+
+##### Binding Values to Queries
+
+A typical application requires parameterized SQL statements to select or
+update rows according to some input. These are typically `SELECT` statements
+constrained by a `WHERE` clause or `INSERT` and `UPDATE` statements that accept
+input parameters. Parameterized statements bear the risk of SQL injection if
+parameters are not escaped properly. `DatabaseClient` leverages R2DBC’s `bind` API to eliminate the risk of SQL injection for query parameters.
You can provide a parameterized SQL statement with the `sql(…)` operator
+ +Consider the following query: + +``` +SELECT id, name, state FROM table WHERE (name, age) IN (('John', 35), ('Ann', 50)) +``` + +The preceding query can be parametrized and run as follows: + +Java + +``` +List<Object[]> tuples = new ArrayList<>(); +tuples.add(new Object[] {"John", 35}); +tuples.add(new Object[] {"Ann", 50}); + +client.sql("SELECT id, name, state FROM table WHERE (name, age) IN (:tuples)") + .bind("tuples", tuples); +``` + +Kotlin + +``` +val tuples: MutableList<Array<Any>> = ArrayList() +tuples.add(arrayOf("John", 35)) +tuples.add(arrayOf("Ann", 50)) + +client.sql("SELECT id, name, state FROM table WHERE (name, age) IN (:tuples)") + .bind("tuples", tuples) +``` + +| |Usage of select lists is vendor-dependent.| +|---|------------------------------------------| + +The following example shows a simpler variant using `IN` predicates: + +Java + +``` +client.sql("SELECT id, name, state FROM table WHERE age IN (:ages)") + .bind("ages", Arrays.asList(35, 50)); +``` + +Kotlin + +``` +val tuples: MutableList<Array<Any>> = ArrayList() +tuples.add(arrayOf("John", 35)) +tuples.add(arrayOf("Ann", 50)) + +client.sql("SELECT id, name, state FROM table WHERE age IN (:ages)") + .bind("tuples", arrayOf(35, 50)) +``` + +| |R2DBC itself does not support Collection-like values. Nevertheless,<br/>expanding a given `List` in the example above works for named parameters<br/>in Spring’s R2DBC support, e.g. for use in `IN` clauses as shown above.<br/>However, inserting or updating array-typed columns (e.g. in Postgres)<br/>requires an array type that is supported by the underlying R2DBC driver:<br/>typically a Java array, e.g. 
`String[]` to update a `text[]` column.<br/>Do not pass `Collection<String>` or the like as an array parameter.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Statement Filters + +Sometimes it you need to fine-tune options on the actual `Statement`before it gets run. Register a `Statement` filter +(`StatementFilterFunction`) through `DatabaseClient` to intercept and +modify statements in their execution, as the following example shows: + +Java + +``` +client.sql("INSERT INTO table (name, state) VALUES(:name, :state)") + .filter((s, next) -> next.execute(s.returnGeneratedValues("id"))) + .bind("name", …) + .bind("state", …); +``` + +Kotlin + +``` +client.sql("INSERT INTO table (name, state) VALUES(:name, :state)") + .filter { s: Statement, next: ExecuteFunction -> next.execute(s.returnGeneratedValues("id")) } + .bind("name", …) + .bind("state", …) +``` + +`DatabaseClient` exposes also simplified `filter(…)` overload accepting `Function<Statement, Statement>`: + +Java + +``` +client.sql("INSERT INTO table (name, state) VALUES(:name, :state)") + .filter(statement -> s.returnGeneratedValues("id")); + +client.sql("SELECT id, name, state FROM table") + .filter(statement -> s.fetchSize(25)); +``` + +Kotlin + +``` +client.sql("INSERT INTO table (name, state) VALUES(:name, :state)") + .filter { statement -> s.returnGeneratedValues("id") } + +client.sql("SELECT id, name, state FROM table") + .filter { statement -> s.fetchSize(25) } +``` + +`StatementFilterFunction` implementations allow filtering 
of the`Statement` and filtering of `Result` objects. + +##### `DatabaseClient` Best Practices + +Instances of the `DatabaseClient` class are thread-safe, once configured. This is +important because it means that you can configure a single instance of a `DatabaseClient`and then safely inject this shared reference into multiple DAOs (or repositories). +The `DatabaseClient` is stateful, in that it maintains a reference to a `ConnectionFactory`, +but this state is not conversational state. + +A common practice when using the `DatabaseClient` class is to configure a `ConnectionFactory`in your Spring configuration file and then dependency-inject +that shared `ConnectionFactory` bean into your DAO classes. The `DatabaseClient` is created in +the setter for the `ConnectionFactory`. This leads to DAOs that resemble the following: + +Java + +``` +public class R2dbcCorporateEventDao implements CorporateEventDao { + + private DatabaseClient databaseClient; + + public void setConnectionFactory(ConnectionFactory connectionFactory) { + this.databaseClient = DatabaseClient.create(connectionFactory); + } + + // R2DBC-backed implementations of the methods on the CorporateEventDao follow... +} +``` + +Kotlin + +``` +class R2dbcCorporateEventDao(connectionFactory: ConnectionFactory) : CorporateEventDao { + + private val databaseClient = DatabaseClient.create(connectionFactory) + + // R2DBC-backed implementations of the methods on the CorporateEventDao follow... +} +``` + +An alternative to explicit configuration is to use component-scanning and annotation +support for dependency injection. In this case, you can annotate the class with `@Component`(which makes it a candidate for component-scanning) and annotate the `ConnectionFactory` setter +method with `@Autowired`. 
The following example shows how to do so: + +Java + +``` +@Component (1) +public class R2dbcCorporateEventDao implements CorporateEventDao { + + private DatabaseClient databaseClient; + + @Autowired (2) + public void setConnectionFactory(ConnectionFactory connectionFactory) { + this.databaseClient = DatabaseClient.create(connectionFactory); (3) + } + + // R2DBC-backed implementations of the methods on the CorporateEventDao follow... +} +``` + +|**1**| Annotate the class with `@Component`. | +|-----|-----------------------------------------------------------------| +|**2**|Annotate the `ConnectionFactory` setter method with `@Autowired`.| +|**3**| Create a new `DatabaseClient` with the `ConnectionFactory`. | + +Kotlin + +``` +@Component (1) +class R2dbcCorporateEventDao(connectionFactory: ConnectionFactory) : CorporateEventDao { (2) + + private val databaseClient = DatabaseClient(connectionFactory) (3) + + // R2DBC-backed implementations of the methods on the CorporateEventDao follow... +} +``` + +|**1**| Annotate the class with `@Component`. | +|-----|-----------------------------------------------------------| +|**2**| Constructor injection of the `ConnectionFactory`. | +|**3**|Create a new `DatabaseClient` with the `ConnectionFactory`.| + +Regardless of which of the above template initialization styles you choose to use (or +not), it is seldom necessary to create a new instance of a `DatabaseClient` class each +time you want to run SQL. Once configured, a `DatabaseClient` instance is thread-safe. +If your application accesses multiple +databases, you may want multiple `DatabaseClient` instances, which requires multiple`ConnectionFactory` and, subsequently, multiple differently configured `DatabaseClient`instances. + +### 4.3. Retrieving Auto-generated Keys + +`INSERT` statements may generate keys when inserting rows into a table +that defines an auto-increment or identity column. 
To get full control over +the column name to generate, simply register a `StatementFilterFunction` that +requests the generated key for the desired column. + +Java + +``` +Mono<Integer> generatedId = client.sql("INSERT INTO table (name, state) VALUES(:name, :state)") + .filter(statement -> s.returnGeneratedValues("id")) + .map(row -> row.get("id", Integer.class)) + .first(); + +// generatedId emits the generated key once the INSERT statement has finished +``` + +Kotlin + +``` +val generatedId = client.sql("INSERT INTO table (name, state) VALUES(:name, :state)") + .filter { statement -> s.returnGeneratedValues("id") } + .map { row -> row.get("id", Integer.class) } + .awaitOne() + +// generatedId emits the generated key once the INSERT statement has finished +``` + +### 4.4. Controlling Database Connections + +This section covers: + +* [Using `ConnectionFactory`](#r2dbc-ConnectionFactory) + +* [Using `ConnectionFactoryUtils`](#r2dbc-ConnectionFactoryUtils) + +* [Using `SingleConnectionFactory`](#r2dbc-SingleConnectionFactory) + +* [Using `TransactionAwareConnectionFactoryProxy`](#r2dbc-TransactionAwareConnectionFactoryProxy) + +* [Using `R2dbcTransactionManager`](#r2dbc-R2dbcTransactionManager) + +#### 4.4.1. Using `ConnectionFactory` + +Spring obtains an R2DBC connection to the database through a `ConnectionFactory`. +A `ConnectionFactory` is part of the R2DBC specification and is a common entry-point +for drivers. It lets a container or a framework hide connection pooling +and transaction management issues from the application code. As a developer, +you need not know details about how to connect to the database. That is the +responsibility of the administrator who sets up the `ConnectionFactory`. You +most likely fill both roles as you develop and test code, but you do not +necessarily have to know how the production data source is configured. 
When you use Spring’s R2DBC layer, you can configure your own `ConnectionFactory` with a
+In contrast to a pooled `ConnectionFactory`, it reuses the same connection all the time, avoiding +excessive creation of physical connections. + +#### 4.4.4. Using `TransactionAwareConnectionFactoryProxy` + +`TransactionAwareConnectionFactoryProxy` is a proxy for a target `ConnectionFactory`. +The proxy wraps that target `ConnectionFactory` to add awareness of Spring-managed transactions. + +| |Using this class is required if you use a R2DBC client that is not integrated otherwise<br/>with Spring’s R2DBC support. In this case, you can still use this client and, at<br/>the same time, have this client participating in Spring managed transactions. It is generally<br/>preferable to integrate a R2DBC client with proper access to `ConnectionFactoryUtils`for resource management.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See the [`TransactionAwareConnectionFactoryProxy`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/r2dbc/connection/TransactionAwareConnectionFactoryProxy.html)javadoc for more details. + +#### 4.4.5. Using `R2dbcTransactionManager` + +The `R2dbcTransactionManager` class is a `ReactiveTransactionManager` implementation for +single R2DBC datasources. It binds an R2DBC connection from the specified connection factory +to the subscriber `Context`, potentially allowing for one subscriber connection for each +connection factory. + +Application code is required to retrieve the R2DBC connection through`ConnectionFactoryUtils.getConnection(ConnectionFactory)`, instead of R2DBC’s standard`ConnectionFactory.create()`. 
+ +All framework classes (such as `DatabaseClient`) use this strategy implicitly. +If not used with this transaction manager, the lookup strategy behaves exactly like the common one. +Thus, it can be used in any case. + +The `R2dbcTransactionManager` class supports custom isolation levels that get applied to the connection. + +## 5. Object Relational Mapping (ORM) Data Access + +This section covers data access when you use Object Relational Mapping (ORM). + +### 5.1. Introduction to ORM with Spring + +The Spring Framework supports integration with the Java Persistence API (JPA) and +supports native Hibernate for resource management, data access object (DAO) implementations, +and transaction strategies. For example, for Hibernate, there is first-class support with +several convenient IoC features that address many typical Hibernate integration issues. +You can configure all of the supported features for OR (object relational) mapping +tools through Dependency Injection. They can participate in Spring’s resource and +transaction management, and they comply with Spring’s generic transaction and DAO +exception hierarchies. The recommended integration style is to code DAOs against plain +Hibernate or JPA APIs. + +Spring adds significant enhancements to the ORM layer of your choice when you create +data access applications. You can leverage as much of the integration support as you +wish, and you should compare this integration effort with the cost and risk of building +a similar infrastructure in-house. You can use much of the ORM support as you would a +library, regardless of technology, because everything is designed as a set of reusable +JavaBeans. ORM in a Spring IoC container facilitates configuration and deployment. Thus, +most examples in this section show configuration inside a Spring container. 
+ +The benefits of using the Spring Framework to create your ORM DAOs include: + +* **Easier testing.** Spring’s IoC approach makes it easy to swap the implementations + and configuration locations of Hibernate `SessionFactory` instances, JDBC `DataSource`instances, transaction managers, and mapped object implementations (if needed). This + in turn makes it much easier to test each piece of persistence-related code in + isolation. + +* **Common data access exceptions.** Spring can wrap exceptions from your ORM tool, + converting them from proprietary (potentially checked) exceptions to a common runtime`DataAccessException` hierarchy. This feature lets you handle most persistence + exceptions, which are non-recoverable, only in the appropriate layers, without + annoying boilerplate catches, throws, and exception declarations. You can still trap + and handle exceptions as necessary. Remember that JDBC exceptions (including + DB-specific dialects) are also converted to the same hierarchy, meaning that you can + perform some operations with JDBC within a consistent programming model. + +* **General resource management.** Spring application contexts can handle the location + and configuration of Hibernate `SessionFactory` instances, JPA `EntityManagerFactory`instances, JDBC `DataSource` instances, and other related resources. This makes these + values easy to manage and change. Spring offers efficient, easy, and safe handling of + persistence resources. For example, related code that uses Hibernate generally needs to + use the same Hibernate `Session` to ensure efficiency and proper transaction handling. + Spring makes it easy to create and bind a `Session` to the current thread transparently, + by exposing a current `Session` through the Hibernate `SessionFactory`. Thus, Spring + solves many chronic problems of typical Hibernate usage, for any local or JTA + transaction environment. 
+ +* **Integrated transaction management.** You can wrap your ORM code with a declarative, + aspect-oriented programming (AOP) style method interceptor either through the`@Transactional` annotation or by explicitly configuring the transaction AOP advice in + an XML configuration file. In both cases, transaction semantics and exception handling + (rollback and so on) are handled for you. As discussed in [Resource and Transaction Management](#orm-resource-mngmnt), + you can also swap various transaction managers, without affecting your ORM-related code. + For example, you can swap between local transactions and JTA, with the same full services + (such as declarative transactions) available in both scenarios. Additionally, + JDBC-related code can fully integrate transactionally with the code you use to do ORM. + This is useful for data access that is not suitable for ORM (such as batch processing and + BLOB streaming) but that still needs to share common transactions with ORM operations. + +| |For more comprehensive ORM support, including support for alternative database<br/>technologies such as MongoDB, you might want to check out the[Spring Data](https://projects.spring.io/spring-data/) suite of projects. If you are<br/>a JPA user, the [Getting Started Accessing<br/>Data with JPA](https://spring.io/guides/gs/accessing-data-jpa/) guide from [https://spring.io](https://spring.io) provides a great introduction.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 5.2. General ORM Integration Considerations + +This section highlights considerations that apply to all ORM technologies. 
+The [Hibernate](#orm-hibernate) section provides more details and also show these features and +configurations in a concrete context. + +The major goal of Spring’s ORM integration is clear application layering (with any data +access and transaction technology) and for loose coupling of application objects — no +more business service dependencies on the data access or transaction strategy, no more +hard-coded resource lookups, no more hard-to-replace singletons, no more custom service +registries. The goal is to have one simple and consistent approach to wiring up application objects, keeping +them as reusable and free from container dependencies as possible. All the individual +data access features are usable on their own but integrate nicely with Spring’s +application context concept, providing XML-based configuration and cross-referencing of +plain JavaBean instances that need not be Spring-aware. In a typical Spring application, +many important objects are JavaBeans: data access templates, data access objects, +transaction managers, business services that use the data access objects and transaction +managers, web view resolvers, web controllers that use the business services, and so on. + +#### 5.2.1. Resource and Transaction Management + +Typical business applications are cluttered with repetitive resource management code. +Many projects try to invent their own solutions, sometimes sacrificing proper handling +of failures for programming convenience. Spring advocates simple solutions for proper +resource handling, namely IoC through templating in the case of JDBC and applying AOP +interceptors for the ORM technologies. + +The infrastructure provides proper resource handling and appropriate conversion of +specific API exceptions to an unchecked infrastructure exception hierarchy. Spring +introduces a DAO exception hierarchy, applicable to any data access strategy. 
For direct +JDBC, the `JdbcTemplate` class mentioned in a [previous section](#jdbc-JdbcTemplate)provides connection handling and proper conversion of `SQLException` to the`DataAccessException` hierarchy, including translation of database-specific SQL error +codes to meaningful exception classes. For ORM technologies, see the[next section](#orm-exception-translation) for how to get the same exception +translation benefits. + +When it comes to transaction management, the `JdbcTemplate` class hooks in to the Spring +transaction support and supports both JTA and JDBC transactions, through respective +Spring transaction managers. For the supported ORM technologies, Spring offers Hibernate +and JPA support through the Hibernate and JPA transaction managers as well as JTA support. +For details on transaction support, see the [Transaction Management](#transaction) chapter. + +#### 5.2.2. Exception Translation + +When you use Hibernate or JPA in a DAO, you must decide how to handle the persistence +technology’s native exception classes. The DAO throws a subclass of a `HibernateException`or `PersistenceException`, depending on the technology. These exceptions are all runtime +exceptions and do not have to be declared or caught. You may also have to deal with`IllegalArgumentException` and `IllegalStateException`. This means that callers can only +treat exceptions as being generally fatal, unless they want to depend on the persistence +technology’s own exception structure. Catching specific causes (such as an optimistic +locking failure) is not possible without tying the caller to the implementation strategy. +This trade-off might be acceptable to applications that are strongly ORM-based or +do not need any special exception treatment (or both). However, Spring lets exception +translation be applied transparently through the `@Repository` annotation. 
The following +examples (one for Java configuration and one for XML configuration) show how to do so: + +Java + +``` +@Repository +public class ProductDaoImpl implements ProductDao { + + // class body here... + +} +``` + +Kotlin + +``` +@Repository +class ProductDaoImpl : ProductDao { + + // class body here... + +} +``` + +``` +<beans> + + <!-- Exception translation bean post processor --> + <bean class="org.springframework.dao.annotation.PersistenceExceptionTranslationPostProcessor"/> + + <bean id="myProductDao" class="product.ProductDaoImpl"/> + +</beans> +``` + +The postprocessor automatically looks for all exception translators (implementations of +the `PersistenceExceptionTranslator` interface) and advises all beans marked with the`@Repository` annotation so that the discovered translators can intercept and apply the +appropriate translation on the thrown exceptions. + +In summary, you can implement DAOs based on the plain persistence technology’s API and +annotations while still benefiting from Spring-managed transactions, dependency +injection, and transparent exception conversion (if desired) to Spring’s custom +exception hierarchies. + +### 5.3. Hibernate + +We start with a coverage of [Hibernate 5](https://hibernate.org/) in a Spring environment, +using it to demonstrate the approach that Spring takes towards integrating OR mappers. +This section covers many issues in detail and shows different variations of DAO +implementations and transaction demarcation. Most of these patterns can be directly +translated to all other supported ORM tools. The later sections in this chapter then +cover the other ORM technologies and show brief examples. 
+ +| |As of Spring Framework 5.3, Spring requires Hibernate ORM 5.2+ for Spring’s`HibernateJpaVendorAdapter` as well as for a native Hibernate `SessionFactory` setup.<br/>It is strongly recommended to go with Hibernate ORM 5.4 for a newly started application.<br/>For use with `HibernateJpaVendorAdapter`, Hibernate Search needs to be upgraded to 5.11.6.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.3.1. `SessionFactory` Setup in a Spring Container + +To avoid tying application objects to hard-coded resource lookups, you can define +resources (such as a JDBC `DataSource` or a Hibernate `SessionFactory`) as beans in the +Spring container. Application objects that need to access resources receive references +to such predefined instances through bean references, as illustrated in the DAO +definition in the [next section](#orm-hibernate-straight). 
+ +The following excerpt from an XML application context definition shows how to set up a +JDBC `DataSource` and a Hibernate `SessionFactory` on top of it: + +``` +<beans> + + <bean id="myDataSource" class="org.apache.commons.dbcp.BasicDataSource" destroy-method="close"> + <property name="driverClassName" value="org.hsqldb.jdbcDriver"/> + <property name="url" value="jdbc:hsqldb:hsql://localhost:9001"/> + <property name="username" value="sa"/> + <property name="password" value=""/> + </bean> + + <bean id="mySessionFactory" class="org.springframework.orm.hibernate5.LocalSessionFactoryBean"> + <property name="dataSource" ref="myDataSource"/> + <property name="mappingResources"> + <list> + <value>product.hbm.xml</value> + </list> + </property> + <property name="hibernateProperties"> + <value> + hibernate.dialect=org.hibernate.dialect.HSQLDialect + </value> + </property> + </bean> + +</beans> +``` + +Switching from a local Jakarta Commons DBCP `BasicDataSource` to a JNDI-located`DataSource` (usually managed by an application server) is only a matter of +configuration, as the following example shows: + +``` +<beans> + <jee:jndi-lookup id="myDataSource" jndi-name="java:comp/env/jdbc/myds"/> +</beans> +``` + +You can also access a JNDI-located `SessionFactory`, using Spring’s`JndiObjectFactoryBean` / `<jee:jndi-lookup>` to retrieve and expose it. +However, that is typically not common outside of an EJB context. + +| |Spring also provides a `LocalSessionFactoryBuilder` variant, seamlessly integrating<br/>with `@Bean` style configuration and programmatic setup (no `FactoryBean` involved).<br/><br/>Both `LocalSessionFactoryBean` and `LocalSessionFactoryBuilder` support background<br/>bootstrapping, with Hibernate initialization running in parallel to the application<br/>bootstrap thread on a given bootstrap executor (such as a `SimpleAsyncTaskExecutor`).<br/>On `LocalSessionFactoryBean`, this is available through the `bootstrapExecutor`property. 
On the programmatic `LocalSessionFactoryBuilder`, there is an overloaded`buildSessionFactory` method that takes a bootstrap executor argument.<br/><br/>As of Spring Framework 5.1, such a native Hibernate setup can also expose a JPA`EntityManagerFactory` for standard JPA interaction next to native Hibernate access.<br/>See [Native Hibernate Setup for JPA](#orm-jpa-hibernate) for details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.3.2. Implementing DAOs Based on the Plain Hibernate API + +Hibernate has a feature called contextual sessions, wherein Hibernate itself manages +one current `Session` per transaction. This is roughly equivalent to Spring’s +synchronization of one Hibernate `Session` per transaction. 
A corresponding DAO +implementation resembles the following example, based on the plain Hibernate API: + +Java + +``` +public class ProductDaoImpl implements ProductDao { + + private SessionFactory sessionFactory; + + public void setSessionFactory(SessionFactory sessionFactory) { + this.sessionFactory = sessionFactory; + } + + public Collection loadProductsByCategory(String category) { + return this.sessionFactory.getCurrentSession() + .createQuery("from test.Product product where product.category=?") + .setParameter(0, category) + .list(); + } +} +``` + +Kotlin + +``` +class ProductDaoImpl(private val sessionFactory: SessionFactory) : ProductDao { + + fun loadProductsByCategory(category: String): Collection<*> { + return sessionFactory.currentSession + .createQuery("from test.Product product where product.category=?") + .setParameter(0, category) + .list() + } +} +``` + +This style is similar to that of the Hibernate reference documentation and examples, +except for holding the `SessionFactory` in an instance variable. We strongly recommend +such an instance-based setup over the old-school `static` `HibernateUtil` class from +Hibernate’s CaveatEmptor sample application. (In general, do not keep any resources in`static` variables unless absolutely necessary.) + +The preceding DAO example follows the dependency injection pattern. It fits nicely into a Spring IoC +container, as it would if coded against Spring’s `HibernateTemplate`. +You can also set up such a DAO in plain Java (for example, in unit tests). To do so, +instantiate it and call `setSessionFactory(..)` with the desired factory reference. As a +Spring bean definition, the DAO would resemble the following: + +``` +<beans> + + <bean id="myProductDao" class="product.ProductDaoImpl"> + <property name="sessionFactory" ref="mySessionFactory"/> + </bean> + +</beans> +``` + +The main advantage of this DAO style is that it depends on Hibernate API only. No import +of any Spring class is required. 
This is appealing from a non-invasiveness
+perspective and may feel more natural to Hibernate developers.
+
+However, the DAO throws plain `HibernateException` (which is unchecked, so it does not have
+to be declared or caught), which means that callers can treat exceptions only as being
+generally fatal — unless they want to depend on Hibernate’s own exception hierarchy.
+Catching specific causes (such as an optimistic locking failure) is not possible without
+tying the caller to the implementation strategy. This trade-off might be acceptable to
+applications that are strongly Hibernate-based, do not need any special exception
+treatment, or both.
+
+Fortunately, Spring’s `LocalSessionFactoryBean` supports Hibernate’s `SessionFactory.getCurrentSession()` method for any Spring transaction strategy,
+returning the current Spring-managed transactional `Session`, even with `HibernateTransactionManager`. The standard behavior of that method remains
+to return the current `Session` associated with the ongoing JTA transaction, if any.
+This behavior applies regardless of whether you use Spring’s `JtaTransactionManager`, EJB container managed transactions (CMTs), or JTA.
+
+In summary, you can implement DAOs based on the plain Hibernate API, while still being
+able to participate in Spring-managed transactions.
+
+#### 5.3.3. Declarative Transaction Demarcation
+
+We recommend that you use Spring’s declarative transaction support, which lets you
+replace explicit transaction demarcation API calls in your Java code with an AOP
+transaction interceptor. You can configure this transaction interceptor in a Spring
+container by using either Java annotations or XML. This declarative transaction capability
+lets you keep business services free of repetitive transaction demarcation code and
+focus on adding business logic, which is the real value of your application. 
+
+| |Before you continue, we strongly encourage you to read [Declarative Transaction Management](#transaction-declarative) if you have not already done so.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+You can annotate the service layer with `@Transactional` annotations and instruct the
+Spring container to find these annotations and provide transactional semantics for
+these annotated methods. The following example shows how to do so:
+
+Java
+
+```
+public class ProductServiceImpl implements ProductService {
+
+    private ProductDao productDao;
+
+    public void setProductDao(ProductDao productDao) {
+        this.productDao = productDao;
+    }
+
+    @Transactional
+    public void increasePriceOfAllProductsInCategory(final String category) {
+        List productsToChange = this.productDao.loadProductsByCategory(category);
+        // ...
+    }
+
+    @Transactional(readOnly = true)
+    public List<Product> findAllProducts() {
+        return this.productDao.findAllProducts();
+    }
+}
+```
+
+Kotlin
+
+```
+class ProductServiceImpl(private val productDao: ProductDao) : ProductService {
+
+    @Transactional
+    fun increasePriceOfAllProductsInCategory(category: String) {
+        val productsToChange = productDao.loadProductsByCategory(category)
+        // ...
+    }
+
+    @Transactional(readOnly = true)
+    fun findAllProducts() = productDao.findAllProducts()
+}
+```
+
+In the container, you need to set up the `PlatformTransactionManager` implementation
+(as a bean) and a `<tx:annotation-driven/>` entry, opting into `@Transactional` processing at runtime. 
The following example shows how to do so: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:aop="http://www.springframework.org/schema/aop" + xmlns:tx="http://www.springframework.org/schema/tx" + xsi:schemaLocation=" + http://www.springframework.org/schema/beans + https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/tx + https://www.springframework.org/schema/tx/spring-tx.xsd + http://www.springframework.org/schema/aop + https://www.springframework.org/schema/aop/spring-aop.xsd"> + + <!-- SessionFactory, DataSource, etc. omitted --> + + <bean id="transactionManager" + class="org.springframework.orm.hibernate5.HibernateTransactionManager"> + <property name="sessionFactory" ref="sessionFactory"/> + </bean> + + <tx:annotation-driven/> + + <bean id="myProductService" class="product.SimpleProductService"> + <property name="productDao" ref="myProductDao"/> + </bean> + +</beans> +``` + +#### 5.3.4. Programmatic Transaction Demarcation + +You can demarcate transactions in a higher level of the application, on top of +lower-level data access services that span any number of operations. Nor do restrictions +exist on the implementation of the surrounding business service. It needs only a Spring`PlatformTransactionManager`. Again, the latter can come from anywhere, but preferably +as a bean reference through a `setTransactionManager(..)` method. Also, the`productDAO` should be set by a `setProductDao(..)` method. 
The following pair of snippets show +a transaction manager and a business service definition in a Spring application context +and an example for a business method implementation: + +``` +<beans> + + <bean id="myTxManager" class="org.springframework.orm.hibernate5.HibernateTransactionManager"> + <property name="sessionFactory" ref="mySessionFactory"/> + </bean> + + <bean id="myProductService" class="product.ProductServiceImpl"> + <property name="transactionManager" ref="myTxManager"/> + <property name="productDao" ref="myProductDao"/> + </bean> + +</beans> +``` + +Java + +``` +public class ProductServiceImpl implements ProductService { + + private TransactionTemplate transactionTemplate; + private ProductDao productDao; + + public void setTransactionManager(PlatformTransactionManager transactionManager) { + this.transactionTemplate = new TransactionTemplate(transactionManager); + } + + public void setProductDao(ProductDao productDao) { + this.productDao = productDao; + } + + public void increasePriceOfAllProductsInCategory(final String category) { + this.transactionTemplate.execute(new TransactionCallbackWithoutResult() { + public void doInTransactionWithoutResult(TransactionStatus status) { + List productsToChange = this.productDao.loadProductsByCategory(category); + // do the price increase... + } + }); + } +} +``` + +Kotlin + +``` +class ProductServiceImpl(transactionManager: PlatformTransactionManager, + private val productDao: ProductDao) : ProductService { + + private val transactionTemplate = TransactionTemplate(transactionManager) + + fun increasePriceOfAllProductsInCategory(category: String) { + transactionTemplate.execute { + val productsToChange = productDao.loadProductsByCategory(category) + // do the price increase... + } + } +} +``` + +Spring’s `TransactionInterceptor` lets any checked application exception be thrown +with the callback code, while `TransactionTemplate` is restricted to unchecked +exceptions within the callback. 
`TransactionTemplate` triggers a rollback in case of +an unchecked application exception or if the transaction is marked rollback-only by +the application (by setting `TransactionStatus`). By default, `TransactionInterceptor`behaves the same way but allows configurable rollback policies per method. + +#### 5.3.5. Transaction Management Strategies + +Both `TransactionTemplate` and `TransactionInterceptor` delegate the actual transaction +handling to a `PlatformTransactionManager` instance (which can be a`HibernateTransactionManager` (for a single Hibernate `SessionFactory`) by using a`ThreadLocal` `Session` under the hood) or a `JtaTransactionManager` (delegating to the +JTA subsystem of the container) for Hibernate applications. You can even use a custom`PlatformTransactionManager` implementation. Switching from native Hibernate transaction +management to JTA (such as when facing distributed transaction requirements for certain +deployments of your application) is only a matter of configuration. You can replace +the Hibernate transaction manager with Spring’s JTA transaction implementation. Both +transaction demarcation and data access code work without changes, because they +use the generic transaction management APIs. + +For distributed transactions across multiple Hibernate session factories, you can combine`JtaTransactionManager` as a transaction strategy with multiple`LocalSessionFactoryBean` definitions. Each DAO then gets one specific `SessionFactory`reference passed into its corresponding bean property. If all underlying JDBC data +sources are transactional container ones, a business service can demarcate transactions +across any number of DAOs and any number of session factories without special regard, as +long as it uses `JtaTransactionManager` as the strategy. 
+ +Both `HibernateTransactionManager` and `JtaTransactionManager` allow for proper +JVM-level cache handling with Hibernate, without container-specific transaction manager +lookup or a JCA connector (if you do not use EJB to initiate transactions). + +`HibernateTransactionManager` can export the Hibernate JDBC `Connection` to plain JDBC +access code for a specific `DataSource`. This ability allows for high-level +transaction demarcation with mixed Hibernate and JDBC data access completely without +JTA, provided you access only one database. `HibernateTransactionManager` automatically +exposes the Hibernate transaction as a JDBC transaction if you have set up the passed-in`SessionFactory` with a `DataSource` through the `dataSource` property of the`LocalSessionFactoryBean` class. Alternatively, you can specify explicitly the`DataSource` for which the transactions are supposed to be exposed through the`dataSource` property of the `HibernateTransactionManager` class. + +#### 5.3.6. Comparing Container-managed and Locally Defined Resources + +You can switch between a container-managed JNDI `SessionFactory` and a locally defined +one without having to change a single line of application code. Whether to keep +resource definitions in the container or locally within the application is mainly a +matter of the transaction strategy that you use. Compared to a Spring-defined local`SessionFactory`, a manually registered JNDI `SessionFactory` does not provide any +benefits. Deploying a `SessionFactory` through Hibernate’s JCA connector provides the +added value of participating in the Java EE server’s management infrastructure, but does +not add actual value beyond that. + +Spring’s transaction support is not bound to a container. When configured with any strategy +other than JTA, transaction support also works in a stand-alone or test environment. 
+Especially in the typical case of single-database transactions, Spring’s single-resource +local transaction support is a lightweight and powerful alternative to JTA. When you use +local EJB stateless session beans to drive transactions, you depend both on an EJB +container and on JTA, even if you access only a single database and use only stateless +session beans to provide declarative transactions through container-managed +transactions. Direct use of JTA programmatically also requires a Java EE environment. +JTA does not involve only container dependencies in terms of JTA itself and of +JNDI `DataSource` instances. For non-Spring, JTA-driven Hibernate transactions, you have +to use the Hibernate JCA connector or extra Hibernate transaction code with the`TransactionManagerLookup` configured for proper JVM-level caching. + +Spring-driven transactions can work as well with a locally defined Hibernate`SessionFactory` as they do with a local JDBC `DataSource`, provided they access a +single database. Thus, you need only use Spring’s JTA transaction strategy when you +have distributed transaction requirements. A JCA connector requires container-specific +deployment steps, and (obviously) JCA support in the first place. This configuration +requires more work than deploying a simple web application with local resource +definitions and Spring-driven transactions. Also, you often need the Enterprise Edition +of your container if you use, for example, WebLogic Express, which does not +provide JCA. A Spring application with local resources and transactions that span one +single database works in any Java EE web container (without JTA, JCA, or EJB), such as +Tomcat, Resin, or even plain Jetty. Additionally, you can easily reuse such a middle +tier in desktop applications or test suites. + +All things considered, if you do not use EJBs, stick with local `SessionFactory` setup +and Spring’s `HibernateTransactionManager` or `JtaTransactionManager`. 
You get all of +the benefits, including proper transactional JVM-level caching and distributed +transactions, without the inconvenience of container deployment. JNDI registration of a +Hibernate `SessionFactory` through the JCA connector adds value only when used in +conjunction with EJBs. + +#### 5.3.7. Spurious Application Server Warnings with Hibernate + +In some JTA environments with very strict `XADataSource` implementations (currently +some WebLogic Server and WebSphere versions), when Hibernate is configured without +regard to the JTA transaction manager for that environment, spurious warnings or +exceptions can show up in the application server log. These warnings or exceptions +indicate that the connection being accessed is no longer valid or JDBC access is no +longer valid, possibly because the transaction is no longer active. As an example, +here is an actual exception from WebLogic: + +``` +java.sql.SQLException: The transaction is no longer active - status: 'Committed'. No +further JDBC access is allowed within this transaction. +``` + +Another common problem is a connection leak after JTA transactions, with Hibernate +sessions (and potentially underlying JDBC connections) not getting closed properly. + +You can resolve such issues by making Hibernate aware of the JTA transaction manager, +to which it synchronizes (along with Spring). You have two options for doing this: + +* Pass your Spring `JtaTransactionManager` bean to your Hibernate setup. The easiest + way is a bean reference into the `jtaTransactionManager` property for your`LocalSessionFactoryBean` bean (see [Hibernate Transaction Setup](#transaction-strategies-hibernate)). + Spring then makes the corresponding JTA strategies available to Hibernate. 
+ +* You may also configure Hibernate’s JTA-related properties explicitly, in particular + "hibernate.transaction.coordinator\_class", "hibernate.connection.handling\_mode" + and potentially "hibernate.transaction.jta.platform" in your "hibernateProperties" + on `LocalSessionFactoryBean` (see Hibernate’s manual for details on those properties). + +The remainder of this section describes the sequence of events that occur with and +without Hibernate’s awareness of the JTA `PlatformTransactionManager`. + +When Hibernate is not configured with any awareness of the JTA transaction manager, +the following events occur when a JTA transaction commits: + +* The JTA transaction commits. + +* Spring’s `JtaTransactionManager` is synchronized to the JTA transaction, so it is + called back through an `afterCompletion` callback by the JTA transaction manager. + +* Among other activities, this synchronization can trigger a callback by Spring to + Hibernate, through Hibernate’s `afterTransactionCompletion` callback (used to clear + the Hibernate cache), followed by an explicit `close()` call on the Hibernate session, + which causes Hibernate to attempt to `close()` the JDBC Connection. + +* In some environments, this `Connection.close()` call then triggers the warning or + error, as the application server no longer considers the `Connection` to be usable, + because the transaction has already been committed. + +When Hibernate is configured with awareness of the JTA transaction manager, +the following events occur when a JTA transaction commits: + +* The JTA transaction is ready to commit. + +* Spring’s `JtaTransactionManager` is synchronized to the JTA transaction, so the + transaction is called back through a `beforeCompletion` callback by the JTA + transaction manager. + +* Spring is aware that Hibernate itself is synchronized to the JTA transaction and + behaves differently than in the previous scenario. 
In particular, it aligns with + Hibernate’s transactional resource management. + +* The JTA transaction commits. + +* Hibernate is synchronized to the JTA transaction, so the transaction is called back + through an `afterCompletion` callback by the JTA transaction manager and can + properly clear its cache. + +### 5.4. JPA + +The Spring JPA, available under the `org.springframework.orm.jpa` package, offers +comprehensive support for the[Java Persistence +API](https://www.oracle.com/technetwork/articles/javaee/jpa-137156.html) in a manner similar to the integration with Hibernate while being aware of +the underlying implementation in order to provide additional features. + +#### 5.4.1. Three Options for JPA Setup in a Spring Environment + +The Spring JPA support offers three ways of setting up the JPA `EntityManagerFactory`that is used by the application to obtain an entity manager. + +* [Using `LocalEntityManagerFactoryBean`](#orm-jpa-setup-lemfb) + +* [Obtaining an EntityManagerFactory from JNDI](#orm-jpa-setup-jndi) + +* [Using `LocalContainerEntityManagerFactoryBean`](#orm-jpa-setup-lcemfb) + +##### Using `LocalEntityManagerFactoryBean` + +You can use this option only in simple deployment environments such as stand-alone +applications and integration tests. + +The `LocalEntityManagerFactoryBean` creates an `EntityManagerFactory` suitable for +simple deployment environments where the application uses only JPA for data access. +The factory bean uses the JPA `PersistenceProvider` auto-detection mechanism (according +to JPA’s Java SE bootstrapping) and, in most cases, requires you to specify only the +persistence unit name. The following XML example configures such a bean: + +``` +<beans> + <bean id="myEmf" class="org.springframework.orm.jpa.LocalEntityManagerFactoryBean"> + <property name="persistenceUnitName" value="myPersistenceUnit"/> + </bean> +</beans> +``` + +This form of JPA deployment is the simplest and the most limited. 
You cannot refer to an +existing JDBC `DataSource` bean definition, and no support for global transactions +exists. Furthermore, weaving (byte-code transformation) of persistent classes is +provider-specific, often requiring a specific JVM agent to be specified on startup. This +option is sufficient only for stand-alone applications and test environments, for which +the JPA specification is designed. + +##### Obtaining an EntityManagerFactory from JNDI + +You can use this option when deploying to a Java EE server. Check your server’s documentation +on how to deploy a custom JPA provider into your server, allowing for a different +provider than the server’s default. + +Obtaining an `EntityManagerFactory` from JNDI (for example in a Java EE environment), +is a matter of changing the XML configuration, as the following example shows: + +``` +<beans> + <jee:jndi-lookup id="myEmf" jndi-name="persistence/myPersistenceUnit"/> +</beans> +``` + +This action assumes standard Java EE bootstrapping. The Java EE server auto-detects +persistence units (in effect, `META-INF/persistence.xml` files in application jars) and`persistence-unit-ref` entries in the Java EE deployment descriptor (for example,`web.xml`) and defines environment naming context locations for those persistence units. + +In such a scenario, the entire persistence unit deployment, including the weaving +(byte-code transformation) of persistent classes, is up to the Java EE server. The JDBC`DataSource` is defined through a JNDI location in the `META-INF/persistence.xml` file.`EntityManager` transactions are integrated with the server’s JTA subsystem. Spring merely +uses the obtained `EntityManagerFactory`, passing it on to application objects through +dependency injection and managing transactions for the persistence unit (typically +through `JtaTransactionManager`). 
+ +If you use multiple persistence units in the same application, the bean names of such +JNDI-retrieved persistence units should match the persistence unit names that the +application uses to refer to them (for example, in `@PersistenceUnit` and`@PersistenceContext` annotations). + +##### Using `LocalContainerEntityManagerFactoryBean` + +You can use this option for full JPA capabilities in a Spring-based application environment. +This includes web containers such as Tomcat, stand-alone applications, and +integration tests with sophisticated persistence requirements. + +| |If you want to specifically configure a Hibernate setup, an immediate alternative<br/>is to set up a native Hibernate `LocalSessionFactoryBean` instead of a plain JPA`LocalContainerEntityManagerFactoryBean`, letting it interact with JPA access code<br/>as well as native Hibernate access code.<br/>See [Native Hibernate setup for JPA interaction](#orm-jpa-hibernate) for details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `LocalContainerEntityManagerFactoryBean` gives full control over`EntityManagerFactory` configuration and is appropriate for environments where +fine-grained customization is required. The `LocalContainerEntityManagerFactoryBean`creates a `PersistenceUnitInfo` instance based on the `persistence.xml` file, the +supplied `dataSourceLookup` strategy, and the specified `loadTimeWeaver`. It is, thus, +possible to work with custom data sources outside of JNDI and to control the weaving +process. 
The following example shows a typical bean definition for a `LocalContainerEntityManagerFactoryBean`:
+
+```
+<beans>
+    <bean id="myEmf" class="org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean">
+        <property name="dataSource" ref="someDataSource"/>
+        <property name="loadTimeWeaver">
+            <bean class="org.springframework.instrument.classloading.InstrumentationLoadTimeWeaver"/>
+        </property>
+    </bean>
+</beans>
+```
+
+The following example shows a typical `persistence.xml` file:
+
+```
+<persistence xmlns="http://java.sun.com/xml/ns/persistence" version="1.0">
+    <persistence-unit name="myUnit" transaction-type="RESOURCE_LOCAL">
+        <mapping-file>META-INF/orm.xml</mapping-file>
+        <exclude-unlisted-classes/>
+    </persistence-unit>
+</persistence>
+```
+
+| |The `<exclude-unlisted-classes/>` shortcut indicates that no scanning for<br/>annotated entity classes is supposed to occur. An explicit 'true' value<br/>(`<exclude-unlisted-classes>true</exclude-unlisted-classes>`) also means no scan. `<exclude-unlisted-classes>false</exclude-unlisted-classes>` does trigger a scan.<br/>However, we recommend omitting the `exclude-unlisted-classes` element<br/>if you want entity class scanning to occur.|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+Using the `LocalContainerEntityManagerFactoryBean` is the most powerful JPA setup
+option, allowing for flexible local configuration within the application. It supports
+links to an existing JDBC `DataSource`, supports both local and global transactions, and
+so on. 
However, it also imposes requirements on the runtime environment, such as the +availability of a weaving-capable class loader if the persistence provider demands +byte-code transformation. + +This option may conflict with the built-in JPA capabilities of a Java EE server. In a +full Java EE environment, consider obtaining your `EntityManagerFactory` from JNDI. +Alternatively, specify a custom `persistenceXmlLocation` on your`LocalContainerEntityManagerFactoryBean` definition (for example, +META-INF/my-persistence.xml) and include only a descriptor with that name in your +application jar files. Because the Java EE server looks only for default`META-INF/persistence.xml` files, it ignores such custom persistence units and, hence, +avoids conflicts with a Spring-driven JPA setup upfront. (This applies to Resin 3.1, for +example.) + +When is load-time weaving required? + +Not all JPA providers require a JVM agent. Hibernate is an example of one that does not. +If your provider does not require an agent or you have other alternatives, such as +applying enhancements at build time through a custom compiler or an Ant task, you should not use the +load-time weaver. + +The `LoadTimeWeaver` interface is a Spring-provided class that lets JPA`ClassTransformer` instances be plugged in a specific manner, depending on whether the +environment is a web container or application server. Hooking `ClassTransformers`through an[agent](https://docs.oracle.com/javase/6/docs/api/java/lang/instrument/package-summary.html)is typically not efficient. The agents work against the entire virtual machine and +inspect every class that is loaded, which is usually undesirable in a production +server environment. + +Spring provides a number of `LoadTimeWeaver` implementations for various environments, +letting `ClassTransformer` instances be applied only for each class loader and not +for each VM. 
+ +See the [Spring configuration](core.html#aop-aj-ltw-spring) in the AOP chapter for +more insight regarding the `LoadTimeWeaver` implementations and their setup, either +generic or customized to various platforms (such as Tomcat, JBoss and WebSphere). + +As described in [Spring configuration](core.html#aop-aj-ltw-spring), you can configure +a context-wide `LoadTimeWeaver` by using the `@EnableLoadTimeWeaving` annotation or the`context:load-time-weaver` XML element. Such a global weaver is automatically picked up +by all JPA `LocalContainerEntityManagerFactoryBean` instances. The following example +shows the preferred way of setting up a load-time weaver, delivering auto-detection +of the platform (e.g. Tomcat’s weaving-capable class loader or Spring’s JVM agent) +and automatic propagation of the weaver to all weaver-aware beans: + +``` +<context:load-time-weaver/> +<bean id="emf" class="org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean"> + ... +</bean> +``` + +However, you can, if needed, manually specify a dedicated weaver through the`loadTimeWeaver` property, as the following example shows: + +``` +<bean id="emf" class="org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean"> + <property name="loadTimeWeaver"> + <bean class="org.springframework.instrument.classloading.ReflectiveLoadTimeWeaver"/> + </property> +</bean> +``` + +No matter how the LTW is configured, by using this technique, JPA applications relying on +instrumentation can run in the target platform (for example, Tomcat) without needing an agent. +This is especially important when the hosting applications rely on different JPA +implementations, because the JPA transformers are applied only at the class-loader level and +are, thus, isolated from each other. 
+ +##### Dealing with Multiple Persistence Units + +For applications that rely on multiple persistence units locations (stored in various +JARS in the classpath, for example), Spring offers the `PersistenceUnitManager` to act as +a central repository and to avoid the persistence units discovery process, which can be +expensive. The default implementation lets multiple locations be specified. These locations are +parsed and later retrieved through the persistence unit name. (By default, the classpath +is searched for `META-INF/persistence.xml` files.) The following example configures +multiple locations: + +``` +<bean id="pum" class="org.springframework.orm.jpa.persistenceunit.DefaultPersistenceUnitManager"> + <property name="persistenceXmlLocations"> + <list> + <value>org/springframework/orm/jpa/domain/persistence-multi.xml</value> + <value>classpath:/my/package/**/custom-persistence.xml</value> + <value>classpath*:META-INF/persistence.xml</value> + </list> + </property> + <property name="dataSources"> + <map> + <entry key="localDataSource" value-ref="local-db"/> + <entry key="remoteDataSource" value-ref="remote-db"/> + </map> + </property> + <!-- if no datasource is specified, use this one --> + <property name="defaultDataSource" ref="remoteDataSource"/> +</bean> + +<bean id="emf" class="org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean"> + <property name="persistenceUnitManager" ref="pum"/> + <property name="persistenceUnitName" value="myCustomUnit"/> +</bean> +``` + +The default implementation allows customization of the `PersistenceUnitInfo` instances +(before they are fed to the JPA provider) either declaratively (through its properties, which +affect all hosted units) or programmatically (through the`PersistenceUnitPostProcessor`, which allows persistence unit selection). If no`PersistenceUnitManager` is specified, one is created and used internally by`LocalContainerEntityManagerFactoryBean`. 
+ +##### Background Bootstrapping + +`LocalContainerEntityManagerFactoryBean` supports background bootstrapping through +the `bootstrapExecutor` property, as the following example shows: + +``` +<bean id="emf" class="org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean"> + <property name="bootstrapExecutor"> + <bean class="org.springframework.core.task.SimpleAsyncTaskExecutor"/> + </property> +</bean> +``` + +The actual JPA provider bootstrapping is handed off to the specified executor and then, +running in parallel, to the application bootstrap thread. The exposed `EntityManagerFactory`proxy can be injected into other application components and is even able to respond to`EntityManagerFactoryInfo` configuration inspection. However, once the actual JPA provider +is being accessed by other components (for example, calling `createEntityManager`), those calls +block until the background bootstrapping has completed. In particular, when you use +Spring Data JPA, make sure to set up deferred bootstrapping for its repositories as well. + +#### 5.4.2. Implementing DAOs Based on JPA: `EntityManagerFactory` and `EntityManager` + +| |Although `EntityManagerFactory` instances are thread-safe, `EntityManager` instances are<br/>not. The injected JPA `EntityManager` behaves like an `EntityManager` fetched from an<br/>application server’s JNDI environment, as defined by the JPA specification. It delegates<br/>all calls to the current transactional `EntityManager`, if any. 
Otherwise, it falls back<br/>to a newly created `EntityManager` per operation, in effect making its usage thread-safe.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +It is possible to write code against the plain JPA without any Spring dependencies, by +using an injected `EntityManagerFactory` or `EntityManager`. Spring can understand the`@PersistenceUnit` and `@PersistenceContext` annotations both at the field and the method level +if a `PersistenceAnnotationBeanPostProcessor` is enabled. The following example shows a plain JPA DAO implementation +that uses the `@PersistenceUnit` annotation: + +Java + +``` +public class ProductDaoImpl implements ProductDao { + + private EntityManagerFactory emf; + + @PersistenceUnit + public void setEntityManagerFactory(EntityManagerFactory emf) { + this.emf = emf; + } + + public Collection loadProductsByCategory(String category) { + EntityManager em = this.emf.createEntityManager(); + try { + Query query = em.createQuery("from Product as p where p.category = ?1"); + query.setParameter(1, category); + return query.getResultList(); + } + finally { + if (em != null) { + em.close(); + } + } + } +} +``` + +Kotlin + +``` +class ProductDaoImpl : ProductDao { + + private lateinit var emf: EntityManagerFactory + + @PersistenceUnit + fun setEntityManagerFactory(emf: EntityManagerFactory) { + this.emf = emf + } + + fun loadProductsByCategory(category: String): Collection<*> { + val em = this.emf.createEntityManager() + val query = em.createQuery("from Product as p where p.category = ?1"); + query.setParameter(1, category); + 
return query.resultList; + } +} +``` + +The preceding DAO has no dependency on Spring and still fits nicely into a Spring +application context. Moreover, the DAO takes advantage of annotations to require the +injection of the default `EntityManagerFactory`, as the following example bean definition shows: + +``` +<beans> + + <!-- bean post-processor for JPA annotations --> + <bean class="org.springframework.orm.jpa.support.PersistenceAnnotationBeanPostProcessor"/> + + <bean id="myProductDao" class="product.ProductDaoImpl"/> + +</beans> +``` + +As an alternative to explicitly defining a `PersistenceAnnotationBeanPostProcessor`, +consider using the Spring `context:annotation-config` XML element in your application +context configuration. Doing so automatically registers all Spring standard +post-processors for annotation-based configuration, including`CommonAnnotationBeanPostProcessor` and so on. + +Consider the following example: + +``` +<beans> + + <!-- post-processors for all standard config annotations --> + <context:annotation-config/> + + <bean id="myProductDao" class="product.ProductDaoImpl"/> + +</beans> +``` + +The main problem with such a DAO is that it always creates a new `EntityManager` through +the factory. You can avoid this by requesting a transactional `EntityManager` (also +called a “shared EntityManager” because it is a shared, thread-safe proxy for the actual +transactional EntityManager) to be injected instead of the factory. 
The following example shows how to do so: + +Java + +``` +public class ProductDaoImpl implements ProductDao { + + @PersistenceContext + private EntityManager em; + + public Collection loadProductsByCategory(String category) { + Query query = em.createQuery("from Product as p where p.category = :category"); + query.setParameter("category", category); + return query.getResultList(); + } +} +``` + +Kotlin + +``` +class ProductDaoImpl : ProductDao { + + @PersistenceContext + private lateinit var em: EntityManager + + fun loadProductsByCategory(category: String): Collection<*> { + val query = em.createQuery("from Product as p where p.category = :category") + query.setParameter("category", category) + return query.resultList + } +} +``` + +The `@PersistenceContext` annotation has an optional attribute called `type`, which defaults to`PersistenceContextType.TRANSACTION`. You can use this default to receive a shared`EntityManager` proxy. The alternative, `PersistenceContextType.EXTENDED`, is a completely +different affair. This results in a so-called extended `EntityManager`, which is not +thread-safe and, hence, must not be used in a concurrently accessed component, such as a +Spring-managed singleton bean. Extended `EntityManager` instances are only supposed to be used in +stateful components that, for example, reside in a session, with the lifecycle of the`EntityManager` not tied to a current transaction but rather being completely up to the +application. + +Method- and field-level Injection + +You can apply annotations that indicate dependency injections (such as `@PersistenceUnit` and`@PersistenceContext`) on field or methods inside a class — hence the +expressions “method-level injection” and “field-level injection”. Field-level +annotations are concise and easier to use while method-level annotations allow for further +processing of the injected dependency. In both cases, the member visibility (public, +protected, or private) does not matter. 
+ +What about class-level annotations? + +On the Java EE platform, they are used for dependency declaration and not for resource +injection. + +The injected `EntityManager` is Spring-managed (aware of the ongoing transaction). +Even though the new DAO implementation uses method-level +injection of an `EntityManager` instead of an `EntityManagerFactory`, no change is +required in the application context XML, due to annotation usage. + +The main advantage of this DAO style is that it depends only on the Java Persistence API. +No import of any Spring class is required. Moreover, as the JPA annotations are understood, +the injections are applied automatically by the Spring container. This is appealing from +a non-invasiveness perspective and can feel more natural to JPA developers. + +#### 5.4.3. Spring-driven JPA transactions + +| |We strongly encourage you to read [Declarative Transaction Management](#transaction-declarative), if you have not<br/>already done so, to get more detailed coverage of Spring’s declarative transaction support.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The recommended strategy for JPA is local transactions through JPA’s native transaction +support. Spring’s `JpaTransactionManager` provides many capabilities known from local +JDBC transactions (such as transaction-specific isolation levels and resource-level +read-only optimizations) against any regular JDBC connection pool (no XA requirement). + +Spring JPA also lets a configured `JpaTransactionManager` expose a JPA transaction +to JDBC access code that accesses the same `DataSource`, provided that the registered`JpaDialect` supports retrieval of the underlying JDBC `Connection`. +Spring provides dialects for the EclipseLink and Hibernate JPA implementations. 
+See the [next section](#orm-jpa-dialect) for details on the `JpaDialect` mechanism.
+
+| |As an immediate alternative, Spring’s native `HibernateTransactionManager` is capable<br/>of interacting with JPA access code, adapting to several Hibernate specifics and providing<br/>JDBC interaction. This makes particular sense in combination with `LocalSessionFactoryBean` setup. See [Native Hibernate Setup for JPA Interaction](#orm-jpa-hibernate) for details.|
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 5.4.4. Understanding `JpaDialect` and `JpaVendorAdapter`
+
+As an advanced feature, `JpaTransactionManager` and subclasses of `AbstractEntityManagerFactoryBean` allow a custom `JpaDialect` to be passed into the `jpaDialect` bean property. A `JpaDialect` implementation can enable the following advanced
+features supported by Spring, usually in a vendor-specific manner:
+
+* Applying specific transaction semantics (such as custom isolation level or transaction
+  timeout)
+
+* Retrieving the transactional JDBC `Connection` (for exposure to JDBC-based DAOs)
+
+* Advanced translation of `PersistenceExceptions` to Spring `DataAccessExceptions`
+
+This is particularly valuable for special transaction semantics and for advanced
+translation of exceptions. The default implementation (`DefaultJpaDialect`) does
+not provide any special abilities and, if the features listed earlier are required, you have
+to specify the appropriate dialect.
+ +| |As an even broader provider adaptation facility primarily for Spring’s full-featured`LocalContainerEntityManagerFactoryBean` setup, `JpaVendorAdapter` combines the<br/>capabilities of `JpaDialect` with other provider-specific defaults. Specifying a`HibernateJpaVendorAdapter` or `EclipseLinkJpaVendorAdapter` is the most convenient<br/>way of auto-configuring an `EntityManagerFactory` setup for Hibernate or EclipseLink,<br/>respectively. Note that those provider adapters are primarily designed for use with<br/>Spring-driven transaction management (that is, for use with `JpaTransactionManager`).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See the [`JpaDialect`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/orm/jpa/JpaDialect.html) and[`JpaVendorAdapter`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/orm/jpa/JpaVendorAdapter.html) javadoc for +more details of its operations and how they are used within Spring’s JPA support. + +#### 5.4.5. Setting up JPA with JTA Transaction Management + +As an alternative to `JpaTransactionManager`, Spring also allows for multi-resource +transaction coordination through JTA, either in a Java EE environment or with a +stand-alone transaction coordinator, such as Atomikos. 
Aside from choosing Spring’s `JtaTransactionManager` instead of `JpaTransactionManager`, you need to take a few further
+steps:
+
+* The underlying JDBC connection pools need to be XA-capable and be integrated with
+  your transaction coordinator. This is usually straightforward in a Java EE environment,
+  exposing a different kind of `DataSource` through JNDI. See your application server
+  documentation for details. Analogously, a standalone transaction coordinator usually
+  comes with special XA-integrated `DataSource` variants. Again, check its documentation.
+
+* The JPA `EntityManagerFactory` setup needs to be configured for JTA. This is
+  provider-specific, typically through special properties to be specified as `jpaProperties` on `LocalContainerEntityManagerFactoryBean`. In the case of Hibernate, these properties
+  are even version-specific. See your Hibernate documentation for details.
+
+* Spring’s `HibernateJpaVendorAdapter` enforces certain Spring-oriented defaults, such
+  as the connection release mode, `on-close`, which matches Hibernate’s own default in
+  Hibernate 5.0 but not any more in Hibernate 5.1+. For a JTA setup, make sure to declare
+  your persistence unit transaction type as "JTA". Alternatively, set Hibernate 5.2’s `hibernate.connection.handling_mode` property to `DELAYED_ACQUISITION_AND_RELEASE_AFTER_STATEMENT` to restore Hibernate’s own default.
+  See [Spurious Application Server Warnings with Hibernate](#orm-hibernate-invalid-jdbc-access-error) for related notes.
+
+* Alternatively, consider obtaining the `EntityManagerFactory` from your application
+  server itself (that is, through a JNDI lookup instead of a locally declared `LocalContainerEntityManagerFactoryBean`). A server-provided `EntityManagerFactory` might require special definitions in your server configuration (making the deployment
+  less portable) but is set up for the server’s JTA environment.
+
+#### 5.4.6. 
Native Hibernate Setup and Native Hibernate Transactions for JPA Interaction + +A native `LocalSessionFactoryBean` setup in combination with `HibernateTransactionManager`allows for interaction with `@PersistenceContext` and other JPA access code. A Hibernate`SessionFactory` natively implements JPA’s `EntityManagerFactory` interface now +and a Hibernate `Session` handle natively is a JPA `EntityManager`. +Spring’s JPA support facilities automatically detect native Hibernate sessions. + +Such native Hibernate setup can, therefore, serve as a replacement for a standard JPA`LocalContainerEntityManagerFactoryBean` and `JpaTransactionManager` combination +in many scenarios, allowing for interaction with `SessionFactory.getCurrentSession()`(and also `HibernateTemplate`) next to `@PersistenceContext EntityManager` within +the same local transaction. Such a setup also provides stronger Hibernate integration +and more configuration flexibility, because it is not constrained by JPA bootstrap contracts. + +You do not need `HibernateJpaVendorAdapter` configuration in such a scenario, +since Spring’s native Hibernate setup provides even more features +(for example, custom Hibernate Integrator setup, Hibernate 5.3 bean container integration, +and stronger optimizations for read-only transactions). Last but not least, you can also +express native Hibernate setup through `LocalSessionFactoryBuilder`, +seamlessly integrating with `@Bean` style configuration (no `FactoryBean` involved). + +| |`LocalSessionFactoryBean` and `LocalSessionFactoryBuilder` support background<br/>bootstrapping, just as the JPA `LocalContainerEntityManagerFactoryBean` does.<br/>See [Background Bootstrapping](#orm-jpa-setup-background) for an introduction.<br/><br/>On `LocalSessionFactoryBean`, this is available through the `bootstrapExecutor`property. 
On the programmatic `LocalSessionFactoryBuilder`, an overloaded `buildSessionFactory` method takes a bootstrap executor argument.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+## 6. Marshalling XML by Using Object-XML Mappers
+
+### 6.1. Introduction
+
+This chapter describes Spring’s Object-XML Mapping support. Object-XML
+Mapping (O-X mapping for short) is the act of converting an XML document to and from
+an object. This conversion process is also known as XML Marshalling, or XML
+Serialization. This chapter uses these terms interchangeably.
+
+Within the field of O-X mapping, a marshaller is responsible for serializing an
+object (graph) to XML. In similar fashion, an unmarshaller deserializes the XML to
+an object graph. This XML can take the form of a DOM document, an input or output
+stream, or a SAX handler.
+
+Some of the benefits of using Spring for your O/X mapping needs are:
+
+* [Ease of configuration](#oxm-ease-of-configuration)
+
+* [Consistent Interfaces](#oxm-consistent-interfaces)
+
+* [Consistent Exception Hierarchy](#oxm-consistent-exception-hierarchy)
+
+#### 6.1.1. Ease of configuration
+
+Spring’s bean factory makes it easy to configure marshallers, without needing to
+construct JAXB context, JiBX binding factories, and so on. You can configure the marshallers
+as you would any other bean in your application context. Additionally, XML namespace-based
+configuration is available for a number of marshallers, making the configuration even
+simpler.
+
+#### 6.1.2. 
Consistent Interfaces + +Spring’s O-X mapping operates through two global interfaces: [`Marshaller`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/oxm/Marshaller.html) and[`Unmarshaller`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/oxm/Unmarshaller.html). These abstractions let you switch O-X mapping frameworks +with relative ease, with little or no change required on the classes that do the +marshalling. This approach has the additional benefit of making it possible to do XML +marshalling with a mix-and-match approach (for example, some marshalling performed using JAXB +and some by XStream) in a non-intrusive fashion, letting you use the strength of each +technology. + +#### 6.1.3. Consistent Exception Hierarchy + +Spring provides a conversion from exceptions from the underlying O-X mapping tool to its +own exception hierarchy with the `XmlMappingException` as the root exception. +These runtime exceptions wrap the original exception so that no information is lost. + +### 6.2. `Marshaller` and `Unmarshaller` + +As stated in the [introduction](#oxm-introduction), a marshaller serializes an object +to XML, and an unmarshaller deserializes XML stream to an object. This section describes +the two Spring interfaces used for this purpose. + +#### 6.2.1. Understanding `Marshaller` + +Spring abstracts all marshalling operations behind the`org.springframework.oxm.Marshaller` interface, the main method of which follows: + +``` +public interface Marshaller { + + /** + * Marshal the object graph with the given root into the provided Result. + */ + void marshal(Object graph, Result result) throws XmlMappingException, IOException; +} +``` + +The `Marshaller` interface has one main method, which marshals the given object to a +given `javax.xml.transform.Result`. The result is a tagging interface that basically +represents an XML output abstraction. 
Concrete implementations wrap various XML +representations, as the following table indicates: + +|Result implementation| Wraps XML representation | +|---------------------|-----------------------------------------------------------| +| `DOMResult` | `org.w3c.dom.Node` | +| `SAXResult` | `org.xml.sax.ContentHandler` | +| `StreamResult` |`java.io.File`, `java.io.OutputStream`, or `java.io.Writer`| + +| |Although the `marshal()` method accepts a plain object as its first parameter, most`Marshaller` implementations cannot handle arbitrary objects. Instead, an object class<br/>must be mapped in a mapping file, be marked with an annotation, be registered with the<br/>marshaller, or have a common base class. Refer to the later sections in this chapter<br/>to determine how your O-X technology manages this.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.2.2. Understanding `Unmarshaller` + +Similar to the `Marshaller`, we have the `org.springframework.oxm.Unmarshaller`interface, which the following listing shows: + +``` +public interface Unmarshaller { + + /** + * Unmarshal the given provided Source into an object graph. + */ + Object unmarshal(Source source) throws XmlMappingException, IOException; +} +``` + +This interface also has one method, which reads from the given`javax.xml.transform.Source` (an XML input abstraction) and returns the object read. As +with `Result`, `Source` is a tagging interface that has three concrete implementations. 
Each +wraps a different XML representation, as the following table indicates: + +|Source implementation| Wraps XML representation | +|---------------------|----------------------------------------------------------| +| `DOMSource` | `org.w3c.dom.Node` | +| `SAXSource` | `org.xml.sax.InputSource`, and `org.xml.sax.XMLReader` | +| `StreamSource` |`java.io.File`, `java.io.InputStream`, or `java.io.Reader`| + +Even though there are two separate marshalling interfaces (`Marshaller` and`Unmarshaller`), all implementations in Spring-WS implement both in one class. +This means that you can wire up one marshaller class and refer to it both as a +marshaller and as an unmarshaller in your `applicationContext.xml`. + +#### 6.2.3. Understanding `XmlMappingException` + +Spring converts exceptions from the underlying O-X mapping tool to its own exception +hierarchy with the `XmlMappingException` as the root exception. +These runtime exceptions wrap the original exception so that no information will be lost. + +Additionally, the `MarshallingFailureException` and `UnmarshallingFailureException`provide a distinction between marshalling and unmarshalling operations, even though the +underlying O-X mapping tool does not do so. + +The O-X Mapping exception hierarchy is shown in the following figure: + +![oxm exceptions](images/oxm-exceptions.png) + +### 6.3. Using `Marshaller` and `Unmarshaller` + +You can use Spring’s OXM for a wide variety of situations. In the following example, we +use it to marshal the settings of a Spring-managed application as an XML file. 
In the following example, we +use a simple JavaBean to represent the settings: + +Java + +``` +public class Settings { + + private boolean fooEnabled; + + public boolean isFooEnabled() { + return fooEnabled; + } + + public void setFooEnabled(boolean fooEnabled) { + this.fooEnabled = fooEnabled; + } +} +``` + +Kotlin + +``` +class Settings { + var isFooEnabled: Boolean = false +} +``` + +The application class uses this bean to store its settings. Besides a main method, the +class has two methods: `saveSettings()` saves the settings bean to a file named`settings.xml`, and `loadSettings()` loads these settings again. The following `main()` method +constructs a Spring application context and calls these two methods: + +Java + +``` +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.stream.StreamSource; +import org.springframework.context.ApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; +import org.springframework.oxm.Marshaller; +import org.springframework.oxm.Unmarshaller; + +public class Application { + + private static final String FILE_NAME = "settings.xml"; + private Settings settings = new Settings(); + private Marshaller marshaller; + private Unmarshaller unmarshaller; + + public void setMarshaller(Marshaller marshaller) { + this.marshaller = marshaller; + } + + public void setUnmarshaller(Unmarshaller unmarshaller) { + this.unmarshaller = unmarshaller; + } + + public void saveSettings() throws IOException { + try (FileOutputStream os = new FileOutputStream(FILE_NAME)) { + this.marshaller.marshal(settings, new StreamResult(os)); + } + } + + public void loadSettings() throws IOException { + try (FileInputStream is = new FileInputStream(FILE_NAME)) { + this.settings = (Settings) this.unmarshaller.unmarshal(new StreamSource(is)); + } + } + + public static void main(String[] args) throws IOException { 
+ ApplicationContext appContext = + new ClassPathXmlApplicationContext("applicationContext.xml"); + Application application = (Application) appContext.getBean("application"); + application.saveSettings(); + application.loadSettings(); + } +} +``` + +Kotlin + +``` +class Application { + + lateinit var marshaller: Marshaller + + lateinit var unmarshaller: Unmarshaller + + fun saveSettings() { + FileOutputStream(FILE_NAME).use { outputStream -> marshaller.marshal(settings, StreamResult(outputStream)) } + } + + fun loadSettings() { + FileInputStream(FILE_NAME).use { inputStream -> settings = unmarshaller.unmarshal(StreamSource(inputStream)) as Settings } + } +} + +private const val FILE_NAME = "settings.xml" + +fun main(args: Array<String>) { + val appContext = ClassPathXmlApplicationContext("applicationContext.xml") + val application = appContext.getBean("application") as Application + application.saveSettings() + application.loadSettings() +} +``` + +The `Application` requires both a `marshaller` and an `unmarshaller` property to be set. We +can do so by using the following `applicationContext.xml`: + +``` +<beans> + <bean id="application" class="Application"> + <property name="marshaller" ref="xstreamMarshaller" /> + <property name="unmarshaller" ref="xstreamMarshaller" /> + </bean> + <bean id="xstreamMarshaller" class="org.springframework.oxm.xstream.XStreamMarshaller"/> +</beans> +``` + +This application context uses XStream, but we could have used any of the other marshaller +instances described later in this chapter. Note that, by default, XStream does not require +any further configuration, so the bean definition is rather simple. Also note that the`XStreamMarshaller` implements both `Marshaller` and `Unmarshaller`, so we can refer to the`xstreamMarshaller` bean in both the `marshaller` and `unmarshaller` property of the +application. 
+ +This sample application produces the following `settings.xml` file: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<settings foo-enabled="false"/> +``` + +### 6.4. XML Configuration Namespace + +You can configure marshallers more concisely by using tags from the OXM namespace. +To make these tags available, you must first reference the appropriate schema in the +preamble of the XML configuration file. The following example shows how to do so: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:oxm="http://www.springframework.org/schema/oxm" (1) +xsi:schemaLocation="http://www.springframework.org/schema/beans + https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/oxm https://www.springframework.org/schema/oxm/spring-oxm.xsd"> (2) +``` + +|**1**| Reference the `oxm` schema. | +|-----|----------------------------------| +|**2**|Specify the `oxm` schema location.| + +The schema makes the following elements available: + +* [`jaxb2-marshaller`](#oxm-jaxb2-xsd) + +* [`jibx-marshaller`](#oxm-jibx-xsd) + +Each tag is explained in its respective marshaller’s section. As an example, though, +the configuration of a JAXB2 marshaller might resemble the following: + +``` +<oxm:jaxb2-marshaller id="marshaller" contextPath="org.springframework.ws.samples.airline.schema"/> +``` + +### 6.5. JAXB + +The JAXB binding compiler translates a W3C XML Schema into one or more Java classes, a`jaxb.properties` file, and possibly some resource files. JAXB also offers a way to +generate a schema from annotated Java classes. + +Spring supports the JAXB 2.0 API as XML marshalling strategies, following the`Marshaller` and `Unmarshaller` interfaces described in [`Marshaller` and `Unmarshaller`](#oxm-marshaller-unmarshaller). +The corresponding integration classes reside in the `org.springframework.oxm.jaxb`package. + +#### 6.5.1. 
Using `Jaxb2Marshaller` + +The `Jaxb2Marshaller` class implements both of Spring’s `Marshaller` and `Unmarshaller`interfaces. It requires a context path to operate. You can set the context path by setting the`contextPath` property. The context path is a list of colon-separated Java package +names that contain schema derived classes. It also offers a `classesToBeBound` property, +which allows you to set an array of classes to be supported by the marshaller. Schema +validation is performed by specifying one or more schema resources to the bean, as the following example shows: + +``` +<beans> + <bean id="jaxb2Marshaller" class="org.springframework.oxm.jaxb.Jaxb2Marshaller"> + <property name="classesToBeBound"> + <list> + <value>org.springframework.oxm.jaxb.Flight</value> + <value>org.springframework.oxm.jaxb.Flights</value> + </list> + </property> + <property name="schema" value="classpath:org/springframework/oxm/schema.xsd"/> + </bean> + + ... + +</beans> +``` + +##### XML Configuration Namespace + +The `jaxb2-marshaller` element configures a `org.springframework.oxm.jaxb.Jaxb2Marshaller`, +as the following example shows: + +``` +<oxm:jaxb2-marshaller id="marshaller" contextPath="org.springframework.ws.samples.airline.schema"/> +``` + +Alternatively, you can provide the list of classes to bind to the marshaller by using the`class-to-be-bound` child element: + +``` +<oxm:jaxb2-marshaller id="marshaller"> + <oxm:class-to-be-bound name="org.springframework.ws.samples.airline.schema.Airport"/> + <oxm:class-to-be-bound name="org.springframework.ws.samples.airline.schema.Flight"/> + ... +</oxm:jaxb2-marshaller> +``` + +The following table describes the available attributes: + +| Attribute | Description |Required| +|-------------|------------------------|--------| +| `id` |The ID of the marshaller| No | +|`contextPath`| The JAXB Context path | No | + +### 6.6. 
JiBX + +The JiBX framework offers a solution similar to that which Hibernate provides for ORM: A +binding definition defines the rules for how your Java objects are converted to or from +XML. After preparing the binding and compiling the classes, a JiBX binding compiler +enhances the class files and adds code to handle converting instances of the classes +from or to XML. + +For more information on JiBX, see the [JiBX web +site](http://jibx.sourceforge.net/). The Spring integration classes reside in the `org.springframework.oxm.jibx`package. + +#### 6.6.1. Using `JibxMarshaller` + +The `JibxMarshaller` class implements both the `Marshaller` and `Unmarshaller`interface. To operate, it requires the name of the class to marshal in, which you can +set using the `targetClass` property. Optionally, you can set the binding name by setting the`bindingName` property. In the following example, we bind the `Flights` class: + +``` +<beans> + <bean id="jibxFlightsMarshaller" class="org.springframework.oxm.jibx.JibxMarshaller"> + <property name="targetClass">org.springframework.oxm.jibx.Flights</property> + </bean> + ... +</beans> +``` + +A `JibxMarshaller` is configured for a single class. If you want to marshal multiple +classes, you have to configure multiple `JibxMarshaller` instances with different `targetClass`property values. + +##### XML Configuration Namespace + +The `jibx-marshaller` tag configures a `org.springframework.oxm.jibx.JibxMarshaller`, +as the following example shows: + +``` +<oxm:jibx-marshaller id="marshaller" target-class="org.springframework.ws.samples.airline.schema.Flight"/> +``` + +The following table describes the available attributes: + +| Attribute | Description |Required| +|--------------|----------------------------------------|--------| +| `id` | The ID of the marshaller | No | +|`target-class`| The target class for this marshaller | Yes | +|`bindingName` |The binding name used by this marshaller| No | + +### 6.7. 
XStream + +XStream is a simple library to serialize objects to XML and back again. It does not +require any mapping and generates clean XML. + +For more information on XStream, see the [XStream +web site](https://x-stream.github.io/). The Spring integration classes reside in the`org.springframework.oxm.xstream` package. + +#### 6.7.1. Using `XStreamMarshaller` + +The `XStreamMarshaller` does not require any configuration and can be configured in an +application context directly. To further customize the XML, you can set an alias map, +which consists of string aliases mapped to classes, as the following example shows: + +``` +<beans> + <bean id="xstreamMarshaller" class="org.springframework.oxm.xstream.XStreamMarshaller"> + <property name="aliases"> + <props> + <prop key="Flight">org.springframework.oxm.xstream.Flight</prop> + </props> + </property> + </bean> + ... +</beans> +``` + +| |By default, XStream lets arbitrary classes be unmarshalled, which can lead to<br/>unsafe Java serialization effects. 
As such, we do not recommend using the`XStreamMarshaller` to unmarshal XML from external sources (that is, the Web), as this can<br/>result in security vulnerabilities.<br/><br/>If you choose to use the `XStreamMarshaller` to unmarshal XML from an external source,<br/>set the `supportedClasses` property on the `XStreamMarshaller`, as the following example shows:<br/><br/>```<br/><bean id="xstreamMarshaller" class="org.springframework.oxm.xstream.XStreamMarshaller"><br/> <property name="supportedClasses" value="org.springframework.oxm.xstream.Flight"/><br/> ...<br/></bean><br/>```<br/><br/>Doing so ensures that only the registered classes are eligible for unmarshalling.<br/><br/>Additionally, you can register[custom<br/>converters](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/oxm/xstream/XStreamMarshaller.html#setConverters(com.thoughtworks.xstream.converters.ConverterMatcher…​)) to make sure that only your supported classes can be unmarshalled. You might<br/>want to add a `CatchAllConverter` as the last converter in the list, in addition to<br/>converters that explicitly support the domain classes that should be supported. 
As a<br/>result, default XStream converters with lower priorities and possible security<br/>vulnerabilities do not get invoked.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Note that XStream is an XML serialization library, not a data binding library.<br/>Therefore, it has limited namespace support. As a result, it is rather unsuitable for usage<br/>within Web Services.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 7. 
Appendix + +### 7.1. XML Schemas + +This part of the appendix lists XML schemas for data access, including the following: + +* [The `tx` Schema](#xsd-schemas-tx) + +* [The `jdbc` Schema](#xsd-schemas-jdbc) + +#### 7.1.1. The `tx` Schema + +The `tx` tags deal with configuring all of those beans in Spring’s comprehensive support +for transactions. These tags are covered in the chapter entitled[Transaction Management](#transaction). + +| |We strongly encourage you to look at the `'spring-tx.xsd'` file that ships with the<br/>Spring distribution. This file contains the XML Schema for Spring’s transaction<br/>configuration and covers all of the various elements in the `tx` namespace, including<br/>attribute defaults and similar information. This file is documented inline, and, thus,<br/>the information is not repeated here in the interests of adhering to the DRY (Don’t<br/>Repeat Yourself) principle.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In the interest of completeness, to use the elements in the `tx` schema, you need to have +the following preamble at the top of your Spring XML configuration file. 
The text in the +following snippet references the correct schema so that the tags in the `tx` namespace +are available to you: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:aop="http://www.springframework.org/schema/aop" + xmlns:tx="http://www.springframework.org/schema/tx" (1) + xsi:schemaLocation=" + http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/tx https://www.springframework.org/schema/tx/spring-tx.xsd (2) + http://www.springframework.org/schema/aop https://www.springframework.org/schema/aop/spring-aop.xsd"> + + <!-- bean definitions here --> + +</beans> +``` + +|**1**| Declare usage of the `tx` namespace. | +|-----|---------------------------------------------------| +|**2**|Specify the location (with other schema locations).| + +| |Often, when you use the elements in the `tx` namespace, you are also using the<br/>elements from the `aop` namespace (since the declarative transaction support in Spring is<br/>implemented by using AOP). The preceding XML snippet contains the relevant lines needed<br/>to reference the `aop` schema so that the elements in the `aop` namespace are available<br/>to you.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 7.1.2. The `jdbc` Schema + +The `jdbc` elements let you quickly configure an embedded database or initialize an +existing data source. 
These elements are documented in[Embedded Database Support](#jdbc-embedded-database-support) and[Initializing a DataSource](#jdbc-initializing-datasource), respectively. + +To use the elements in the `jdbc` schema, you need to have the following preamble at the +top of your Spring XML configuration file. The text in the following snippet references +the correct schema so that the elements in the `jdbc` namespace are available to you: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:jdbc="http://www.springframework.org/schema/jdbc" (1) + xsi:schemaLocation=" + http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/jdbc https://www.springframework.org/schema/jdbc/spring-jdbc.xsd"> (2) + + <!-- bean definitions here --> + +</beans> +``` diff --git a/docs/en/spring-framework/integration.md b/docs/en/spring-framework/integration.md new file mode 100644 index 0000000000000000000000000000000000000000..f394e20e3fc6729100e0aeb7356cdc3bde431ceb --- /dev/null +++ b/docs/en/spring-framework/integration.md @@ -0,0 +1,5545 @@ +# Integration + +This part of the reference documentation covers Spring Framework’s integration with +a number of technologies. + +## 1. REST Endpoints + +The Spring Framework provides two choices for making calls to REST endpoints: + +* [`RestTemplate`](#rest-resttemplate): The original Spring REST client with a synchronous, template + method API. + +* [WebClient](web-reactive.html#webflux-client): a non-blocking, reactive alternative + that supports both synchronous and asynchronous as well as streaming scenarios. + +| |As of 5.0 the `RestTemplate` is in maintenance mode, with only minor requests for<br/>changes and bugs to be accepted going forward. 
Please, consider using the[WebClient](web-reactive.html#webflux-client) which offers a more modern API and<br/>supports sync, async, and streaming scenarios.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.1. `RestTemplate` + +The `RestTemplate` provides a higher level API over HTTP client libraries. It makes it +easy to invoke REST endpoints in a single line. It exposes the following groups of +overloaded methods: + +| Method group | Description | +|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `getForObject` | Retrieves a representation via GET. | +| `getForEntity` | Retrieves a `ResponseEntity` (that is, status, headers, and body) by using GET. | +|`headForHeaders` | Retrieves all headers for a resource by using HEAD. | +|`postForLocation`| Creates a new resource by using POST and returns the `Location` header from the response. | +| `postForObject` | Creates a new resource by using POST and returns the representation from the response. | +| `postForEntity` | Creates a new resource by using POST and returns the representation from the response. | +| `put` | Creates or updates a resource by using PUT. | +|`patchForObject` | Updates a resource by using PATCH and returns the representation from the response.<br/>Note that the JDK `HttpURLConnection` does not support `PATCH`, but Apache<br/>HttpComponents and others do. 
| +| `delete` | Deletes the resources at the specified URI by using DELETE. | +|`optionsForAllow`| Retrieves allowed HTTP methods for a resource by using ALLOW. | +| `exchange` |More generalized (and less opinionated) version of the preceding methods that provides extra<br/>flexibility when needed. It accepts a `RequestEntity` (including HTTP method, URL, headers,<br/>and body as input) and returns a `ResponseEntity`.<br/><br/>These methods allow the use of `ParameterizedTypeReference` instead of `Class` to specify<br/>a response type with generics.| +| `execute` | The most generalized way to perform a request, with full control over request<br/>preparation and response extraction through callback interfaces. | + +#### 1.1.1. Initialization + +The default constructor uses `java.net.HttpURLConnection` to perform requests. You can +switch to a different HTTP library with an implementation of `ClientHttpRequestFactory`. +There is built-in support for the following: + +* Apache HttpComponents + +* Netty + +* OkHttp + +For example, to switch to Apache HttpComponents, you can use the following: + +``` +RestTemplate template = new RestTemplate(new HttpComponentsClientHttpRequestFactory()); +``` + +Each `ClientHttpRequestFactory` exposes configuration options specific to the underlying +HTTP client library — for example, for credentials, connection pooling, and other details. + +| |Note that the `java.net` implementation for HTTP requests can raise an exception when<br/>accessing the status of a response that represents an error (such as 401). 
If this is an<br/>issue, switch to another HTTP client library.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### URIs + +Many of the `RestTemplate` methods accept a URI template and URI template variables, +either as a `String` variable argument, or as `Map<String,String>`. + +The following example uses a `String` variable argument: + +``` +String result = restTemplate.getForObject( + "https://example.com/hotels/{hotel}/bookings/{booking}", String.class, "42", "21"); +``` + +The following example uses a `Map<String, String>`: + +``` +Map<String, String> vars = Collections.singletonMap("hotel", "42"); + +String result = restTemplate.getForObject( + "https://example.com/hotels/{hotel}/rooms/{hotel}", String.class, vars); +``` + +Keep in mind URI templates are automatically encoded, as the following example shows: + +``` +restTemplate.getForObject("https://example.com/hotel list", String.class); + +// Results in request to "https://example.com/hotel%20list" +``` + +You can use the `uriTemplateHandler` property of `RestTemplate` to customize how URIs +are encoded. Alternatively, you can prepare a `java.net.URI` and pass it into one of +the `RestTemplate` methods that accepts a `URI`. + +For more details on working with and encoding URIs, see [URI Links](web.html#mvc-uri-building). 
+ +##### Headers + +You can use the `exchange()` methods to specify request headers, as the following example shows: + +``` +String uriTemplate = "https://example.com/hotels/{hotel}"; +URI uri = UriComponentsBuilder.fromUriString(uriTemplate).build(42); + +RequestEntity<Void> requestEntity = RequestEntity.get(uri) + .header("MyRequestHeader", "MyValue") + .build(); + +ResponseEntity<String> response = template.exchange(requestEntity, String.class); + +String responseHeader = response.getHeaders().getFirst("MyResponseHeader"); +String body = response.getBody(); +``` + +You can obtain response headers through many `RestTemplate` method variants that return`ResponseEntity`. + +#### 1.1.2. Body + +Objects passed into and returned from `RestTemplate` methods are converted to and from raw +content with the help of an `HttpMessageConverter`. + +On a POST, an input object is serialized to the request body, as the following example shows: + +``` +URI location = template.postForLocation("https://example.com/people", person); +``` + +You need not explicitly set the Content-Type header of the request. In most cases, +you can find a compatible message converter based on the source `Object` type, and the chosen +message converter sets the content type accordingly. If necessary, you can use the`exchange` methods to explicitly provide the `Content-Type` request header, and that, in +turn, influences what message converter is selected. + +On a GET, the body of the response is deserialized to an output `Object`, as the following example shows: + +``` +Person person = restTemplate.getForObject("https://example.com/people/{id}", Person.class, 42); +``` + +The `Accept` header of the request does not need to be explicitly set. In most cases, +a compatible message converter can be found based on the expected response type, which +then helps to populate the `Accept` header. If necessary, you can use the `exchange`methods to provide the `Accept` header explicitly. 
+ +By default, `RestTemplate` registers all built-in[message converters](#rest-message-conversion), depending on classpath checks that help +to determine what optional conversion libraries are present. You can also set the message +converters to use explicitly. + +#### 1.1.3. Message Conversion + +[WebFlux](web-reactive.html#webflux-codecs) + +The `spring-web` module contains the `HttpMessageConverter` contract for reading and +writing the body of HTTP requests and responses through `InputStream` and `OutputStream`.`HttpMessageConverter` instances are used on the client side (for example, in the `RestTemplate`) and +on the server side (for example, in Spring MVC REST controllers). + +Concrete implementations for the main media (MIME) types are provided in the framework +and are, by default, registered with the `RestTemplate` on the client side and with`RequestMethodHandlerAdapter` on the server side (see[Configuring Message Converters](web.html#mvc-config-message-converters)). + +The implementations of `HttpMessageConverter` are described in the following sections. +For all converters, a default media type is used, but you can override it by setting the`supportedMediaTypes` bean property. 
The following table describes each implementation: + +| MessageConverter | Description | +|----------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `StringHttpMessageConverter` | An `HttpMessageConverter` implementation that can read and write `String` instances from the HTTP<br/>request and response. By default, this converter supports all text media types<br/>(`text/*`) and writes with a `Content-Type` of `text/plain`. | +| `FormHttpMessageConverter` |An `HttpMessageConverter` implementation that can read and write form data from the HTTP<br/>request and response. By default, this converter reads and writes the`application/x-www-form-urlencoded` media type. Form data is read from and written into a`MultiValueMap<String, String>`. The converter can also write (but not read) multipart<br/>data read from a `MultiValueMap<String, Object>`. By default, `multipart/form-data` is<br/>supported. As of Spring Framework 5.2, additional multipart subtypes can be supported for<br/>writing form data. Consult the javadoc for `FormHttpMessageConverter` for further details.| +| `ByteArrayHttpMessageConverter` | An `HttpMessageConverter` implementation that can read and write byte arrays from the<br/>HTTP request and response. By default, this converter supports all media types (`*/*`)<br/>and writes with a `Content-Type` of `application/octet-stream`. 
You can override this<br/>by setting the `supportedMediaTypes` property and overriding `getContentType(byte[])`. | +| `MarshallingHttpMessageConverter` | An `HttpMessageConverter` implementation that can read and write XML by using Spring’s`Marshaller` and `Unmarshaller` abstractions from the `org.springframework.oxm` package.<br/>This converter requires a `Marshaller` and `Unmarshaller` before it can be used. You can inject these<br/>through constructor or bean properties. By default, this converter supports`text/xml` and `application/xml`. | +| `MappingJackson2HttpMessageConverter` | An `HttpMessageConverter` implementation that can read and write JSON by using Jackson’s`ObjectMapper`. You can customize JSON mapping as needed through the use of Jackson’s<br/>provided annotations. When you need further control (for cases where custom JSON<br/>serializers/deserializers need to be provided for specific types), you can inject a custom `ObjectMapper`through the `ObjectMapper` property. By default, this<br/>converter supports `application/json`. | +|`MappingJackson2XmlHttpMessageConverter`| An `HttpMessageConverter` implementation that can read and write XML by using[Jackson XML](https://github.com/FasterXML/jackson-dataformat-xml) extension’s`XmlMapper`. You can customize XML mapping as needed through the use of JAXB<br/>or Jackson’s provided annotations. When you need further control (for cases where custom XML<br/>serializers/deserializers need to be provided for specific types), you can inject a custom `XmlMapper`through the `ObjectMapper` property. By default, this<br/>converter supports `application/xml`. | +| `SourceHttpMessageConverter` | An `HttpMessageConverter` implementation that can read and write`javax.xml.transform.Source` from the HTTP request and response. Only `DOMSource`,`SAXSource`, and `StreamSource` are supported. By default, this converter supports`text/xml` and `application/xml`. 
| +| `BufferedImageHttpMessageConverter` | An `HttpMessageConverter` implementation that can read and write`java.awt.image.BufferedImage` from the HTTP request and response. This converter reads<br/>and writes the media type supported by the Java I/O API. | + +#### 1.1.4. Jackson JSON Views + +You can specify a [Jackson JSON View](https://www.baeldung.com/jackson-json-view-annotation)to serialize only a subset of the object properties, as the following example shows: + +``` +MappingJacksonValue value = new MappingJacksonValue(new User("eric", "7!jd#h23")); +value.setSerializationView(User.WithoutPasswordView.class); + +RequestEntity<MappingJacksonValue> requestEntity = + RequestEntity.post(new URI("https://example.com/user")).body(value); + +ResponseEntity<String> response = template.exchange(requestEntity, String.class); +``` + +##### Multipart + +To send multipart data, you need to provide a `MultiValueMap<String, Object>` whose values +may be an `Object` for part content, a `Resource` for a file part, or an `HttpEntity` for +part content with headers. For example: + +``` +MultiValueMap<String, Object> parts = new LinkedMultiValueMap<>(); + +parts.add("fieldPart", "fieldValue"); +parts.add("filePart", new FileSystemResource("...logo.png")); +parts.add("jsonPart", new Person("Jason")); + +HttpHeaders headers = new HttpHeaders(); +headers.setContentType(MediaType.APPLICATION_XML); +parts.add("xmlPart", new HttpEntity<>(myBean, headers)); +``` + +In most cases, you do not have to specify the `Content-Type` for each part. The content +type is determined automatically based on the `HttpMessageConverter` chosen to serialize +it or, in the case of a `Resource` based on the file extension. If necessary, you can +explicitly provide the `MediaType` with an `HttpEntity` wrapper. 
+ +Once the `MultiValueMap` is ready, you can pass it to the `RestTemplate`, as shown below: + +``` +MultiValueMap<String, Object> parts = ...; +template.postForObject("https://example.com/upload", parts, Void.class); +``` + +If the `MultiValueMap` contains at least one non-`String` value, the `Content-Type` is set +to `multipart/form-data` by the `FormHttpMessageConverter`. If the `MultiValueMap` has `String` values, the `Content-Type` is defaulted to `application/x-www-form-urlencoded`. +If necessary, the `Content-Type` may also be set explicitly. + +### 1.2. Using `AsyncRestTemplate` (Deprecated) + +The `AsyncRestTemplate` is deprecated. For all use cases where you might consider using `AsyncRestTemplate`, use the [WebClient](web-reactive.html#webflux-client) instead. + +## 2. Remoting and Web Services + +Spring provides support for remoting with various technologies. +The remoting support eases the development of remote-enabled services, implemented +via Java interfaces and objects as input and output. Currently, Spring supports the +following remoting technologies: + +* [Java Web Services](#remoting-web-services): Spring provides remoting support for web services through JAX-WS. + +* [AMQP](#remoting-amqp): Remoting via AMQP as the underlying protocol is supported by the + separate Spring AMQP project. + +| |As of Spring Framework 5.3, support for several remoting technologies is now deprecated<br/>for security reasons and broader industry support. 
Supporting infrastructure will be removed<br/>from Spring Framework for its next major release.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following remoting technologies are now deprecated and will not be replaced: + +* [Remote Method Invocation (RMI)](#remoting-rmi): Through the use of `RmiProxyFactoryBean` and `RmiServiceExporter`, Spring supports both traditional RMI (with `java.rmi.Remote` interfaces and `java.rmi.RemoteException`) and transparent remoting through RMI + invokers (with any Java interface). + +* [Spring HTTP Invoker (Deprecated)](#remoting-httpinvoker): Spring provides a special remoting strategy that allows + for Java serialization through HTTP, supporting any Java interface (as the RMI + invoker does). The corresponding support classes are `HttpInvokerProxyFactoryBean` and `HttpInvokerServiceExporter`. + +* [Hessian](#remoting-caucho-protocols-hessian): By using Spring’s `HessianProxyFactoryBean` and the `HessianServiceExporter`, you can transparently expose your services through the + lightweight binary HTTP-based protocol provided by Caucho. + +* [JMS (Deprecated)](#remoting-jms): Remoting via JMS as the underlying protocol is supported through the `JmsInvokerServiceExporter` and `JmsInvokerProxyFactoryBean` classes in the `spring-jms` module. 
+ +While discussing the remoting capabilities of Spring, we use the following domain +model and corresponding services: + +``` +public class Account implements Serializable { + + private String name; + + public String getName(){ + return name; + } + + public void setName(String name) { + this.name = name; + } +} +``` + +``` +public interface AccountService { + + public void insertAccount(Account account); + + public List<Account> getAccounts(String name); +} +``` + +``` +// the implementation doing nothing at the moment +public class AccountServiceImpl implements AccountService { + + public void insertAccount(Account acc) { + // do something... + } + + public List<Account> getAccounts(String name) { + // do something... + } +} +``` + +This section starts by exposing the service to a remote client by using RMI and talks a bit +about the drawbacks of using RMI. It then continues with an example that uses Hessian as +the protocol. + +### 2.1. AMQP + +Remoting via AMQP as the underlying protocol is supported in the Spring AMQP project. +For further details please visit the [Spring Remoting](https://docs.spring.io/spring-amqp/docs/current/reference/html/#remoting) section of the Spring AMQP reference. + +| |Auto-detection is not implemented for remote interfaces.<br/><br/>The main reason why auto-detection of implemented interfaces does not occur for remote<br/>interfaces is to avoid opening too many doors to remote callers. The target object might<br/>implement internal callback interfaces, such as `InitializingBean` or `DisposableBean`, which one would not want to expose to callers.<br/><br/>Offering a proxy with all interfaces implemented by the target usually does not matter<br/>in the local case. However, when you export a remote service, you should expose a specific<br/>service interface, with specific operations intended for remote usage. 
Besides internal<br/>callback interfaces, the target might implement multiple business interfaces, with only<br/>one of them intended for remote exposure. For these reasons, we require such a<br/>service interface to be specified.<br/><br/>This is a trade-off between configuration convenience and the risk of accidental<br/>exposure of internal methods. Always specifying a service interface is not too much<br/>effort and puts you on the safe side regarding controlled exposure of specific methods.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.2. Considerations when Choosing a Technology + +Each and every technology presented here has its drawbacks. When choosing a technology, +you should carefully consider your needs, the services you expose, and the objects you +send over the wire. 
+ +When using RMI, you cannot access the objects through the HTTP protocol, +unless you tunnel the RMI traffic. RMI is a fairly heavy-weight protocol, in that it +supports full-object serialization, which is important when you use a complex data model +that needs serialization over the wire. However, RMI-JRMP is tied to Java clients. It is +a Java-to-Java remoting solution. + +Spring’s HTTP invoker is a good choice if you need HTTP-based remoting but also rely on +Java serialization. It shares the basic infrastructure with RMI invokers but uses +HTTP as transport. Note that HTTP invokers are not limited only to Java-to-Java remoting +but also to Spring on both the client and the server side. (The latter also applies to +Spring’s RMI invoker for non-RMI interfaces.) + +Hessian might provide significant value when operating in a heterogeneous environment, +because it explicitly allows for non-Java clients. However, non-Java support is still +limited. Known issues include the serialization of Hibernate objects in combination with +lazily-initialized collections. If you have such a data model, consider using RMI or +HTTP invokers instead of Hessian. + +JMS can be useful for providing clusters of services and letting the JMS broker take +care of load balancing, discovery, and auto-failover. By default, Java serialization is +used for JMS remoting, but the JMS provider could use a different mechanism for +the wire formatting, such as XStream to let servers be implemented in other +technologies. + +Last but not least, EJB has an advantage over RMI, in that it supports standard +role-based authentication and authorization and remote transaction propagation. It is +possible to get RMI invokers or HTTP invokers to support security context propagation as +well, although this is not provided by core Spring. Spring offers only appropriate hooks +for plugging in third-party or custom solutions. + +### 2.3. 
Java Web Services + +Spring provides full support for the standard Java web services APIs: + +* Exposing web services using JAX-WS + +* Accessing web services using JAX-WS + +In addition to stock support for JAX-WS in Spring Core, the Spring portfolio also +features [Spring Web Services](https://projects.spring.io/spring-ws), which is a solution for +contract-first, document-driven web services — highly recommended for building modern, +future-proof web services. + +#### 2.3.1. Exposing Servlet-based Web Services by Using JAX-WS + +Spring provides a convenient base class for JAX-WS servlet endpoint implementations:`SpringBeanAutowiringSupport`. To expose our `AccountService`, we extend Spring’s`SpringBeanAutowiringSupport` class and implement our business logic here, usually +delegating the call to the business layer. We use Spring’s `@Autowired`annotation to express such dependencies on Spring-managed beans. The following example +shows our class that extends `SpringBeanAutowiringSupport`: + +``` +/** + * JAX-WS compliant AccountService implementation that simply delegates + * to the AccountService implementation in the root web application context. + * + * This wrapper class is necessary because JAX-WS requires working with dedicated + * endpoint classes. If an existing service needs to be exported, a wrapper that + * extends SpringBeanAutowiringSupport for simple Spring bean autowiring (through + * the @Autowired annotation) is the simplest JAX-WS compliant way. + * + * This is the class registered with the server-side JAX-WS implementation. + * In the case of a Java EE server, this would simply be defined as a servlet + * in web.xml, with the server detecting that this is a JAX-WS endpoint and reacting + * accordingly. The servlet name usually needs to match the specified WS service name. + * + * The web service engine manages the lifecycle of instances of this class. + * Spring bean references will just be wired in here. 
+ */ +import org.springframework.web.context.support.SpringBeanAutowiringSupport; + +@WebService(serviceName="AccountService") +public class AccountServiceEndpoint extends SpringBeanAutowiringSupport { + + @Autowired + private AccountService biz; + + @WebMethod + public void insertAccount(Account acc) { + biz.insertAccount(acc); + } + + @WebMethod + public Account[] getAccounts(String name) { + return biz.getAccounts(name); + } +} +``` + +Our `AccountServiceEndpoint` needs to run in the same web application as the Spring +context to allow for access to Spring’s facilities. This is the case by default in Java +EE environments, using the standard contract for JAX-WS servlet endpoint deployment. +See the various Java EE web service tutorials for details. + +#### 2.3.2. Exporting Standalone Web Services by Using JAX-WS + +The built-in JAX-WS provider that comes with Oracle’s JDK supports exposure of web +services by using the built-in HTTP server that is also included in the JDK. Spring’s`SimpleJaxWsServiceExporter` detects all `@WebService`-annotated beans in the Spring +application context and exports them through the default JAX-WS server (the JDK HTTP +server). + +In this scenario, the endpoint instances are defined and managed as Spring beans +themselves. They are registered with the JAX-WS engine, but their lifecycle is up to +the Spring application context. This means that you can apply Spring functionality +(such as explicit dependency injection) to the endpoint instances. Annotation-driven +injection through `@Autowired` works as well. The following example shows how to +define these beans: + +``` +<bean class="org.springframework.remoting.jaxws.SimpleJaxWsServiceExporter"> + <property name="baseAddress" value="http://localhost:8080/"/> +</bean> + +<bean id="accountServiceEndpoint" class="example.AccountServiceEndpoint"> + ... +</bean> + +... 
+``` + +The `AccountServiceEndpoint` can but does not have to derive from Spring’s `SpringBeanAutowiringSupport`, +since the endpoint in this example is a fully Spring-managed bean. This means that +the endpoint implementation can be as follows (without any superclass declared — and Spring’s `@Autowired` configuration annotation is still honored): + +``` +@WebService(serviceName="AccountService") +public class AccountServiceEndpoint { + + @Autowired + private AccountService biz; + + @WebMethod + public void insertAccount(Account acc) { + biz.insertAccount(acc); + } + + @WebMethod + public List<Account> getAccounts(String name) { + return biz.getAccounts(name); + } +} +``` + +#### 2.3.3. Exporting Web Services by Using JAX-WS RI’s Spring Support #### + +Oracle’s JAX-WS RI, developed as part of the GlassFish project, ships Spring support +as part of its JAX-WS Commons project. This allows for defining JAX-WS endpoints as +Spring-managed beans, similar to the standalone mode discussed in the[previous section](#remoting-web-services-jaxws-export-standalone) — but this time in a Servlet environment. + +| |This is not portable in a Java EE environment. It is mainly intended for non-EE<br/>environments, such as Tomcat, that embed the JAX-WS RI as part of the web application.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The differences from the standard style of exporting servlet-based endpoints are that +the lifecycle of the endpoint instances themselves are managed by Spring and that there +is only one JAX-WS servlet defined in `web.xml`. With the standard Java EE style (as +shown earlier), you have one servlet definition per service endpoint, with each endpoint +typically delegating to Spring beans (through the use of `@Autowired`, as shown earlier). 
+ +See [https://jax-ws-commons.java.net/spring/](https://jax-ws-commons.java.net/spring/)for details on setup and usage style. + +#### 2.3.4. Accessing Web Services by Using JAX-WS + +Spring provides two factory beans to create JAX-WS web service proxies, namely`LocalJaxWsServiceFactoryBean` and `JaxWsPortProxyFactoryBean`. The former can +return only a JAX-WS service class for us to work with. The latter is the full-fledged +version that can return a proxy that implements our business service interface. +In the following example, we use `JaxWsPortProxyFactoryBean` to create a proxy for the`AccountService` endpoint (again): + +``` +<bean id="accountWebService" class="org.springframework.remoting.jaxws.JaxWsPortProxyFactoryBean"> + <property name="serviceInterface" value="example.AccountService"/> (1) + <property name="wsdlDocumentUrl" value="http://localhost:8888/AccountServiceEndpoint?WSDL"/> + <property name="namespaceUri" value="https://example/"/> + <property name="serviceName" value="AccountService"/> + <property name="portName" value="AccountServiceEndpointPort"/> +</bean> +``` + +|**1**|Where `serviceInterface` is our business interface that the clients use.| +|-----|------------------------------------------------------------------------| + +`wsdlDocumentUrl` is the URL for the WSDL file. Spring needs this at startup time to +create the JAX-WS Service. `namespaceUri` corresponds to the `targetNamespace` in the +.wsdl file. `serviceName` corresponds to the service name in the .wsdl file. `portName`corresponds to the port name in the .wsdl file. + +Accessing the web service is easy, as we have a bean factory for it that exposes it as +an interface called `AccountService`. The following example shows how we can wire this +up in Spring: + +``` +<bean id="client" class="example.AccountClientImpl"> + ... 
+ <property name="service" ref="accountWebService"/> +</bean> +``` + +From the client code, we can access the web service as if it were a normal class, +as the following example shows: + +``` +public class AccountClientImpl { + + private AccountService service; + + public void setService(AccountService service) { + this.service = service; + } + + public void foo() { + service.insertAccount(...); + } +} +``` + +| |The above is slightly simplified in that JAX-WS requires endpoint interfaces<br/>and implementation classes to be annotated with `@WebService`, `@SOAPBinding`, etc.<br/>annotations. This means that you cannot (easily) use plain Java interfaces and<br/>implementation classes as JAX-WS endpoint artifacts; you need to annotate them<br/>accordingly first. Check the JAX-WS documentation for details on those requirements.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.4. RMI (Deprecated) + +| |As of Spring Framework 5.3, RMI support is deprecated and will not be replaced.| +|---|-------------------------------------------------------------------------------| + +By using Spring’s support for RMI, you can transparently expose your services through the +RMI infrastructure. After having this set up, you basically have a configuration similar +to remote EJBs, except for the fact that there is no standard support for security +context propagation or remote transaction propagation. Spring does provide hooks for +such additional invocation context when you use the RMI invoker, so you can, for example, +plug in security frameworks or custom security credentials. + +#### 2.4.1. 
Exporting the Service by Using `RmiServiceExporter` + +Using the `RmiServiceExporter`, we can expose the interface of our AccountService object +as RMI object. The interface can be accessed by using `RmiProxyFactoryBean`, or via +plain RMI in case of a traditional RMI service. The `RmiServiceExporter` explicitly +supports the exposing of any non-RMI services via RMI invokers. + +We first have to set up our service in the Spring container. +The following example shows how to do so: + +``` +<bean id="accountService" class="example.AccountServiceImpl"> + <!-- any additional properties, maybe a DAO? --> +</bean> +``` + +Next, we have to expose our service by using `RmiServiceExporter`. +The following example shows how to do so: + +``` +<bean class="org.springframework.remoting.rmi.RmiServiceExporter"> + <!-- does not necessarily have to be the same name as the bean to be exported --> + <property name="serviceName" value="AccountService"/> + <property name="service" ref="accountService"/> + <property name="serviceInterface" value="example.AccountService"/> + <!-- defaults to 1099 --> + <property name="registryPort" value="1199"/> +</bean> +``` + +In the preceding example, we override the port for the RMI registry. Often, your application +server also maintains an RMI registry, and it is wise to not interfere with that one. +Furthermore, the service name is used to bind the service. So, in the preceding example, the +service is bound at `'rmi://HOST:1199/AccountService'`. We use this URL later on to link in +the service at the client side. + +| |The `servicePort` property has been omitted (it defaults to 0). This means that an<br/>anonymous port is used to communicate with the service.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.4.2. 
Linking in the Service at the Client + +Our client is a simple object that uses the `AccountService` to manage accounts, +as the following example shows: + +``` +public class SimpleObject { + + private AccountService accountService; + + public void setAccountService(AccountService accountService) { + this.accountService = accountService; + } + + // additional methods using the accountService +} +``` + +To link in the service on the client, we create a separate Spring container, +to contain the following simple object and the service linking configuration bits: + +``` +<bean class="example.SimpleObject"> + <property name="accountService" ref="accountService"/> +</bean> + +<bean id="accountService" class="org.springframework.remoting.rmi.RmiProxyFactoryBean"> + <property name="serviceUrl" value="rmi://HOST:1199/AccountService"/> + <property name="serviceInterface" value="example.AccountService"/> +</bean> +``` + +That is all we need to do to support the remote account service on the client. Spring +transparently creates an invoker and remotely enables the account service through the`RmiServiceExporter`. At the client, we link it in by using the `RmiProxyFactoryBean`. + +### 2.5. Using Hessian to Remotely Call Services through HTTP (Deprecated) + +| |As of Spring Framework 5.3, Hessian support is deprecated and will not be replaced.| +|---|-----------------------------------------------------------------------------------| + +Hessian offers a binary HTTP-based remoting protocol. It is developed by Caucho, +and you can find more information about Hessian itself at [https://www.caucho.com/](https://www.caucho.com/). + +#### 2.5.1. Hessian + +Hessian communicates through HTTP and does so by using a custom servlet. By using Spring’s`DispatcherServlet` principles (see [webmvc.html](webmvc.html#mvc-servlet)), we can wire up such a +servlet to expose your services. 
First, we have to create a new servlet in our application, +as shown in the following excerpt from `web.xml`: + +``` +<servlet> + <servlet-name>remoting</servlet-name> + <servlet-class>org.springframework.web.servlet.DispatcherServlet</servlet-class> + <load-on-startup>1</load-on-startup> +</servlet> + +<servlet-mapping> + <servlet-name>remoting</servlet-name> + <url-pattern>/remoting/*</url-pattern> +</servlet-mapping> +``` + +If you are familiar with Spring’s `DispatcherServlet` principles, you probably +know that now you have to create a Spring container configuration resource named `remoting-servlet.xml` (after the name of your servlet) in the `WEB-INF` directory. +The application context is used in the next section. + +Alternatively, consider the use of Spring’s simpler `HttpRequestHandlerServlet`. Doing so +lets you embed the remote exporter definitions in your root application context (by +default, in `WEB-INF/applicationContext.xml`), with individual servlet definitions +pointing to specific exporter beans. In this case, each servlet name needs to match the bean name of +its target exporter. + +#### 2.5.2. Exposing Your Beans by Using `HessianServiceExporter` + +In the newly created application context called `remoting-servlet.xml`, we create a `HessianServiceExporter` to export our services, as the following example shows: + +``` +<bean id="accountService" class="example.AccountServiceImpl"> + <!-- any additional properties, maybe a DAO? --> +</bean> + +<bean name="/AccountService" class="org.springframework.remoting.caucho.HessianServiceExporter"> + <property name="service" ref="accountService"/> + <property name="serviceInterface" value="example.AccountService"/> +</bean> +``` + +Now we are ready to link in the service at the client. No explicit handler mapping is +specified (to map request URLs onto services), so the `BeanNameUrlHandlerMapping` is used. 
Hence, the service is exported at the URL indicated through its bean name +within the containing `DispatcherServlet` instance’s mapping (as defined earlier):`[https://HOST:8080/remoting/AccountService](https://HOST:8080/remoting/AccountService)`. + +Alternatively, you can create a `HessianServiceExporter` in your root application context (for example, +in `WEB-INF/applicationContext.xml`), as the following example shows: + +``` +<bean name="accountExporter" class="org.springframework.remoting.caucho.HessianServiceExporter"> + <property name="service" ref="accountService"/> + <property name="serviceInterface" value="example.AccountService"/> +</bean> +``` + +In the latter case, you should define a corresponding servlet for this exporter in `web.xml`, +with the same end result: The exporter gets mapped to the request path at`/remoting/AccountService`. Note that the servlet name needs to match the bean name of +the target exporter. The following example shows how to do so: + +``` +<servlet> + <servlet-name>accountExporter</servlet-name> + <servlet-class>org.springframework.web.context.support.HttpRequestHandlerServlet</servlet-class> +</servlet> + +<servlet-mapping> + <servlet-name>accountExporter</servlet-name> + <url-pattern>/remoting/AccountService</url-pattern> +</servlet-mapping> +``` + +#### 2.5.3. Linking in the Service on the Client + +By using the `HessianProxyFactoryBean`, we can link in the service at the client. The same +principles apply as with the RMI example. 
We create a separate bean factory or +application context and mention the following beans where the `SimpleObject` is by using +the `AccountService` to manage accounts, as the following example shows: + +``` +<bean class="example.SimpleObject"> + <property name="accountService" ref="accountService"/> +</bean> + +<bean id="accountService" class="org.springframework.remoting.caucho.HessianProxyFactoryBean"> + <property name="serviceUrl" value="https://remotehost:8080/remoting/AccountService"/> + <property name="serviceInterface" value="example.AccountService"/> +</bean> +``` + +#### 2.5.4. Applying HTTP Basic Authentication to a Service Exposed through Hessian #### + +One of the advantages of Hessian is that we can easily apply HTTP basic authentication, +because both protocols are HTTP-based. Your normal HTTP server security mechanism can +be applied through using the `web.xml` security features, for example. Usually, +you need not use per-user security credentials here. Rather, you can use shared credentials that you define +at the `HessianProxyFactoryBean` level (similar to a JDBC `DataSource`), as the following example shows: + +``` +<bean class="org.springframework.web.servlet.handler.BeanNameUrlHandlerMapping"> + <property name="interceptors" ref="authorizationInterceptor"/> +</bean> + +<bean id="authorizationInterceptor" + class="org.springframework.web.servlet.handler.UserRoleAuthorizationInterceptor"> + <property name="authorizedRoles" value="administrator,operator"/> +</bean> +``` + +In the preceding example, we explicitly mention the `BeanNameUrlHandlerMapping` and set +an interceptor, to let only administrators and operators call the beans mentioned in +this application context. + +| |The preceding example does not show a flexible kind of security infrastructure. 
For<br/>more options as far as security is concerned, have a look at the Spring Security project<br/>at [https://projects.spring.io/spring-security/](https://projects.spring.io/spring-security/).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.6. Spring HTTP Invoker (Deprecated) + +| |As of Spring Framework 5.3, HTTP Invoker support is deprecated and will not be replaced.| +|---|----------------------------------------------------------------------------------------| + +As opposed to Hessian, Spring HTTP invokers are both lightweight protocols that use their own slim +serialization mechanisms and use the standard Java serialization +mechanism to expose services through HTTP. This has a huge advantage if your arguments +and return types are complex types that cannot be serialized by using the serialization +mechanisms Hessian uses (see the next section for more considerations when +you choose a remoting technology). + +Under the hood, Spring uses either the standard facilities provided by the JDK or +Apache `HttpComponents` to perform HTTP calls. If you need more +advanced and easier-to-use functionality, use the latter. See[hc.apache.org/httpcomponents-client-ga/](https://hc.apache.org/httpcomponents-client-ga/)for more information. + +| |Be aware of vulnerabilities due to unsafe Java deserialization:<br/>Manipulated input streams can lead to unwanted code execution on the server<br/>during the deserialization step. As a consequence, do not expose HTTP invoker<br/>endpoints to untrusted clients. 
Rather, expose them only between your own services.<br/>In general, we strongly recommend using any other message format (such as JSON) instead.<br/><br/>If you are concerned about security vulnerabilities due to Java serialization,<br/>consider the general-purpose serialization filter mechanism at the core JVM level,<br/>originally developed for JDK 9 but backported to JDK 8, 7 and 6 in the meantime. See[https://blogs.oracle.com/java-platform-group/entry/incoming\_filter\_serialization\_data\_a](https://blogs.oracle.com/java-platform-group/entry/incoming_filter_serialization_data_a)and [https://openjdk.java.net/jeps/290](https://openjdk.java.net/jeps/290).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.6.1. Exposing the Service Object + +Setting up the HTTP invoker infrastructure for a service object closely resembles the +way you would do the same by using Hessian. As Hessian support provides`HessianServiceExporter`, Spring’s HttpInvoker support provides`org.springframework.remoting.httpinvoker.HttpInvokerServiceExporter`. 
+ +To expose the `AccountService` (mentioned earlier) within a Spring Web MVC`DispatcherServlet`, the following configuration needs to be in place in the +dispatcher’s application context, as the following example shows: + +``` +<bean name="/AccountService" class="org.springframework.remoting.httpinvoker.HttpInvokerServiceExporter"> + <property name="service" ref="accountService"/> + <property name="serviceInterface" value="example.AccountService"/> +</bean> +``` + +Such an exporter definition is exposed through the `DispatcherServlet` instance’s standard +mapping facilities, as explained in [the section on Hessian](#remoting-caucho-protocols). + +Alternatively, you can create an `HttpInvokerServiceExporter` in your root application context +(for example, in `'WEB-INF/applicationContext.xml'`), as the following example shows: + +``` +<bean name="accountExporter" class="org.springframework.remoting.httpinvoker.HttpInvokerServiceExporter"> + <property name="service" ref="accountService"/> + <property name="serviceInterface" value="example.AccountService"/> +</bean> +``` + +In addition, you can define a corresponding servlet for this exporter in `web.xml`, with the +servlet name matching the bean name of the target exporter, as the following example shows: + +``` +<servlet> + <servlet-name>accountExporter</servlet-name> + <servlet-class>org.springframework.web.context.support.HttpRequestHandlerServlet</servlet-class> +</servlet> + +<servlet-mapping> + <servlet-name>accountExporter</servlet-name> + <url-pattern>/remoting/AccountService</url-pattern> +</servlet-mapping> +``` + +#### 2.6.2. Linking in the Service at the Client + +Again, linking in the service from the client much resembles the way you would do it +when you use Hessian. By using a proxy, Spring can translate your calls to +HTTP POST requests to the URL that points to the exported service. 
The following example +shows how to configure this arrangement: + +``` +<bean id="httpInvokerProxy" class="org.springframework.remoting.httpinvoker.HttpInvokerProxyFactoryBean"> + <property name="serviceUrl" value="https://remotehost:8080/remoting/AccountService"/> + <property name="serviceInterface" value="example.AccountService"/> +</bean> +``` + +As mentioned earlier, you can choose what HTTP client you want to use. By default, the`HttpInvokerProxy` uses the JDK’s HTTP functionality, but you can also use the Apache`HttpComponents` client by setting the `httpInvokerRequestExecutor` property. +The following example shows how to do so: + +``` +<property name="httpInvokerRequestExecutor"> + <bean class="org.springframework.remoting.httpinvoker.HttpComponentsHttpInvokerRequestExecutor"/> +</property> +``` + +### 2.7. JMS (Deprecated) + +| |As of Spring Framework 5.3, JMS remoting support is deprecated and will not be replaced.| +|---|----------------------------------------------------------------------------------------| + +You can also expose services transparently by using JMS as the underlying communication +protocol. The JMS remoting support in the Spring Framework is pretty basic. It sends +and receives on the `same thread` and in the same non-transactional `Session`. +As a result, throughput is implementation-dependent. Note that these single-threaded +and non-transactional constraints apply only to Spring’s JMS remoting support. +See [JMS (Java Message Service)](#jms) for information on Spring’s rich support for JMS-based messaging. 
+ +The following interface is used on both the server and the client sides: + +``` +package com.foo; + +public interface CheckingAccountService { + + public void cancelAccount(Long accountId); +} +``` + +The following simple implementation of the preceding interface is used on the server-side: + +``` +package com.foo; + +public class SimpleCheckingAccountService implements CheckingAccountService { + + public void cancelAccount(Long accountId) { + System.out.println("Cancelling account [" + accountId + "]"); + } +} +``` + +The following configuration file contains the JMS-infrastructure beans that are shared +on both the client and the server: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://www.springframework.org/schema/beans + https://www.springframework.org/schema/beans/spring-beans.xsd"> + + <bean id="connectionFactory" class="org.apache.activemq.ActiveMQConnectionFactory"> + <property name="brokerURL" value="tcp://ep-t43:61616"/> + </bean> + + <bean id="queue" class="org.apache.activemq.command.ActiveMQQueue"> + <constructor-arg value="mmm"/> + </bean> + +</beans> +``` + +#### 2.7.1. 
Server-side Configuration + +On the server, you need to expose the service object that uses the`JmsInvokerServiceExporter`, as the following example shows: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://www.springframework.org/schema/beans + https://www.springframework.org/schema/beans/spring-beans.xsd"> + + <bean id="checkingAccountService" + class="org.springframework.jms.remoting.JmsInvokerServiceExporter"> + <property name="serviceInterface" value="com.foo.CheckingAccountService"/> + <property name="service"> + <bean class="com.foo.SimpleCheckingAccountService"/> + </property> + </bean> + + <bean class="org.springframework.jms.listener.SimpleMessageListenerContainer"> + <property name="connectionFactory" ref="connectionFactory"/> + <property name="destination" ref="queue"/> + <property name="concurrentConsumers" value="3"/> + <property name="messageListener" ref="checkingAccountService"/> + </bean> + +</beans> +``` + +``` +package com.foo; + +import org.springframework.context.support.ClassPathXmlApplicationContext; + +public class Server { + + public static void main(String[] args) throws Exception { + new ClassPathXmlApplicationContext("com/foo/server.xml", "com/foo/jms.xml"); + } +} +``` + +#### 2.7.2. Client-side Configuration + +The client merely needs to create a client-side proxy that implements the agreed-upon +interface (`CheckingAccountService`). 
+ +The following example defines beans that you can inject into other client-side objects +(and the proxy takes care of forwarding the call to the server-side object via JMS): + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://www.springframework.org/schema/beans + https://www.springframework.org/schema/beans/spring-beans.xsd"> + + <bean id="checkingAccountService" + class="org.springframework.jms.remoting.JmsInvokerProxyFactoryBean"> + <property name="serviceInterface" value="com.foo.CheckingAccountService"/> + <property name="connectionFactory" ref="connectionFactory"/> + <property name="queue" ref="queue"/> + </bean> + +</beans> +``` + +``` +package com.foo; + +import org.springframework.context.ApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; + +public class Client { + + public static void main(String[] args) throws Exception { + ApplicationContext ctx = new ClassPathXmlApplicationContext("com/foo/client.xml", "com/foo/jms.xml"); + CheckingAccountService service = (CheckingAccountService) ctx.getBean("checkingAccountService"); + service.cancelAccount(new Long(10)); + } +} +``` + +## 3. Enterprise JavaBeans (EJB) Integration + +As a lightweight container, Spring is often considered an EJB replacement. We do believe +that for many, if not most, applications and use cases, Spring, as a container, combined +with its rich supporting functionality in the area of transactions, ORM and JDBC access, +is a better choice than implementing equivalent functionality through an EJB container +and EJBs. + +However, it is important to note that using Spring does not prevent you from using EJBs. +In fact, Spring makes it much easier to access EJBs and implement EJBs and functionality +within them. 
Additionally, using Spring to access services provided by EJBs allows the +implementation of those services to later transparently be switched between local EJB, +remote EJB, or POJO (plain old Java object) variants, without the client code having to +be changed. + +In this chapter, we look at how Spring can help you access and implement EJBs. Spring +provides particular value when accessing stateless session beans (SLSBs), so we begin +by discussing this topic. + +### 3.1. Accessing EJBs + +This section covers how to access EJBs. + +#### 3.1.1. Concepts + +To invoke a method on a local or remote stateless session bean, client code must +normally perform a JNDI lookup to obtain the (local or remote) EJB Home object and then use +a `create` method call on that object to obtain the actual (local or remote) EJB object. +One or more methods are then invoked on the EJB. + +To avoid repeated low-level code, many EJB applications use the Service Locator and +Business Delegate patterns. These are better than spraying JNDI lookups throughout +client code, but their usual implementations have significant disadvantages: + +* Typically, code that uses EJBs depends on Service Locator or Business Delegate singletons, + making it hard to test. + +* In the case of the Service Locator pattern used without a Business Delegate, + application code still ends up having to invoke the `create()` method on an EJB home + and deal with the resulting exceptions. Thus, it remains tied to the EJB API and the + complexity of the EJB programming model. + +* Implementing the Business Delegate pattern typically results in significant code + duplication, where we have to write numerous methods that call the same method + on the EJB. + +The Spring approach is to allow the creation and use of proxy objects (normally +configured inside a Spring container), which act as codeless business delegates. 
You need +not write another Service Locator, another JNDI lookup, or duplicate methods in +a hand-coded Business Delegate unless you actually add real value in such code. + +#### 3.1.2. Accessing Local SLSBs + +Assume that we have a web controller that needs to use a local EJB. We follow best +practice and use the EJB Business Methods Interface pattern, so that the EJB’s local +interface extends a non-EJB-specific business methods interface. We call this +business methods interface `MyComponent`. The following example shows such an interface: + +``` +public interface MyComponent { + ... +} +``` + +One of the main reasons to use the Business Methods Interface pattern is to ensure that +synchronization between method signatures in local interface and bean implementation +class is automatic. Another reason is that it later makes it much easier for us to +switch to a POJO (plain old Java object) implementation of the service if it makes sense +to do so. We also need to implement the local home interface and provide an +implementation class that implements `SessionBean` and the `MyComponent` business +methods interface. Now, the only Java coding we need to do to hook up our web tier +controller to the EJB implementation is to expose a setter method of type `MyComponent`on the controller. This saves the reference as an instance variable in the +controller. The following example shows how to do so: + +``` +private MyComponent myComponent; + +public void setMyComponent(MyComponent myComponent) { + this.myComponent = myComponent; +} +``` + +We can subsequently use this instance variable in any business method in the controller. +Now, assuming we obtain our controller object out of a Spring container, we can +(in the same context) configure a `LocalStatelessSessionProxyFactoryBean` instance, +which is the EJB proxy object. 
We configure the proxy and set the`myComponent` property of the controller with the following configuration entry: + +``` +<bean id="myComponent" + class="org.springframework.ejb.access.LocalStatelessSessionProxyFactoryBean"> + <property name="jndiName" value="ejb/myBean"/> + <property name="businessInterface" value="com.mycom.MyComponent"/> +</bean> + +<bean id="myController" class="com.mycom.myController"> + <property name="myComponent" ref="myComponent"/> +</bean> +``` + +A lot of work happens behind the scenes, courtesy of the Spring AOP framework, +although you are not forced to work with AOP concepts to enjoy the results. The`myComponent` bean definition creates a proxy for the EJB, which implements the business +method interface. The EJB local home is cached on startup, so there is only a single JNDI +lookup. Each time the EJB is invoked, the proxy invokes the `classname` method on the +local EJB and invokes the corresponding business method on the EJB. + +The `myController` bean definition sets the `myComponent` property of the controller +class to the EJB proxy. + +Alternatively (and preferably in case of many such proxy definitions), consider using +the `<jee:local-slsb>` configuration element in Spring’s “jee” namespace. +The following example shows how to do so: + +``` +<jee:local-slsb id="myComponent" jndi-name="ejb/myBean" + business-interface="com.mycom.MyComponent"/> + +<bean id="myController" class="com.mycom.myController"> + <property name="myComponent" ref="myComponent"/> +</bean> +``` + +This EJB access mechanism delivers huge simplification of application code. The web tier +code (or other EJB client code) has no dependence on the use of EJB. To +replace this EJB reference with a POJO or a mock object or other test stub, we could +change the `myComponent` bean definition without changing a line of Java code. +Additionally, we have not had to write a single line of JNDI lookup or other EJB plumbing +code as part of our application. 
+ +Benchmarks and experience in real applications indicate that the performance overhead of +this approach (which involves reflective invocation of the target EJB) is minimal and +is undetectable in typical use. Remember that we do not want to make +fine-grained calls to EJBs anyway, as there is a cost associated with the EJB +infrastructure in the application server. + +There is one caveat with regards to the JNDI lookup. In a bean container, this class is +normally best used as a singleton (there is no reason to make it a prototype). +However, if that bean container pre-instantiates singletons (as do the various XML`ApplicationContext` variants), you can have a problem if the bean container is loaded +before the EJB container loads the target EJB. That is because the JNDI lookup is +performed in the `init()` method of this class and then cached, but the EJB has not +been bound at the target location yet. The solution is to not pre-instantiate this +factory object but to let it be created on first use. In the XML containers, you can control this +by using the `lazy-init` attribute. + +Although not of interest to the majority of Spring users, those doing +programmatic AOP work with EJBs may want to look at `LocalSlsbInvokerInterceptor`. + +#### 3.1.3. Accessing Remote SLSBs + +Accessing remote EJBs is essentially identical to accessing local EJBs, except that the`SimpleRemoteStatelessSessionProxyFactoryBean` or `<jee:remote-slsb>` configuration +element is used. Of course, with or without Spring, remote invocation semantics apply: A +call to a method on an object in another VM in another computer does sometimes have to +be treated differently in terms of usage scenarios and failure handling. + +Spring’s EJB client support adds one more advantage over the non-Spring approach. +Normally, it is problematic for EJB client code to be easily switched back and forth +between calling EJBs locally or remotely. 
This is because the remote interface methods +must declare that they throw `RemoteException`, and client code must deal with this, +while the local interface methods need not. Client code written for local EJBs that needs +to be moved to remote EJBs typically has to be modified to add handling for the remote +exceptions, and client code written for remote EJBs that needs to be moved to local +EJBs can either stay the same but do a lot of unnecessary handling of remote +exceptions or be modified to remove that code. With the Spring remote EJB +proxy, you can instead not declare any thrown `RemoteException` in your Business Method +Interface and implementing EJB code, have a remote interface that is identical (except +that it does throw `RemoteException`), and rely on the proxy to dynamically treat the two +interfaces as if they were the same. That is, client code does not have to deal with the +checked `RemoteException` class. Any actual `RemoteException` that is thrown during the +EJB invocation is re-thrown as the non-checked `RemoteAccessException` class, which +is a subclass of `RuntimeException`. You can then switch the target service at will +between a local EJB or remote EJB (or even plain Java object) implementation, without +the client code knowing or caring. Of course, this is optional: Nothing +stops you from declaring `RemoteException` in your business interface. + +#### 3.1.4. Accessing EJB 2.x SLSBs Versus EJB 3 SLSBs + +Accessing EJB 2.x Session Beans and EJB 3 Session Beans through Spring is largely +transparent. Spring’s EJB accessors, including the `<jee:local-slsb>` and`<jee:remote-slsb>` facilities, transparently adapt to the actual component at runtime. +They handle a home interface if found (EJB 2.x style) or perform straight component +invocations if no home interface is available (EJB 3 style). 
+ +Note: For EJB 3 Session Beans, you can effectively use a `JndiObjectFactoryBean` /`<jee:jndi-lookup>` as well, since fully usable component references are exposed for +plain JNDI lookups there. Defining explicit `<jee:local-slsb>` or `<jee:remote-slsb>`lookups provides consistent and more explicit EJB access configuration. + +## 4. JMS (Java Message Service) + +Spring provides a JMS integration framework that simplifies the use of the JMS API in much +the same way as Spring’s integration does for the JDBC API. + +JMS can be roughly divided into two areas of functionality, namely the production and +consumption of messages. The `JmsTemplate` class is used for message production and +synchronous message reception. For asynchronous reception similar to Java EE’s +message-driven bean style, Spring provides a number of message-listener containers that +you can use to create Message-Driven POJOs (MDPs). Spring also provides a declarative way +to create message listeners. + +The `org.springframework.jms.core` package provides the core functionality for using +JMS. It contains JMS template classes that simplify the use of the JMS by handling the +creation and release of resources, much like the `JdbcTemplate` does for JDBC. The +design principle common to Spring template classes is to provide helper methods to +perform common operations and, for more sophisticated usage, delegate the essence of the +processing task to user-implemented callback interfaces. The JMS template follows the +same design. The classes offer various convenience methods for sending messages, +consuming messages synchronously, and exposing the JMS session and message producer to +the user. + +The `org.springframework.jms.support` package provides `JMSException` translation +functionality. The translation converts the checked `JMSException` hierarchy to a +mirrored hierarchy of unchecked exceptions. 
If any provider-specific +subclasses of the checked `javax.jms.JMSException` exist, this exception is wrapped in the +unchecked `UncategorizedJmsException`. + +The `org.springframework.jms.support.converter` package provides a `MessageConverter` abstraction to convert between Java objects and JMS messages. + +The `org.springframework.jms.support.destination` package provides various strategies +for managing JMS destinations, such as providing a service locator for destinations +stored in JNDI. + +The `org.springframework.jms.annotation` package provides the necessary infrastructure +to support annotation-driven listener endpoints by using `@JmsListener`. + +The `org.springframework.jms.config` package provides the parser implementation for the `jms` namespace as well as the java config support to configure listener containers and +create listener endpoints. + +Finally, the `org.springframework.jms.connection` package provides an implementation of +the `ConnectionFactory` suitable for use in standalone applications. It also contains an +implementation of Spring’s `PlatformTransactionManager` for JMS (the cunningly named `JmsTransactionManager`). This allows for seamless integration of JMS as a transactional +resource into Spring’s transaction management mechanisms. + +| |As of Spring Framework 5, Spring’s JMS package fully supports JMS 2.0 and requires the<br/>JMS 2.0 API to be present at runtime. We recommend the use of a JMS 2.0 compatible provider.<br/><br/>If you happen to use an older message broker in your system, you may try upgrading to a<br/>JMS 2.0 compatible driver for your existing broker generation. Alternatively, you may also<br/>try to run against a JMS 1.1 based driver, simply putting the JMS 2.0 API jar on the<br/>classpath but only using JMS 1.1 compatible API against your driver. Spring’s JMS support<br/>adheres to JMS 1.1 conventions by default, so with corresponding configuration it does<br/>support such a scenario. 
However, please consider this for transition scenarios only.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.1. Using Spring JMS + +This section describes how to use Spring’s JMS components. + +#### 4.1.1. Using `JmsTemplate` + +The `JmsTemplate` class is the central class in the JMS core package. It simplifies the +use of JMS, since it handles the creation and release of resources when sending or +synchronously receiving messages. + +Code that uses the `JmsTemplate` needs only to implement callback interfaces that give them +a clearly defined high-level contract. The `MessageCreator` callback interface creates a +message when given a `Session` provided by the calling code in `JmsTemplate`. To +allow for more complex usage of the JMS API, `SessionCallback` provides the +JMS session, and `ProducerCallback` exposes a `Session` and `MessageProducer` pair. + +The JMS API exposes two types of send methods, one that takes delivery mode, priority, +and time-to-live as Quality of Service (QOS) parameters and one that takes no QOS +parameters and uses default values. Since `JmsTemplate` has many send methods, +setting the QOS parameters has been exposed as bean properties to +avoid duplication in the number of send methods. 
Similarly, the timeout value for +synchronous receive calls is set by using the `setReceiveTimeout` property. + +Some JMS providers allow the setting of default QOS values administratively through the +configuration of the `ConnectionFactory`. This has the effect that a call to a`MessageProducer` instance’s `send` method (`send(Destination destination, Message message)`) +uses different QOS default values than those specified in the JMS specification. In order +to provide consistent management of QOS values, the `JmsTemplate` must, therefore, be +specifically enabled to use its own QOS values by setting the boolean property`isExplicitQosEnabled` to `true`. + +For convenience, `JmsTemplate` also exposes a basic request-reply operation that allows +for sending a message and waiting for a reply on a temporary queue that is created as part of +the operation. + +| |Instances of the `JmsTemplate` class are thread-safe, once configured. This is<br/>important, because it means that you can configure a single instance of a `JmsTemplate`and then safely inject this shared reference into multiple collaborators. To be<br/>clear, the `JmsTemplate` is stateful, in that it maintains a reference to a`ConnectionFactory`, but this state is not conversational state.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +As of Spring Framework 4.1, `JmsMessagingTemplate` is built on top of `JmsTemplate`and provides an integration with the messaging abstraction — that is,`org.springframework.messaging.Message`. This lets you create the message to +send in a generic manner. + +#### 4.1.2. 
Connections + +The `JmsTemplate` requires a reference to a `ConnectionFactory`. The `ConnectionFactory`is part of the JMS specification and serves as the entry point for working with JMS. It +is used by the client application as a factory to create connections with the JMS +provider and encapsulates various configuration parameters, many of which are +vendor-specific, such as SSL configuration options. + +When using JMS inside an EJB, the vendor provides implementations of the JMS interfaces +so that they can participate in declarative transaction management and perform pooling +of connections and sessions. In order to use this implementation, Java EE containers +typically require that you declare a JMS connection factory as a `resource-ref` inside +the EJB or servlet deployment descriptors. To ensure the use of these features with the`JmsTemplate` inside an EJB, the client application should ensure that it references the +managed implementation of the `ConnectionFactory`. + +##### Caching Messaging Resources + +The standard API involves creating many intermediate objects. To send a message, the +following 'API' walk is performed: + +``` +ConnectionFactory->Connection->Session->MessageProducer->send +``` + +Between the `ConnectionFactory` and the `Send` operation, three intermediate +objects are created and destroyed. To optimize the resource usage and increase +performance, Spring provides two implementations of `ConnectionFactory`. + +##### Using `SingleConnectionFactory` + +Spring provides an implementation of the `ConnectionFactory` interface,`SingleConnectionFactory`, that returns the same `Connection` on all`createConnection()` calls and ignores calls to `close()`. This is useful for testing and +standalone environments so that the same connection can be used for multiple`JmsTemplate` calls that may span any number of transactions. `SingleConnectionFactory`takes a reference to a standard `ConnectionFactory` that would typically come from JNDI. 
+ +##### Using `CachingConnectionFactory` + +The `CachingConnectionFactory` extends the functionality of `SingleConnectionFactory`and adds the caching of `Session`, `MessageProducer`, and `MessageConsumer` instances. The initial +cache size is set to `1`. You can use the `sessionCacheSize` property to increase the number of +cached sessions. Note that the number of actual cached sessions is more than that +number, as sessions are cached based on their acknowledgment mode, so there can be up to +four cached session instances (one for each +acknowledgment mode) when `sessionCacheSize` is set to one. `MessageProducer` and `MessageConsumer` instances are cached within their +owning session and also take into account the unique properties of the producers and +consumers when caching. MessageProducers are cached based on their destination. +MessageConsumers are cached based on a key composed of the destination, selector, +noLocal delivery flag, and the durable subscription name (if creating durable consumers). + +#### 4.1.3. Destination Management + +Destinations, as `ConnectionFactory` instances, are JMS administered objects that you can store +and retrieve in JNDI. When configuring a Spring application context, you can use the +JNDI `JndiObjectFactoryBean` factory class or `<jee:jndi-lookup>` to perform dependency +injection on your object’s references to JMS destinations. However, this strategy +is often cumbersome if there are a large number of destinations in the application or if there +are advanced destination management features unique to the JMS provider. Examples of +such advanced destination management include the creation of dynamic destinations or +support for a hierarchical namespace of destinations. The `JmsTemplate` delegates the +resolution of a destination name to a JMS destination object that implements the`DestinationResolver` interface. 
`DynamicDestinationResolver` is the default +implementation used by `JmsTemplate` and accommodates resolving dynamic destinations. A`JndiDestinationResolver` is also provided to act as a service locator for +destinations contained in JNDI and optionally falls back to the behavior contained in`DynamicDestinationResolver`. + +Quite often, the destinations used in a JMS application are only known at runtime and, +therefore, cannot be administratively created when the application is deployed. This is +often because there is shared application logic between interacting system components +that create destinations at runtime according to a well-known naming convention. Even +though the creation of dynamic destinations is not part of the JMS specification, most +vendors have provided this functionality. Dynamic destinations are created with a user-defined name, +which differentiates them from temporary destinations, and are often +not registered in JNDI. The API used to create dynamic destinations varies from provider +to provider since the properties associated with the destination are vendor-specific. +However, a simple implementation choice that is sometimes made by vendors is to +disregard the warnings in the JMS specification and to use the method `TopicSession``createTopic(String topicName)` or the `QueueSession` `createQueue(String +queueName)` method to create a new destination with default destination properties. Depending +on the vendor implementation, `DynamicDestinationResolver` can then also create a +physical destination instead of only resolving one. + +The boolean property `pubSubDomain` is used to configure the `JmsTemplate` with +knowledge of what JMS domain is being used. By default, the value of this property is +false, indicating that the point-to-point domain, `Queues`, is to be used. This property +(used by `JmsTemplate`) determines the behavior of dynamic destination resolution through +implementations of the `DestinationResolver` interface. 
+ +You can also configure the `JmsTemplate` with a default destination through the +property `defaultDestination`. The default destination is used with send and receive +operations that do not refer to a specific destination. + +#### 4.1.4. Message Listener Containers + +One of the most common uses of JMS messages in the EJB world is to drive message-driven +beans (MDBs). Spring offers a solution to create message-driven POJOs (MDPs) in a way +that does not tie a user to an EJB container. (See [Asynchronous reception: Message-Driven POJOs](#jms-receiving-async) for detailed +coverage of Spring’s MDP support.) Since Spring Framework 4.1, endpoint methods can be +annotated with `@JmsListener` — see [Annotation-driven Listener Endpoints](#jms-annotated) for more details. + +A message listener container is used to receive messages from a JMS message queue and +drive the `MessageListener` that is injected into it. The listener container is +responsible for all threading of message reception and dispatches into the listener for +processing. A message listener container is the intermediary between an MDP and a +messaging provider and takes care of registering to receive messages, participating in +transactions, resource acquisition and release, exception conversion, and so on. This +lets you write the (possibly complex) business logic +associated with receiving a message (and possibly respond to it), and delegates +boilerplate JMS infrastructure concerns to the framework. + +There are two standard JMS message listener containers packaged with Spring, each with +its specialized feature set. + +* [`SimpleMessageListenerContainer`](#jms-mdp-simple) + +* [`DefaultMessageListenerContainer`](#jms-mdp-default) + +##### Using `SimpleMessageListenerContainer` + +This message listener container is the simpler of the two standard flavors. 
It creates +a fixed number of JMS sessions and consumers at startup, registers the listener by using +the standard JMS `MessageConsumer.setMessageListener()` method, and leaves it up the JMS +provider to perform listener callbacks. This variant does not allow for dynamic adaption +to runtime demands or for participation in externally managed transactions. +Compatibility-wise, it stays very close to the spirit of the standalone JMS +specification, but is generally not compatible with Java EE’s JMS restrictions. + +| |While `SimpleMessageListenerContainer` does not allow for participation in externally<br/>managed transactions, it does support native JMS transactions. To enable this feature,<br/>you can switch the `sessionTransacted` flag to `true` or, in the XML namespace, set the`acknowledge` attribute to `transacted`. Exceptions thrown from your listener then lead<br/>to a rollback, with the message getting redelivered. Alternatively, consider using`CLIENT_ACKNOWLEDGE` mode, which provides redelivery in case of an exception as well but<br/>does not use transacted `Session` instances and, therefore, does not include any other`Session` operations (such as sending response messages) in the transaction protocol.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The default `AUTO_ACKNOWLEDGE` mode does not 
provide proper reliability guarantees.<br/>Messages can get lost when listener execution fails (since the provider automatically<br/>acknowledges each message after listener invocation, with no exceptions to be propagated to<br/>the provider) or when the listener container shuts down (you can configure this by setting<br/>the `acceptMessagesWhileStopping` flag). Make sure to use transacted sessions in case of<br/>reliability needs (for example, for reliable queue handling and durable topic subscriptions).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Using `DefaultMessageListenerContainer` + +This message listener container is used in most cases. In contrast to`SimpleMessageListenerContainer`, this container variant allows for dynamic adaptation +to runtime demands and is able to participate in externally managed transactions. +Each received message is registered with an XA transaction when configured with a`JtaTransactionManager`. As a result, processing may take advantage of XA transaction +semantics. This listener container strikes a good balance between low requirements on +the JMS provider, advanced functionality (such as participation in externally managed +transactions), and compatibility with Java EE environments. + +You can customize the cache level of the container. Note that, when no caching is enabled, +a new connection and a new session is created for each message reception. 
Combining this +with a non-durable subscription with high loads may lead to message loss. Make sure to +use a proper cache level in such a case. + +This container also has recoverable capabilities when the broker goes down. By default, +a simple `BackOff` implementation retries every five seconds. You can specify +a custom `BackOff` implementation for more fine-grained recovery options. See[`ExponentialBackOff`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/util/backoff/ExponentialBackOff.html) for an example. + +| |Like its sibling ([`SimpleMessageListenerContainer`](#jms-mdp-simple)),`DefaultMessageListenerContainer` supports native JMS transactions and allows for<br/>customizing the acknowledgment mode. If feasible for your scenario, This is strongly<br/>recommended over externally managed transactions — that is, if you can live with<br/>occasional duplicate messages in case of the JVM dying. Custom duplicate message<br/>detection steps in your business logic can cover such situations — for example,<br/>in the form of a business entity existence check or a protocol table check.<br/>Any such arrangements are significantly more efficient than the alternative:<br/>wrapping your entire processing with an XA transaction (through configuring your`DefaultMessageListenerContainer` with an `JtaTransactionManager`) to cover the<br/>reception of the JMS message as well as the execution of the business logic in your<br/>message listener (including database operations, etc.).| 
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The default `AUTO_ACKNOWLEDGE` mode does not provide proper reliability guarantees.<br/>Messages can get lost when listener execution fails (since the provider automatically<br/>acknowledges each message after listener invocation, with no exceptions to be propagated to<br/>the provider) or when the listener container shuts down (you can configure this by setting<br/>the `acceptMessagesWhileStopping` flag). 
Make sure to use transacted sessions in case of<br/>reliability needs (for example, for reliable queue handling and durable topic subscriptions).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.5. Transaction Management + +Spring provides a `JmsTransactionManager` that manages transactions for a single JMS `ConnectionFactory`. This lets JMS applications leverage the managed-transaction +features of Spring, as described in [Transaction Management section of the Data Access chapter](data-access.html#transaction). +The `JmsTransactionManager` performs local resource transactions, binding a JMS +Connection/Session pair from the specified `ConnectionFactory` to the thread. `JmsTemplate` automatically detects such transactional resources and operates +on them accordingly. + +In a Java EE environment, the `ConnectionFactory` pools Connection and Session instances, +so those resources are efficiently reused across transactions. In a standalone environment, +using Spring’s `SingleConnectionFactory` results in a shared JMS `Connection`, with +each transaction having its own independent `Session`. Alternatively, consider the use +of a provider-specific pooling adapter, such as ActiveMQ’s `PooledConnectionFactory` class. + +You can also use `JmsTemplate` with the `JtaTransactionManager` and an XA-capable JMS `ConnectionFactory` to perform distributed transactions. 
Note that this requires the +use of a JTA transaction manager as well as a properly XA-configured ConnectionFactory. +(Check your Java EE server’s or JMS provider’s documentation.) + +Reusing code across a managed and unmanaged transactional environment can be confusing +when using the JMS API to create a `Session` from a `Connection`. This is because the +JMS API has only one factory method to create a `Session`, and it requires values for the +transaction and acknowledgment modes. In a managed environment, setting these values is +the responsibility of the environment’s transactional infrastructure, so these values +are ignored by the vendor’s wrapper to the JMS Connection. When you use the `JmsTemplate`in an unmanaged environment, you can specify these values through the use of the +properties `sessionTransacted` and `sessionAcknowledgeMode`. When you use a`PlatformTransactionManager` with `JmsTemplate`, the template is always given a +transactional JMS `Session`. + +### 4.2. Sending a Message + +The `JmsTemplate` contains many convenience methods to send a message. Send +methods specify the destination by using a `javax.jms.Destination` object, and others +specify the destination by using a `String` in a JNDI lookup. The `send` method +that takes no destination argument uses the default destination. 
+ +The following example uses the `MessageCreator` callback to create a text message from the +supplied `Session` object: + +``` +import javax.jms.ConnectionFactory; +import javax.jms.JMSException; +import javax.jms.Message; +import javax.jms.Queue; +import javax.jms.Session; + +import org.springframework.jms.core.MessageCreator; +import org.springframework.jms.core.JmsTemplate; + +public class JmsQueueSender { + + private JmsTemplate jmsTemplate; + private Queue queue; + + public void setConnectionFactory(ConnectionFactory cf) { + this.jmsTemplate = new JmsTemplate(cf); + } + + public void setQueue(Queue queue) { + this.queue = queue; + } + + public void simpleSend() { + this.jmsTemplate.send(this.queue, new MessageCreator() { + public Message createMessage(Session session) throws JMSException { + return session.createTextMessage("hello queue world"); + } + }); + } +} +``` + +In the preceding example, the `JmsTemplate` is constructed by passing a reference to a`ConnectionFactory`. As an alternative, a zero-argument constructor and`connectionFactory` is provided and can be used for constructing the instance in +JavaBean style (using a `BeanFactory` or plain Java code). Alternatively, consider +deriving from Spring’s `JmsGatewaySupport` convenience base class, which provides +pre-built bean properties for JMS configuration. + +The `send(String destinationName, MessageCreator creator)` method lets you send a +message by using the string name of the destination. If these names are registered in JNDI, +you should set the `destinationResolver` property of the template to an instance of`JndiDestinationResolver`. + +If you created the `JmsTemplate` and specified a default destination, the`send(MessageCreator c)` sends a message to that destination. + +#### 4.2.1. Using Message Converters + +To facilitate the sending of domain model objects, the `JmsTemplate` has +various send methods that take a Java object as an argument for a message’s data +content. 
The overloaded methods `convertAndSend()` and `receiveAndConvert()` methods in`JmsTemplate` delegate the conversion process to an instance of the `MessageConverter`interface. This interface defines a simple contract to convert between Java objects and +JMS messages. The default implementation (`SimpleMessageConverter`) supports conversion +between `String` and `TextMessage`, `byte[]` and `BytesMessage`, and `java.util.Map`and `MapMessage`. By using the converter, you and your application code can focus on the +business object that is being sent or received through JMS and not be concerned with the +details of how it is represented as a JMS message. + +The sandbox currently includes a `MapMessageConverter`, which uses reflection to convert +between a JavaBean and a `MapMessage`. Other popular implementation choices you might +implement yourself are converters that use an existing XML marshalling package (such as +JAXB or XStream) to create a `TextMessage` that represents the object. + +To accommodate the setting of a message’s properties, headers, and body that can not be +generically encapsulated inside a converter class, the `MessagePostProcessor` interface +gives you access to the message after it has been converted but before it is sent. The +following example shows how to modify a message header and a property after a`java.util.Map` is converted to a message: + +``` +public void sendWithConversion() { + Map map = new HashMap(); + map.put("Name", "Mark"); + map.put("Age", new Integer(47)); + jmsTemplate.convertAndSend("testQueue", map, new MessagePostProcessor() { + public Message postProcessMessage(Message message) throws JMSException { + message.setIntProperty("AccountID", 1234); + message.setJMSCorrelationID("123-00001"); + return message; + } + }); +} +``` + +This results in a message of the following form: + +``` +MapMessage={ + Header={ + ... standard headers ... 
+        CorrelationID={123-00001}
+    }
+    Properties={
+        AccountID={Integer:1234}
+    }
+    Fields={
+        Name={String:Mark}
+        Age={Integer:47}
+    }
+}
+```
+
+#### 4.2.2. Using `SessionCallback` and `ProducerCallback`
+
+While the send operations cover many common usage scenarios, you might sometimes
+want to perform multiple operations on a JMS `Session` or `MessageProducer`. The `SessionCallback` and `ProducerCallback` expose the JMS `Session` and `Session` / `MessageProducer` pair, respectively. The `execute()` methods on `JmsTemplate` run
+these callback methods.
+
+### 4.3. Receiving a Message
+
+This section describes how to receive messages with JMS in Spring.
+
+#### 4.3.1. Synchronous Reception
+
+While JMS is typically associated with asynchronous processing, you can
+consume messages synchronously. The overloaded `receive(..)` methods provide this
+functionality. During a synchronous receive, the calling thread blocks until a message
+becomes available. This can be a dangerous operation, since the calling thread can
+potentially be blocked indefinitely. The `receiveTimeout` property specifies how long
+the receiver should wait before giving up waiting for a message.
+
+#### 4.3.2. 
Asynchronous reception: Message-Driven POJOs + +| |Spring also supports annotated-listener endpoints through the use of the `@JmsListener`annotation and provides an open infrastructure to register endpoints programmatically.<br/>This is, by far, the most convenient way to setup an asynchronous receiver.<br/>See [Enable Listener Endpoint Annotations](#jms-annotated-support) for more details.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In a fashion similar to a Message-Driven Bean (MDB) in the EJB world, the Message-Driven +POJO (MDP) acts as a receiver for JMS messages. The one restriction (but see[Using `MessageListenerAdapter`](#jms-receiving-async-message-listener-adapter)) on an MDP is that it must implement +the `javax.jms.MessageListener` interface. Note that, if your POJO receives messages +on multiple threads, it is important to ensure that your implementation is thread-safe. + +The following example shows a simple implementation of an MDP: + +``` +import javax.jms.JMSException; +import javax.jms.Message; +import javax.jms.MessageListener; +import javax.jms.TextMessage; + +public class ExampleListener implements MessageListener { + + public void onMessage(Message message) { + if (message instanceof TextMessage) { + try { + System.out.println(((TextMessage) message).getText()); + } + catch (JMSException ex) { + throw new RuntimeException(ex); + } + } + else { + throw new IllegalArgumentException("Message must be of type TextMessage"); + } + } +} +``` + +Once you have implemented your `MessageListener`, it is time to create a message listener +container. 
+ +The following example shows how to define and configure one of the message listener +containers that ships with Spring (in this case, `DefaultMessageListenerContainer`): + +``` +<!-- this is the Message Driven POJO (MDP) --> +<bean id="messageListener" class="jmsexample.ExampleListener"/> + +<!-- and this is the message listener container --> +<bean id="jmsContainer" class="org.springframework.jms.listener.DefaultMessageListenerContainer"> + <property name="connectionFactory" ref="connectionFactory"/> + <property name="destination" ref="destination"/> + <property name="messageListener" ref="messageListener"/> +</bean> +``` + +See the Spring javadoc of the various message listener containers (all of which implement[MessageListenerContainer](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jms/listener/MessageListenerContainer.html)) +for a full description of the features supported by each implementation. + +#### 4.3.3. Using the `SessionAwareMessageListener` Interface #### + +The `SessionAwareMessageListener` interface is a Spring-specific interface that provides +a similar contract to the JMS `MessageListener` interface but also gives the message-handling +method access to the JMS `Session` from which the `Message` was received. +The following listing shows the definition of the `SessionAwareMessageListener` interface: + +``` +package org.springframework.jms.listener; + +public interface SessionAwareMessageListener { + + void onMessage(Message message, Session session) throws JMSException; +} +``` + +You can choose to have your MDPs implement this interface (in preference to the standard +JMS `MessageListener` interface) if you want your MDPs to be able to respond to any +received messages (by using the `Session` supplied in the `onMessage(Message, Session)`method). 
All of the message listener container implementations that ship with Spring
+have support for MDPs that implement either the `MessageListener` or `SessionAwareMessageListener` interface. Classes that implement the `SessionAwareMessageListener` come with the caveat that they are then tied to Spring
+through the interface. The choice of whether or not to use it is left entirely up to you
+as an application developer or architect.
+
+Note that the `onMessage(..)` method of the `SessionAwareMessageListener` interface throws `JMSException`. In contrast to the standard JMS `MessageListener` interface, when using the `SessionAwareMessageListener` interface, it is the
+responsibility of the client code to handle any thrown exceptions.
+
+#### 4.3.4. Using `MessageListenerAdapter`
+
+The `MessageListenerAdapter` class is the final component in Spring’s asynchronous
+messaging support. In a nutshell, it lets you expose almost any class as an MDP
+(though there are some constraints).
+
+Consider the following interface definition:
+
+```
+public interface MessageDelegate {
+
+    void handleMessage(String message);
+
+    void handleMessage(Map message);
+
+    void handleMessage(byte[] message);
+
+    void handleMessage(Serializable message);
+}
+```
+
+Notice that, although the interface extends neither the `MessageListener` nor the `SessionAwareMessageListener` interface, you can still use it as an MDP by using the `MessageListenerAdapter` class. Notice also how the various message handling methods are
+strongly typed according to the contents of the various `Message` types that they can
+receive and handle.
+
+Now consider the following implementation of the `MessageDelegate` interface:
+
+```
+public class DefaultMessageDelegate implements MessageDelegate {
+    // implementation elided for clarity...
+}
+```
+
+In particular, note how the preceding implementation of the `MessageDelegate` interface (the `DefaultMessageDelegate` class) has no JMS dependencies at all. 
It truly is a +POJO that we can make into an MDP through the following configuration: + +``` +<!-- this is the Message Driven POJO (MDP) --> +<bean id="messageListener" class="org.springframework.jms.listener.adapter.MessageListenerAdapter"> + <constructor-arg> + <bean class="jmsexample.DefaultMessageDelegate"/> + </constructor-arg> +</bean> + +<!-- and this is the message listener container... --> +<bean id="jmsContainer" class="org.springframework.jms.listener.DefaultMessageListenerContainer"> + <property name="connectionFactory" ref="connectionFactory"/> + <property name="destination" ref="destination"/> + <property name="messageListener" ref="messageListener"/> +</bean> +``` + +The next example shows another MDP that can handle only receiving JMS`TextMessage` messages. Notice how the message handling method is actually called`receive` (the name of the message handling method in a `MessageListenerAdapter`defaults to `handleMessage`), but it is configurable (as you can see later in this section). Notice +also how the `receive(..)` method is strongly typed to receive and respond only to JMS`TextMessage` messages. +The following listing shows the definition of the `TextMessageDelegate` interface: + +``` +public interface TextMessageDelegate { + + void receive(TextMessage message); +} +``` + +The following listing shows a class that implements the `TextMessageDelegate` interface: + +``` +public class DefaultTextMessageDelegate implements TextMessageDelegate { + // implementation elided for clarity... 
+} +``` + +The configuration of the attendant `MessageListenerAdapter` would then be as follows: + +``` +<bean id="messageListener" class="org.springframework.jms.listener.adapter.MessageListenerAdapter"> + <constructor-arg> + <bean class="jmsexample.DefaultTextMessageDelegate"/> + </constructor-arg> + <property name="defaultListenerMethod" value="receive"/> + <!-- we don't want automatic message context extraction --> + <property name="messageConverter"> + <null/> + </property> +</bean> +``` + +Note that, if the `messageListener` receives a JMS `Message` of a type +other than `TextMessage`, an `IllegalStateException` is thrown (and subsequently +swallowed). Another of the capabilities of the `MessageListenerAdapter` class is the +ability to automatically send back a response `Message` if a handler method returns a +non-void value. Consider the following interface and class: + +``` +public interface ResponsiveTextMessageDelegate { + + // notice the return type... + String receive(TextMessage message); +} +``` + +``` +public class DefaultResponsiveTextMessageDelegate implements ResponsiveTextMessageDelegate { + // implementation elided for clarity... +} +``` + +If you use the `DefaultResponsiveTextMessageDelegate` in conjunction with a`MessageListenerAdapter`, any non-null value that is returned from the execution of +the `'receive(..)'` method is (in the default configuration) converted into a`TextMessage`. The resulting `TextMessage` is then sent to the `Destination` (if +one exists) defined in the JMS `Reply-To` property of the original `Message` or the +default `Destination` set on the `MessageListenerAdapter` (if one has been configured). +If no `Destination` is found, an `InvalidDestinationException` is thrown +(note that this exception is not swallowed and propagates up the +call stack). + +#### 4.3.5. Processing Messages Within Transactions + +Invoking a message listener within a transaction requires only reconfiguration of the +listener container. 
+ +You can activate local resource transactions through the `sessionTransacted` flag +on the listener container definition. Each message listener invocation then operates +within an active JMS transaction, with message reception rolled back in case of listener +execution failure. Sending a response message (through `SessionAwareMessageListener`) is +part of the same local transaction, but any other resource operations (such as +database access) operate independently. This usually requires duplicate message +detection in the listener implementation, to cover the case where database processing +has committed but message processing failed to commit. + +Consider the following bean definition: + +``` +<bean id="jmsContainer" class="org.springframework.jms.listener.DefaultMessageListenerContainer"> + <property name="connectionFactory" ref="connectionFactory"/> + <property name="destination" ref="destination"/> + <property name="messageListener" ref="messageListener"/> + <property name="sessionTransacted" value="true"/> +</bean> +``` + +To participate in an externally managed transaction, you need to configure a +transaction manager and use a listener container that supports externally managed +transactions (typically, `DefaultMessageListenerContainer`). + +To configure a message listener container for XA transaction participation, you want +to configure a `JtaTransactionManager` (which, by default, delegates to the Java EE +server’s transaction subsystem). Note that the underlying JMS `ConnectionFactory` needs to +be XA-capable and properly registered with your JTA transaction coordinator. (Check your +Java EE server’s configuration of JNDI resources.) This lets message reception as well +as (for example) database access be part of the same transaction (with unified commit +semantics, at the expense of XA transaction log overhead). 
+ +The following bean definition creates a transaction manager: + +``` +<bean id="transactionManager" class="org.springframework.transaction.jta.JtaTransactionManager"/> +``` + +Then we need to add it to our earlier container configuration. The container +takes care of the rest. The following example shows how to do so: + +``` +<bean id="jmsContainer" class="org.springframework.jms.listener.DefaultMessageListenerContainer"> + <property name="connectionFactory" ref="connectionFactory"/> + <property name="destination" ref="destination"/> + <property name="messageListener" ref="messageListener"/> + <property name="transactionManager" ref="transactionManager"/> (1) +</bean> +``` + +|**1**|Our transaction manager.| +|-----|------------------------| + +### 4.4. Support for JCA Message Endpoints + +Beginning with version 2.5, Spring also provides support for a JCA-based`MessageListener` container. The `JmsMessageEndpointManager` tries to +automatically determine the `ActivationSpec` class name from the provider’s`ResourceAdapter` class name. Therefore, it is typically possible to provide +Spring’s generic `JmsActivationSpecConfig`, as the following example shows: + +``` +<bean class="org.springframework.jms.listener.endpoint.JmsMessageEndpointManager"> + <property name="resourceAdapter" ref="resourceAdapter"/> + <property name="activationSpecConfig"> + <bean class="org.springframework.jms.listener.endpoint.JmsActivationSpecConfig"> + <property name="destinationName" value="myQueue"/> + </bean> + </property> + <property name="messageListener" ref="myMessageListener"/> +</bean> +``` + +Alternatively, you can set up a `JmsMessageEndpointManager` with a given`ActivationSpec` object. The `ActivationSpec` object may also come from a JNDI lookup +(using `<jee:jndi-lookup>`). 
The following example shows how to do so: + +``` +<bean class="org.springframework.jms.listener.endpoint.JmsMessageEndpointManager"> + <property name="resourceAdapter" ref="resourceAdapter"/> + <property name="activationSpec"> + <bean class="org.apache.activemq.ra.ActiveMQActivationSpec"> + <property name="destination" value="myQueue"/> + <property name="destinationType" value="javax.jms.Queue"/> + </bean> + </property> + <property name="messageListener" ref="myMessageListener"/> +</bean> +``` + +Using Spring’s `ResourceAdapterFactoryBean`, you can configure the target `ResourceAdapter`locally, as the following example shows: + +``` +<bean id="resourceAdapter" class="org.springframework.jca.support.ResourceAdapterFactoryBean"> + <property name="resourceAdapter"> + <bean class="org.apache.activemq.ra.ActiveMQResourceAdapter"> + <property name="serverUrl" value="tcp://localhost:61616"/> + </bean> + </property> + <property name="workManager"> + <bean class="org.springframework.jca.work.SimpleTaskWorkManager"/> + </property> +</bean> +``` + +The specified `WorkManager` can also point to an environment-specific thread pool — typically through a `SimpleTaskWorkManager` instance’s `asyncTaskExecutor` property. Consider +defining a shared thread pool for all your `ResourceAdapter` instances if you happen to +use multiple adapters. + +In some environments (such as WebLogic 9 or above), you can instead obtain the entire `ResourceAdapter` object +from JNDI (by using `<jee:jndi-lookup>`). The Spring-based message +listeners can then interact with the server-hosted `ResourceAdapter`, which also use the +server’s built-in `WorkManager`. 
+ +See the javadoc for [`JmsMessageEndpointManager`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jms/listener/endpoint/JmsMessageEndpointManager.html),[`JmsActivationSpecConfig`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jms/listener/endpoint/JmsActivationSpecConfig.html), +and [`ResourceAdapterFactoryBean`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jca/support/ResourceAdapterFactoryBean.html)for more details. + +Spring also provides a generic JCA message endpoint manager that is not tied to JMS:`org.springframework.jca.endpoint.GenericMessageEndpointManager`. This component allows +for using any message listener type (such as a JMS `MessageListener`) and any +provider-specific `ActivationSpec` object. See your JCA provider’s documentation to +find out about the actual capabilities of your connector, and see the[`GenericMessageEndpointManager`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jca/endpoint/GenericMessageEndpointManager.html)javadoc for the Spring-specific configuration details. + +| |JCA-based message endpoint management is very analogous to EJB 2.1 Message-Driven Beans.<br/>It uses the same underlying resource provider contract. 
As with EJB 2.1 MDBs, you can use any<br/>message listener interface supported by your JCA provider in the Spring context as well.<br/>Spring nevertheless provides explicit “convenience” support for JMS, because JMS is the<br/>most common endpoint API used with the JCA endpoint management contract.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 4.5. Annotation-driven Listener Endpoints + +The easiest way to receive a message asynchronously is to use the annotated listener +endpoint infrastructure. In a nutshell, it lets you expose a method of a managed +bean as a JMS listener endpoint. The following example shows how to use it: + +``` +@Component +public class MyService { + + @JmsListener(destination = "myDestination") + public void processOrder(String data) { ... } +} +``` + +The idea of the preceding example is that, whenever a message is available on the`javax.jms.Destination` `myDestination`, the `processOrder` method is invoked +accordingly (in this case, with the content of the JMS message, similar to +what the [`MessageListenerAdapter`](#jms-receiving-async-message-listener-adapter)provides). + +The annotated endpoint infrastructure creates a message listener container +behind the scenes for each annotated method, by using a `JmsListenerContainerFactory`. +Such a container is not registered against the application context but can be easily +located for management purposes by using the `JmsListenerEndpointRegistry` bean. 
+ +| |`@JmsListener` is a repeatable annotation on Java 8, so you can associate<br/>several JMS destinations with the same method by adding additional `@JmsListener`declarations to it.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.5.1. Enable Listener Endpoint Annotations + +To enable support for `@JmsListener` annotations, you can add `@EnableJms` to one of +your `@Configuration` classes, as the following example shows: + +``` +@Configuration +@EnableJms +public class AppConfig { + + @Bean + public DefaultJmsListenerContainerFactory jmsListenerContainerFactory() { + DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory(); + factory.setConnectionFactory(connectionFactory()); + factory.setDestinationResolver(destinationResolver()); + factory.setSessionTransacted(true); + factory.setConcurrency("3-10"); + return factory; + } +} +``` + +By default, the infrastructure looks for a bean named `jmsListenerContainerFactory`as the source for the factory to use to create message listener containers. In this +case (and ignoring the JMS infrastructure setup), you can invoke the `processOrder`method with a core poll size of three threads and a maximum pool size of ten threads. + +You can customize the listener container factory to use for each annotation or you can +configure an explicit default by implementing the `JmsListenerConfigurer` interface. +The default is required only if at least one endpoint is registered without a specific +container factory. See the javadoc of classes that implement[`JmsListenerConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jms/annotation/JmsListenerConfigurer.html)for details and examples. 
+ +If you prefer [XML configuration](#jms-namespace), you can use the `<jms:annotation-driven>`element, as the following example shows: + +``` +<jms:annotation-driven/> + +<bean id="jmsListenerContainerFactory" + class="org.springframework.jms.config.DefaultJmsListenerContainerFactory"> + <property name="connectionFactory" ref="connectionFactory"/> + <property name="destinationResolver" ref="destinationResolver"/> + <property name="sessionTransacted" value="true"/> + <property name="concurrency" value="3-10"/> +</bean> +``` + +#### 4.5.2. Programmatic Endpoint Registration + +`JmsListenerEndpoint` provides a model of a JMS endpoint and is responsible for configuring +the container for that model. The infrastructure lets you programmatically configure endpoints +in addition to the ones that are detected by the `JmsListener` annotation. +The following example shows how to do so: + +``` +@Configuration +@EnableJms +public class AppConfig implements JmsListenerConfigurer { + + @Override + public void configureJmsListeners(JmsListenerEndpointRegistrar registrar) { + SimpleJmsListenerEndpoint endpoint = new SimpleJmsListenerEndpoint(); + endpoint.setId("myJmsEndpoint"); + endpoint.setDestination("anotherQueue"); + endpoint.setMessageListener(message -> { + // processing + }); + registrar.registerEndpoint(endpoint); + } +} +``` + +In the preceding example, we used `SimpleJmsListenerEndpoint`, which provides the actual`MessageListener` to invoke. However, you could also build your own endpoint variant +to describe a custom invocation mechanism. + +Note that you could skip the use of `@JmsListener` altogether +and programmatically register only your endpoints through `JmsListenerConfigurer`. + +#### 4.5.3. Annotated Endpoint Method Signature + +So far, we have been injecting a simple `String` in our endpoint, but it can actually +have a very flexible method signature. 
In the following example, we rewrite it to inject the `Order` with +a custom header: + +``` +@Component +public class MyService { + + @JmsListener(destination = "myDestination") + public void processOrder(Order order, @Header("order_type") String orderType) { + ... + } +} +``` + +The main elements you can inject in JMS listener endpoints are as follows: + +* The raw `javax.jms.Message` or any of its subclasses (provided that it + matches the incoming message type). + +* The `javax.jms.Session` for optional access to the native JMS API (for example, for sending + a custom reply). + +* The `org.springframework.messaging.Message` that represents the incoming JMS message. + Note that this message holds both the custom and the standard headers (as defined + by `JmsHeaders`). + +* `@Header`-annotated method arguments to extract a specific header value, including + standard JMS headers. + +* A `@Headers`-annotated argument that must also be assignable to `java.util.Map` for + getting access to all headers. + +* A non-annotated element that is not one of the supported types (`Message` or`Session`) is considered to be the payload. You can make that explicit by annotating + the parameter with `@Payload`. You can also turn on validation by adding an extra`@Valid`. + +The ability to inject Spring’s `Message` abstraction is particularly useful to benefit +from all the information stored in the transport-specific message without relying on +transport-specific API. The following example shows how to do so: + +``` +@JmsListener(destination = "myDestination") +public void processOrder(Message<Order> order) { ... } +``` + +Handling of method arguments is provided by `DefaultMessageHandlerMethodFactory`, which you can +further customize to support additional method arguments. You can customize the conversion and validation +support there as well. 
+
+For instance, if we want to make sure our `Order` is valid before processing it, we can
+annotate the payload with `@Valid` and configure the necessary validator, as the following example shows:
+
+```
+@Configuration
+@EnableJms
+public class AppConfig implements JmsListenerConfigurer {
+
+    @Override
+    public void configureJmsListeners(JmsListenerEndpointRegistrar registrar) {
+        registrar.setMessageHandlerMethodFactory(myHandlerMethodFactory());
+    }
+
+    @Bean
+    public DefaultMessageHandlerMethodFactory myHandlerMethodFactory() {
+        DefaultMessageHandlerMethodFactory factory = new DefaultMessageHandlerMethodFactory();
+        factory.setValidator(myValidator());
+        return factory;
+    }
+}
+```
+
+#### 4.5.4. Response Management
+
+The existing support in [`MessageListenerAdapter`](#jms-receiving-async-message-listener-adapter) already lets your method have a non-`void` return type. When that is the case, the result of
+the invocation is encapsulated in a `javax.jms.Message`, sent either in the destination specified
+in the `JMSReplyTo` header of the original message or in the default destination configured on
+the listener. You can now set that default destination by using the `@SendTo` annotation of the
+messaging abstraction. 
+ +Assuming that our `processOrder` method should now return an `OrderStatus`, we can write it +to automatically send a response, as the following example shows: + +``` +@JmsListener(destination = "myDestination") +@SendTo("status") +public OrderStatus processOrder(Order order) { + // order processing + return status; +} +``` + +| |If you have several `@JmsListener`-annotated methods, you can also place the `@SendTo`annotation at the class level to share a default reply destination.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you need to set additional headers in a transport-independent manner, you can return a`Message` instead, with a method similar to the following: + +``` +@JmsListener(destination = "myDestination") +@SendTo("status") +public Message<OrderStatus> processOrder(Order order) { + // order processing + return MessageBuilder + .withPayload(status) + .setHeader("code", 1234) + .build(); +} +``` + +If you need to compute the response destination at runtime, you can encapsulate your response +in a `JmsResponse` instance that also provides the destination to use at runtime. 
We can rewrite the previous +example as follows: + +``` +@JmsListener(destination = "myDestination") +public JmsResponse<Message<OrderStatus>> processOrder(Order order) { + // order processing + Message<OrderStatus> response = MessageBuilder + .withPayload(status) + .setHeader("code", 1234) + .build(); + return JmsResponse.forQueue(response, "status"); +} +``` + +Finally, if you need to specify some QoS values for the response such as the priority or +the time to live, you can configure the `JmsListenerContainerFactory` accordingly, +as the following example shows: + +``` +@Configuration +@EnableJms +public class AppConfig { + + @Bean + public DefaultJmsListenerContainerFactory jmsListenerContainerFactory() { + DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory(); + factory.setConnectionFactory(connectionFactory()); + QosSettings replyQosSettings = new QosSettings(); + replyQosSettings.setPriority(2); + replyQosSettings.setTimeToLive(10000); + factory.setReplyQosSettings(replyQosSettings); + return factory; + } +} +``` + +### 4.6. JMS Namespace Support + +Spring provides an XML namespace for simplifying JMS configuration. To use the JMS +namespace elements, you need to reference the JMS schema, as the following example shows: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:jms="http://www.springframework.org/schema/jms" (1) + xsi:schemaLocation=" + http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/jms https://www.springframework.org/schema/jms/spring-jms.xsd"> + + <!-- bean definitions here --> + +</beans> +``` + +|**1**|Referencing the JMS schema.| +|-----|---------------------------| + +The namespace consists of three top-level elements: `<annotation-driven/>`, `<listener-container/>`and `<jca-listener-container/>`. 
`<annotation-driven/>` enables the use of [annotation-driven listener endpoints](#jms-annotated). `<listener-container/>` and `<jca-listener-container/>`define shared listener container configuration and can contain `<listener/>` child elements. +The following example shows a basic configuration for two listeners: + +``` +<jms:listener-container> + + <jms:listener destination="queue.orders" ref="orderService" method="placeOrder"/> + + <jms:listener destination="queue.confirmations" ref="confirmationLogger" method="log"/> + +</jms:listener-container> +``` + +The preceding example is equivalent to creating two distinct listener container bean +definitions and two distinct `MessageListenerAdapter` bean definitions, as shown +in [Using `MessageListenerAdapter`](#jms-receiving-async-message-listener-adapter). In addition to the attributes shown +in the preceding example, the `listener` element can contain several optional ones. +The following table describes all of the available attributes: + +| Attribute | Description | +|------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `id` | A bean name for the hosting listener container. If not specified, a bean name is<br/>automatically generated. | +|`destination` (required)| The destination name for this listener, resolved through the `DestinationResolver`strategy. | +| `ref` (required) | The bean name of the handler object. | +| `method` | The name of the handler method to invoke. If the `ref` attribute points to a `MessageListener`or Spring `SessionAwareMessageListener`, you can omit this attribute. 
| +| `response-destination` |The name of the default response destination to which to send response messages. This is<br/>applied in case of a request message that does not carry a `JMSReplyTo` field. The<br/>type of this destination is determined by the listener-container’s`response-destination-type` attribute. Note that this applies only to a listener method with a<br/>return value, for which each result object is converted into a response message.| +| `subscription` | The name of the durable subscription, if any. | +| `selector` | An optional message selector for this listener. | +| `concurrency` | The number of concurrent sessions or consumers to start for this listener. This value can either be<br/>a simple number indicating the maximum number (for example, `5`) or a range indicating the lower<br/>as well as the upper limit (for example, `3-5`). Note that a specified minimum is only a hint<br/>and might be ignored at runtime. The default is the value provided by the container. | + +The `<listener-container/>` element also accepts several optional attributes. This +allows for customization of the various strategies (for example, `taskExecutor` and`destinationResolver`) as well as basic JMS settings and resource references. By using +these attributes, you can define highly-customized listener containers while +still benefiting from the convenience of the namespace. 
+ +You can automatically expose such settings as a `JmsListenerContainerFactory` by +specifying the `id` of the bean to expose through the `factory-id` attribute, +as the following example shows: + +``` +<jms:listener-container connection-factory="myConnectionFactory" + task-executor="myTaskExecutor" + destination-resolver="myDestinationResolver" + transaction-manager="myTransactionManager" + concurrency="10"> + + <jms:listener destination="queue.orders" ref="orderService" method="placeOrder"/> + + <jms:listener destination="queue.confirmations" ref="confirmationLogger" method="log"/> + +</jms:listener-container> +``` + +The following table describes all available attributes. See the class-level javadoc +of the [`AbstractMessageListenerContainer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jms/listener/AbstractMessageListenerContainer.html)and its concrete subclasses for more details on the individual properties. The javadoc +also provides a discussion of transaction choices and message redelivery scenarios. + +| Attribute | Description | +|---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `container-type` | The type of this listener container. The available options are `default`, `simple`,`default102`, or `simple102` (the default option is `default`). 
| +| `container-class` | A custom listener container implementation class as a fully qualified class name.<br/>The default is Spring’s standard `DefaultMessageListenerContainer` or`SimpleMessageListenerContainer`, according to the `container-type` attribute. | +| `factory-id` | Exposes the settings defined by this element as a `JmsListenerContainerFactory`with the specified `id` so that they can be reused with other endpoints. | +| `connection-factory` | A reference to the JMS `ConnectionFactory` bean (the default bean name is`connectionFactory`). | +| `task-executor` | A reference to the Spring `TaskExecutor` for the JMS listener invokers. | +| `destination-resolver` | A reference to the `DestinationResolver` strategy for resolving JMS `Destination` instances. | +| `message-converter` | A reference to the `MessageConverter` strategy for converting JMS Messages to listener<br/>method arguments. The default is a `SimpleMessageConverter`. | +| `error-handler` | A reference to an `ErrorHandler` strategy for handling any uncaught exceptions that<br/>may occur during the execution of the `MessageListener`. | +| `destination-type` | The JMS destination type for this listener: `queue`, `topic`, `durableTopic`, `sharedTopic`,<br/>or `sharedDurableTopic`. This potentially enables the `pubSubDomain`, `subscriptionDurable`and `subscriptionShared` properties of the container. The default is `queue` (which disables<br/>those three properties). | +|`response-destination-type`| The JMS destination type for responses: `queue` or `topic`. The default is the value of the`destination-type` attribute. | +| `client-id` | The JMS client ID for this listener container. You must specify it when you use<br/>durable subscriptions. | +| `cache` | The cache level for JMS resources: `none`, `connection`, `session`, `consumer`, or`auto`. 
By default (`auto`), the cache level is effectively `consumer`, unless<br/>an external transaction manager has been specified — in which case, the effective<br/>default will be `none` (assuming Java EE-style transaction management, where the given<br/>ConnectionFactory is an XA-aware pool). | +| `acknowledge` | The native JMS acknowledge mode: `auto`, `client`, `dups-ok`, or `transacted`. A value<br/>of `transacted` activates a locally transacted `Session`. As an alternative, you can specify<br/>the `transaction-manager` attribute, described later in table. The default is `auto`. | +| `transaction-manager` | A reference to an external `PlatformTransactionManager` (typically an XA-based<br/>transaction coordinator, such as Spring’s `JtaTransactionManager`). If not specified,<br/>native acknowledging is used (see the `acknowledge` attribute). | +| `concurrency` |The number of concurrent sessions or consumers to start for each listener. It can either be<br/>a simple number indicating the maximum number (for example, `5`) or a range indicating the<br/>lower as well as the upper limit (for example, `3-5`). Note that a specified minimum is just a<br/>hint and might be ignored at runtime. The default is `1`. You should keep concurrency limited to `1` in<br/>case of a topic listener or if queue ordering is important. Consider raising it for<br/>general queues.| +| `prefetch` | The maximum number of messages to load into a single session. Note that raising this<br/>number might lead to starvation of concurrent consumers. | +| `receive-timeout` | The timeout (in milliseconds) to use for receive calls. The default is `1000` (one<br/>second). `-1` indicates no timeout. | +| `back-off` | Specifies the `BackOff` instance to use to compute the interval between recovery<br/>attempts. If the `BackOffExecution` implementation returns `BackOffExecution#STOP`,<br/>the listener container does not further try to recover. The `recovery-interval`value is ignored when this property is set. 
The default is a `FixedBackOff` with<br/>an interval of 5000 milliseconds (that is, five seconds). | +| `recovery-interval` | Specifies the interval between recovery attempts, in milliseconds. It offers a convenient<br/>way to create a `FixedBackOff` with the specified interval. For more recovery<br/>options, consider specifying a `BackOff` instance instead. The default is 5000 milliseconds<br/>(that is, five seconds). | +| `phase` | The lifecycle phase within which this container should start and stop. The lower the<br/>value, the earlier this container starts and the later it stops. The default is`Integer.MAX_VALUE`, meaning that the container starts as late as possible and stops as<br/>soon as possible. | + +Configuring a JCA-based listener container with the `jms` schema support is very similar, +as the following example shows: + +``` +<jms:jca-listener-container resource-adapter="myResourceAdapter" + destination-resolver="myDestinationResolver" + transaction-manager="myTransactionManager" + concurrency="10"> + + <jms:listener destination="queue.orders" ref="myMessageListener"/> + +</jms:jca-listener-container> +``` + +The following table describes the available configuration options for the JCA variant: + +| Attribute | Description | +|---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `factory-id` | Exposes the settings defined by this element as a `JmsListenerContainerFactory`with the specified `id` so that they can be reused with other endpoints. | +| `resource-adapter` | A reference to the JCA `ResourceAdapter` bean (the default bean name is`resourceAdapter`). 
| 
+| `activation-spec-factory` | A reference to the `JmsActivationSpecFactory`. The default is to autodetect the JMS<br/>provider and its `ActivationSpec` class (see [`DefaultJmsActivationSpecFactory`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jms/listener/endpoint/DefaultJmsActivationSpecFactory.html)). |
+| `destination-resolver` | A reference to the `DestinationResolver` strategy for resolving JMS `Destinations`. |
+| `message-converter` | A reference to the `MessageConverter` strategy for converting JMS Messages to listener<br/>method arguments. The default is `SimpleMessageConverter`. |
+| `destination-type` | The JMS destination type for this listener: `queue`, `topic`, `durableTopic`, `sharedTopic`,<br/>or `sharedDurableTopic`. This potentially enables the `pubSubDomain`, `subscriptionDurable`,<br/>and `subscriptionShared` properties of the container. The default is `queue` (which disables<br/>those three properties). |
+|`response-destination-type`| The JMS destination type for responses: `queue` or `topic`. The default is the value of the `destination-type` attribute. |
+| `client-id` | The JMS client ID for this listener container. It needs to be specified when using<br/>durable subscriptions. |
+| `acknowledge` | The native JMS acknowledge mode: `auto`, `client`, `dups-ok`, or `transacted`. A value<br/>of `transacted` activates a locally transacted `Session`. As an alternative, you can specify<br/>the `transaction-manager` attribute described later. The default is `auto`. |
+| `transaction-manager` | A reference to a Spring `JtaTransactionManager` or a `javax.transaction.TransactionManager` for kicking off an XA transaction for each<br/>incoming message. If not specified, native acknowledging is used (see the `acknowledge` attribute). |
+| `concurrency` |The number of concurrent sessions or consumers to start for each listener. 
It can either be<br/>a simple number indicating the maximum number (for example `5`) or a range indicating the<br/>lower as well as the upper limit (for example, `3-5`). Note that a specified minimum is only a<br/>hint and is typically ignored at runtime when you use a JCA listener container.<br/>The default is 1.| +| `prefetch` | The maximum number of messages to load into a single session. Note that raising this<br/>number might lead to starvation of concurrent consumers. | + +## 5. JMX + +The JMX (Java Management Extensions) support in Spring provides features that let you +easily and transparently integrate your Spring application into a JMX infrastructure. + +JMX? + +This chapter is not an introduction to JMX. It does not try to explain why you might want +to use JMX. If you are new to JMX, see [Further Resources](#jmx-resources) at the end of this chapter. + +Specifically, Spring’s JMX support provides four core features: + +* The automatic registration of any Spring bean as a JMX MBean. + +* A flexible mechanism for controlling the management interface of your beans. + +* The declarative exposure of MBeans over remote, JSR-160 connectors. + +* The simple proxying of both local and remote MBean resources. + +These features are designed to work without coupling your application components to +either Spring or JMX interfaces and classes. Indeed, for the most part, your application +classes need not be aware of either Spring or JMX in order to take advantage of the +Spring JMX features. + +### 5.1. Exporting Your Beans to JMX + +The core class in Spring’s JMX framework is the `MBeanExporter`. This class is +responsible for taking your Spring beans and registering them with a JMX `MBeanServer`. 
+For example, consider the following class: + +``` +package org.springframework.jmx; + +public class JmxTestBean implements IJmxTestBean { + + private String name; + private int age; + private boolean isSuperman; + + public int getAge() { + return age; + } + + public void setAge(int age) { + this.age = age; + } + + public void setName(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public int add(int x, int y) { + return x + y; + } + + public void dontExposeMe() { + throw new RuntimeException(); + } +} +``` + +To expose the properties and methods of this bean as attributes and operations of an +MBean, you can configure an instance of the `MBeanExporter` class in your +configuration file and pass in the bean, as the following example shows: + +``` +<beans> + <!-- this bean must not be lazily initialized if the exporting is to happen --> + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter" lazy-init="false"> + <property name="beans"> + <map> + <entry key="bean:name=testBean1" value-ref="testBean"/> + </map> + </property> + </bean> + <bean id="testBean" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> +</beans> +``` + +The pertinent bean definition from the preceding configuration snippet is the `exporter`bean. The `beans` property tells the `MBeanExporter` exactly which of your beans must be +exported to the JMX `MBeanServer`. In the default configuration, the key of each entry +in the `beans` `Map` is used as the `ObjectName` for the bean referenced by the +corresponding entry value. You can change this behavior, as described in [Controlling `ObjectName` Instances for Your Beans](#jmx-naming). + +With this configuration, the `testBean` bean is exposed as an MBean under the`ObjectName` `bean:name=testBean1`. 
By default, all `public` properties of the bean +are exposed as attributes and all `public` methods (except those inherited from the`Object` class) are exposed as operations. + +| |`MBeanExporter` is a `Lifecycle` bean (see [Startup and Shutdown Callbacks](core.html#beans-factory-lifecycle-processor)). By default, MBeans are exported as late as possible during<br/>the application lifecycle. You can configure the `phase` at which<br/>the export happens or disable automatic registration by setting the `autoStartup` flag.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 5.1.1. Creating an MBeanServer + +The configuration shown in the [preceding section](#jmx-exporting) assumes that the +application is running in an environment that has one (and only one) `MBeanServer`already running. In this case, Spring tries to locate the running `MBeanServer` and +register your beans with that server (if any). This behavior is useful when your +application runs inside a container (such as Tomcat or IBM WebSphere) that has its +own `MBeanServer`. + +However, this approach is of no use in a standalone environment or when running inside +a container that does not provide an `MBeanServer`. To address this, you can create an`MBeanServer` instance declaratively by adding an instance of the`org.springframework.jmx.support.MBeanServerFactoryBean` class to your configuration. 
+You can also ensure that a specific `MBeanServer` is used by setting the value of the`MBeanExporter` instance’s `server` property to the `MBeanServer` value returned by an`MBeanServerFactoryBean`, as the following example shows: + +``` +<beans> + + <bean id="mbeanServer" class="org.springframework.jmx.support.MBeanServerFactoryBean"/> + + <!-- + this bean needs to be eagerly pre-instantiated in order for the exporting to occur; + this means that it must not be marked as lazily initialized + --> + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="bean:name=testBean1" value-ref="testBean"/> + </map> + </property> + <property name="server" ref="mbeanServer"/> + </bean> + + <bean id="testBean" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> + +</beans> +``` + +In the preceding example, an instance of `MBeanServer` is created by the `MBeanServerFactoryBean` and is +supplied to the `MBeanExporter` through the `server` property. When you supply your own`MBeanServer` instance, the `MBeanExporter` does not try to locate a running`MBeanServer` and uses the supplied `MBeanServer` instance. For this to work +correctly, you must have a JMX implementation on your classpath. + +#### 5.1.2. Reusing an Existing `MBeanServer` + +If no server is specified, the `MBeanExporter` tries to automatically detect a running`MBeanServer`. This works in most environments, where only one `MBeanServer` instance is +used. However, when multiple instances exist, the exporter might pick the wrong server. 
+In such cases, you should use the `MBeanServer` `agentId` to indicate which instance is to
+be used, as the following example shows:
+
+```
+<beans>
+    <bean id="mbeanServer" class="org.springframework.jmx.support.MBeanServerFactoryBean">
+        <!-- indicate to first look for a server -->
+        <property name="locateExistingServerIfPossible" value="true"/>
+        <!-- search for the MBeanServer instance with the given agentId -->
+        <property name="agentId" value="MBeanServer_instance_agentId"/>
+    </bean>
+    <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter">
+        <property name="server" ref="mbeanServer"/>
+        ...
+    </bean>
+</beans>
+```
+
+For platforms or cases where the existing `MBeanServer` has a dynamic (or unknown) `agentId` that is retrieved through lookup methods, you should use [factory-method](core.html#beans-factory-class-static-factory-method),
+as the following example shows:
+
+```
+<beans>
+    <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter">
+        <property name="server">
+            <!-- Custom MBeanServerLocator -->
+            <bean class="platform.package.MBeanServerLocator" factory-method="locateMBeanServer"/>
+        </property>
+    </bean>
+
+    <!-- other beans here -->
+
+</beans>
+```
+
+#### 5.1.3. Lazily Initialized MBeans
+
+If you configure a bean with an `MBeanExporter` that is also configured for lazy
+initialization, the `MBeanExporter` does not break this contract and avoids
+instantiating the bean. Instead, it registers a proxy with the `MBeanServer` and
+defers obtaining the bean from the container until the first invocation on the proxy
+occurs.
+
+#### 5.1.4. Automatic Registration of MBeans
+
+Any beans that are exported through the `MBeanExporter` and are already valid MBeans are
+registered as-is with the `MBeanServer` without further intervention from Spring. 
You can cause MBeans +to be automatically detected by the `MBeanExporter` by setting the `autodetect`property to `true`, as the following example shows: + +``` +<bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="autodetect" value="true"/> +</bean> + +<bean name="spring:mbean=true" class="org.springframework.jmx.export.TestDynamicMBean"/> +``` + +In the preceding example, the bean called `spring:mbean=true` is already a valid JMX MBean +and is automatically registered by Spring. By default, a bean that is autodetected for JMX +registration has its bean name used as the `ObjectName`. You can override this behavior, +as detailed in [Controlling `ObjectName` Instances for Your Beans](#jmx-naming). + +#### 5.1.5. Controlling the Registration Behavior + +Consider the scenario where a Spring `MBeanExporter` attempts to register an `MBean`with an `MBeanServer` by using the `ObjectName` `bean:name=testBean1`. If an `MBean`instance has already been registered under that same `ObjectName`, the default behavior +is to fail (and throw an `InstanceAlreadyExistsException`). + +You can control exactly what happens when an `MBean` is +registered with an `MBeanServer`. Spring’s JMX support allows for three different +registration behaviors to control the registration behavior when the registration +process finds that an `MBean` has already been registered under the same `ObjectName`. +The following table summarizes these registration behaviors: + +|Registration behavior| Explanation | +|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `FAIL_ON_EXISTING` | This is the default registration behavior. 
If an `MBean` instance has already been<br/>registered under the same `ObjectName`, the `MBean` that is being registered is not<br/>registered, and an `InstanceAlreadyExistsException` is thrown. The existing`MBean` is unaffected. | +| `IGNORE_EXISTING` |If an `MBean` instance has already been registered under the same `ObjectName`, the`MBean` that is being registered is not registered. The existing `MBean` is<br/>unaffected, and no `Exception` is thrown. This is useful in settings where<br/>multiple applications want to share a common `MBean` in a shared `MBeanServer`.| +| `REPLACE_EXISTING` | If an `MBean` instance has already been registered under the same `ObjectName`, the<br/>existing `MBean` that was previously registered is unregistered, and the new`MBean` is registered in its place (the new `MBean` effectively replaces the<br/>previous instance). | + +The values in the preceding table are defined as enums on the `RegistrationPolicy` class. +If you want to change the default registration behavior, you need to set the value of the`registrationPolicy` property on your `MBeanExporter` definition to one of those +values. + +The following example shows how to change from the default registration +behavior to the `REPLACE_EXISTING` behavior: + +``` +<beans> + + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="bean:name=testBean1" value-ref="testBean"/> + </map> + </property> + <property name="registrationPolicy" value="REPLACE_EXISTING"/> + </bean> + + <bean id="testBean" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> + +</beans> +``` + +### 5.2. Controlling the Management Interface of Your Beans + +In the example in the [preceding section](#jmx-exporting-registration-behavior), +you had little control over the management interface of your bean. 
All of the `public`properties and methods of each exported bean were exposed as JMX attributes and +operations, respectively. To exercise finer-grained control over exactly which +properties and methods of your exported beans are actually exposed as JMX attributes +and operations, Spring JMX provides a comprehensive and extensible mechanism for +controlling the management interfaces of your beans. + +#### 5.2.1. Using the `MBeanInfoAssembler` Interface + +Behind the scenes, the `MBeanExporter` delegates to an implementation of the`org.springframework.jmx.export.assembler.MBeanInfoAssembler` interface, which is +responsible for defining the management interface of each bean that is exposed. +The default implementation,`org.springframework.jmx.export.assembler.SimpleReflectiveMBeanInfoAssembler`, +defines a management interface that exposes all public properties and methods +(as you saw in the examples in the preceding sections). Spring provides two +additional implementations of the `MBeanInfoAssembler` interface that let you +control the generated management interface by using either source-level metadata +or any arbitrary interface. + +#### 5.2.2. Using Source-level Metadata: Java Annotations + +By using the `MetadataMBeanInfoAssembler`, you can define the management interfaces +for your beans by using source-level metadata. The reading of metadata is encapsulated +by the `org.springframework.jmx.export.metadata.JmxAttributeSource` interface. +Spring JMX provides a default implementation that uses Java annotations, namely`org.springframework.jmx.export.annotation.AnnotationJmxAttributeSource`. +You must configure the `MetadataMBeanInfoAssembler` with an implementation instance of +the `JmxAttributeSource` interface for it to function correctly (there is no default). + +To mark a bean for export to JMX, you should annotate the bean class with the`ManagedResource` annotation. 
You must mark each method you wish to expose as an operation +with the `ManagedOperation` annotation and mark each property you wish to expose +with the `ManagedAttribute` annotation. When marking properties, you can omit +either the annotation of the getter or the setter to create a write-only or read-only +attribute, respectively. + +| |A `ManagedResource`-annotated bean must be public, as must the methods exposing<br/>an operation or an attribute.| +|---|-----------------------------------------------------------------------------------------------------------------| + +The following example shows the annotated version of the `JmxTestBean` class that we +used in [Creating an MBeanServer](#jmx-exporting-mbeanserver): + +``` +package org.springframework.jmx; + +import org.springframework.jmx.export.annotation.ManagedResource; +import org.springframework.jmx.export.annotation.ManagedOperation; +import org.springframework.jmx.export.annotation.ManagedAttribute; + +@ManagedResource( + objectName="bean:name=testBean4", + description="My Managed Bean", + log=true, + logFile="jmx.log", + currencyTimeLimit=15, + persistPolicy="OnUpdate", + persistPeriod=200, + persistLocation="foo", + persistName="bar") +public class AnnotationTestBean implements IJmxTestBean { + + private String name; + private int age; + + @ManagedAttribute(description="The Age Attribute", currencyTimeLimit=15) + public int getAge() { + return age; + } + + public void setAge(int age) { + this.age = age; + } + + @ManagedAttribute(description="The Name Attribute", + currencyTimeLimit=20, + defaultValue="bar", + persistPolicy="OnUpdate") + public void setName(String name) { + this.name = name; + } + + @ManagedAttribute(defaultValue="foo", persistPeriod=300) + public String getName() { + return name; + } + + @ManagedOperation(description="Add two numbers") + @ManagedOperationParameters({ + @ManagedOperationParameter(name = "x", description = "The first number"), + @ManagedOperationParameter(name = "y", 
description = "The second number")}) + public int add(int x, int y) { + return x + y; + } + + public void dontExposeMe() { + throw new RuntimeException(); + } + +} +``` + +In the preceding example, you can see that the `JmxTestBean` class is marked with the`ManagedResource` annotation and that this `ManagedResource` annotation is configured +with a set of properties. These properties can be used to configure various aspects +of the MBean that is generated by the `MBeanExporter` and are explained in greater +detail later in [Source-level Metadata Types](#jmx-interface-metadata-types). + +Both the `age` and `name` properties are annotated with the `ManagedAttribute`annotation, but, in the case of the `age` property, only the getter is marked. +This causes both of these properties to be included in the management interface +as attributes, but the `age` attribute is read-only. + +Finally, the `add(int, int)` method is marked with the `ManagedOperation` attribute, +whereas the `dontExposeMe()` method is not. This causes the management interface to +contain only one operation (`add(int, int)`) when you use the `MetadataMBeanInfoAssembler`. 
+ +The following configuration shows how you can configure the `MBeanExporter` to use the`MetadataMBeanInfoAssembler`: + +``` +<beans> + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="assembler" ref="assembler"/> + <property name="namingStrategy" ref="namingStrategy"/> + <property name="autodetect" value="true"/> + </bean> + + <bean id="jmxAttributeSource" + class="org.springframework.jmx.export.annotation.AnnotationJmxAttributeSource"/> + + <!-- will create management interface using annotation metadata --> + <bean id="assembler" + class="org.springframework.jmx.export.assembler.MetadataMBeanInfoAssembler"> + <property name="attributeSource" ref="jmxAttributeSource"/> + </bean> + + <!-- will pick up the ObjectName from the annotation --> + <bean id="namingStrategy" + class="org.springframework.jmx.export.naming.MetadataNamingStrategy"> + <property name="attributeSource" ref="jmxAttributeSource"/> + </bean> + + <bean id="testBean" class="org.springframework.jmx.AnnotationTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> +</beans> +``` + +In the preceding example, an `MetadataMBeanInfoAssembler` bean has been configured with an +instance of the `AnnotationJmxAttributeSource` class and passed to the `MBeanExporter`through the assembler property. This is all that is required to take advantage of +metadata-driven management interfaces for your Spring-exposed MBeans. + +#### 5.2.3. Source-level Metadata Types + +The following table describes the source-level metadata types that are available for use in Spring JMX: + +| Purpose | Annotation | Annotation Type | +|---------------------------------------------------------|--------------------------------------------------------------|---------------------------------| +|Mark all instances of a `Class` as JMX managed resources.| `@ManagedResource` | Class | +| Mark a method as a JMX operation. 
| `@ManagedOperation` | Method | +| Mark a getter or setter as one half of a JMX attribute. | `@ManagedAttribute` |Method (only getters and setters)| +| Define descriptions for operation parameters. |`@ManagedOperationParameter` and `@ManagedOperationParameters`| Method | + +The following table describes the configuration parameters that are available for use on these source-level +metadata types: + +| Parameter | Description | Applies to | +|-------------------|-------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------| +| `ObjectName` |Used by `MetadataNamingStrategy` to determine the `ObjectName` of a managed resource.| `ManagedResource` | +| `description` | Sets the friendly description of the resource, attribute or operation. |`ManagedResource`, `ManagedAttribute`, `ManagedOperation`, or `ManagedOperationParameter`| +|`currencyTimeLimit`| Sets the value of the `currencyTimeLimit` descriptor field. | `ManagedResource` or `ManagedAttribute` | +| `defaultValue` | Sets the value of the `defaultValue` descriptor field. | `ManagedAttribute` | +| `log` | Sets the value of the `log` descriptor field. | `ManagedResource` | +| `logFile` | Sets the value of the `logFile` descriptor field. | `ManagedResource` | +| `persistPolicy` | Sets the value of the `persistPolicy` descriptor field. | `ManagedResource` | +| `persistPeriod` | Sets the value of the `persistPeriod` descriptor field. | `ManagedResource` | +| `persistLocation` | Sets the value of the `persistLocation` descriptor field. | `ManagedResource` | +| `persistName` | Sets the value of the `persistName` descriptor field. | `ManagedResource` | +| `name` | Sets the display name of an operation parameter. | `ManagedOperationParameter` | +| `index` | Sets the index of an operation parameter. | `ManagedOperationParameter` | + +#### 5.2.4. 
Using the `AutodetectCapableMBeanInfoAssembler` Interface
+
+To simplify configuration even further, Spring includes the `AutodetectCapableMBeanInfoAssembler` interface, which extends the `MBeanInfoAssembler` interface to add support for autodetection of MBean resources. If you configure the `MBeanExporter` with an instance of `AutodetectCapableMBeanInfoAssembler`, it is
+allowed to “vote” on the inclusion of beans for exposure to JMX.
+
+The only implementation of the `AutodetectCapableMBeanInfoAssembler` interface is
+the `MetadataMBeanInfoAssembler`, which votes to include any bean that is marked
+with the `ManagedResource` attribute. The default approach in this case is to use the
+bean name as the `ObjectName`, which results in a configuration similar to the following:
+
+```
+<beans>
+
+    <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter">
+        <!-- notice how no 'beans' are explicitly configured here -->
+        <property name="autodetect" value="true"/>
+        <property name="assembler" ref="assembler"/>
+    </bean>
+
+    <bean id="testBean" class="org.springframework.jmx.JmxTestBean">
+        <property name="name" value="TEST"/>
+        <property name="age" value="100"/>
+    </bean>
+
+    <bean id="assembler" class="org.springframework.jmx.export.assembler.MetadataMBeanInfoAssembler">
+        <property name="attributeSource">
+            <bean class="org.springframework.jmx.export.annotation.AnnotationJmxAttributeSource"/>
+        </property>
+    </bean>
+
+</beans>
+```
+
+Notice that, in the preceding configuration, no beans are passed to the `MBeanExporter`.
+However, the `JmxTestBean` is still registered, since it is marked with the `ManagedResource` attribute and the `MetadataMBeanInfoAssembler` detects this and votes to include it.
+The only problem with this approach is that the name of the `JmxTestBean` now has business
+meaning. You can address this issue by changing the default behavior for `ObjectName` creation as defined in [Controlling `ObjectName` Instances for Your Beans](#jmx-naming). 
+ +#### 5.2.5. Defining Management Interfaces by Using Java Interfaces + +In addition to the `MetadataMBeanInfoAssembler`, Spring also includes the`InterfaceBasedMBeanInfoAssembler`, which lets you constrain the methods and +properties that are exposed based on the set of methods defined in a collection of +interfaces. + +Although the standard mechanism for exposing MBeans is to use interfaces and a simple +naming scheme, `InterfaceBasedMBeanInfoAssembler` extends this functionality by +removing the need for naming conventions, letting you use more than one interface +and removing the need for your beans to implement the MBean interfaces. + +Consider the following interface, which is used to define a management interface for the`JmxTestBean` class that we showed earlier: + +``` +public interface IJmxTestBean { + + public int add(int x, int y); + + public long myOperation(); + + public int getAge(); + + public void setAge(int age); + + public void setName(String name); + + public String getName(); + +} +``` + +This interface defines the methods and properties that are exposed as operations and +attributes on the JMX MBean. 
The following code shows how to configure Spring JMX to use +this interface as the definition for the management interface: + +``` +<beans> + + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="bean:name=testBean5" value-ref="testBean"/> + </map> + </property> + <property name="assembler"> + <bean class="org.springframework.jmx.export.assembler.InterfaceBasedMBeanInfoAssembler"> + <property name="managedInterfaces"> + <value>org.springframework.jmx.IJmxTestBean</value> + </property> + </bean> + </property> + </bean> + + <bean id="testBean" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> + +</beans> +``` + +In the preceding example, the `InterfaceBasedMBeanInfoAssembler` is configured to use the`IJmxTestBean` interface when constructing the management interface for any bean. It is +important to understand that beans processed by the `InterfaceBasedMBeanInfoAssembler`are not required to implement the interface used to generate the JMX management +interface. + +In the preceding case, the `IJmxTestBean` interface is used to construct all management +interfaces for all beans. In many cases, this is not the desired behavior, and you may +want to use different interfaces for different beans. In this case, you can pass`InterfaceBasedMBeanInfoAssembler` a `Properties` instance through the `interfaceMappings`property, where the key of each entry is the bean name and the value of each entry is a +comma-separated list of interface names to use for that bean. + +If no management interface is specified through either the `managedInterfaces` or`interfaceMappings` properties, the `InterfaceBasedMBeanInfoAssembler` reflects +on the bean and uses all of the interfaces implemented by that bean to create the +management interface. + +#### 5.2.6. 
Using `MethodNameBasedMBeanInfoAssembler` + +`MethodNameBasedMBeanInfoAssembler` lets you specify a list of method names +that are exposed to JMX as attributes and operations. The following code shows a sample +configuration: + +``` +<bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="bean:name=testBean5" value-ref="testBean"/> + </map> + </property> + <property name="assembler"> + <bean class="org.springframework.jmx.export.assembler.MethodNameBasedMBeanInfoAssembler"> + <property name="managedMethods"> + <value>add,myOperation,getName,setName,getAge</value> + </property> + </bean> + </property> +</bean> +``` + +In the preceding example, you can see that the `add` and `myOperation` methods are exposed as JMX +operations, and `getName()`, `setName(String)`, and `getAge()` are exposed as the +appropriate half of a JMX attribute. In the preceding code, the method mappings apply to +beans that are exposed to JMX. To control method exposure on a bean-by-bean basis, you can use +the `methodMappings` property of `MethodNameBasedMBeanInfoAssembler` to map bean names to +lists of method names. + +### 5.3. Controlling `ObjectName` Instances for Your Beans + +Behind the scenes, the `MBeanExporter` delegates to an implementation of the `ObjectNamingStrategy` to obtain an `ObjectName` instance for each of the beans it registers. +By default, the default implementation, `KeyNamingStrategy` uses the key of the `beans` `Map` as the `ObjectName`. In addition, the `KeyNamingStrategy` can map the key +of the `beans` `Map` to an entry in a `Properties` file (or files) to resolve the `ObjectName`. In addition to the `KeyNamingStrategy`, Spring provides two additional `ObjectNamingStrategy` implementations: the `IdentityNamingStrategy` (which builds an `ObjectName` based on the JVM identity of the bean) and the `MetadataNamingStrategy` (which +uses source-level metadata to obtain the `ObjectName`). + +#### 5.3.1. 
Reading `ObjectName` Instances from Properties + +You can configure your own `KeyNamingStrategy` instance and configure it to read`ObjectName` instances from a `Properties` instance rather than use a bean key. The`KeyNamingStrategy` tries to locate an entry in the `Properties` with a key +that corresponds to the bean key. If no entry is found or if the `Properties` instance is`null`, the bean key itself is used. + +The following code shows a sample configuration for the `KeyNamingStrategy`: + +``` +<beans> + + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="testBean" value-ref="testBean"/> + </map> + </property> + <property name="namingStrategy" ref="namingStrategy"/> + </bean> + + <bean id="testBean" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> + + <bean id="namingStrategy" class="org.springframework.jmx.export.naming.KeyNamingStrategy"> + <property name="mappings"> + <props> + <prop key="testBean">bean:name=testBean1</prop> + </props> + </property> + <property name="mappingLocations"> + <value>names1.properties,names2.properties</value> + </property> + </bean> + +</beans> +``` + +The preceding example configures an instance of `KeyNamingStrategy` with a `Properties` instance that +is merged from the `Properties` instance defined by the mapping property and the +properties files located in the paths defined by the mappings property. In this +configuration, the `testBean` bean is given an `ObjectName` of `bean:name=testBean1`, +since this is the entry in the `Properties` instance that has a key corresponding to the +bean key. + +If no entry in the `Properties` instance can be found, the bean key name is used as +the `ObjectName`. + +#### 5.3.2. Using `MetadataNamingStrategy` + +`MetadataNamingStrategy` uses the `objectName` property of the `ManagedResource`attribute on each bean to create the `ObjectName`. 
The following code shows the +configuration for the `MetadataNamingStrategy`: + +``` +<beans> + + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="testBean" value-ref="testBean"/> + </map> + </property> + <property name="namingStrategy" ref="namingStrategy"/> + </bean> + + <bean id="testBean" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> + + <bean id="namingStrategy" class="org.springframework.jmx.export.naming.MetadataNamingStrategy"> + <property name="attributeSource" ref="attributeSource"/> + </bean> + + <bean id="attributeSource" + class="org.springframework.jmx.export.annotation.AnnotationJmxAttributeSource"/> + +</beans> +``` + +If no `objectName` has been provided for the `ManagedResource` attribute, an`ObjectName` is created with the following +format: *[fully-qualified-package-name]:type=[short-classname],name=[bean-name]*. For +example, the generated `ObjectName` for the following bean would be`com.example:type=MyClass,name=myBean`: + +``` +<bean id="myBean" class="com.example.MyClass"/> +``` + +#### 5.3.3. Configuring Annotation-based MBean Export + +If you prefer to use [the annotation-based approach](#jmx-interface-metadata) to define +your management interfaces, a convenience subclass of `MBeanExporter` is available:`AnnotationMBeanExporter`. When defining an instance of this subclass, you no longer need the`namingStrategy`, `assembler`, and `attributeSource` configuration, +since it always uses standard Java annotation-based metadata (autodetection is +always enabled as well). 
In fact, rather than defining an `MBeanExporter` bean, an even +simpler syntax is supported by the `@EnableMBeanExport` `@Configuration` annotation, +as the following example shows: + +``` +@Configuration +@EnableMBeanExport +public class AppConfig { + +} +``` + +If you prefer XML-based configuration, the `<context:mbean-export/>` element serves the +same purpose and is shown in the following listing: + +``` +<context:mbean-export/> +``` + +If necessary, you can provide a reference to a particular MBean `server`, and the `defaultDomain` attribute (a property of `AnnotationMBeanExporter`) accepts an alternate +value for the generated MBean `ObjectName` domains. This is used in place of the +fully qualified package name as described in the previous section on [MetadataNamingStrategy](#jmx-naming-metadata), as the following example shows: + +``` +@EnableMBeanExport(server="myMBeanServer", defaultDomain="myDomain") +@Configuration +public class ContextConfiguration { + +} +``` + +The following example shows the XML equivalent of the preceding annotation-based example: + +``` +<context:mbean-export server="myMBeanServer" default-domain="myDomain"/> +``` + +| |Do not use interface-based AOP proxies in combination with autodetection of JMX<br/>annotations in your bean classes. Interface-based proxies “hide” the target class, which<br/>also hides the JMX-managed resource annotations. Hence, you should use target-class proxies in that<br/>case (through setting the 'proxy-target-class' flag on `<aop:config/>`, `<tx:annotation-driven/>` and so on). 
Otherwise, your JMX beans might be silently ignored at<br/>startup.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 5.4. Using JSR-160 Connectors + +For remote access, Spring JMX module offers two `FactoryBean` implementations inside the`org.springframework.jmx.support` package for creating both server- and client-side +connectors. + +#### 5.4.1. Server-side Connectors + +To have Spring JMX create, start, and expose a JSR-160 `JMXConnectorServer`, you can use the +following configuration: + +``` +<bean id="serverConnector" class="org.springframework.jmx.support.ConnectorServerFactoryBean"/> +``` + +By default, `ConnectorServerFactoryBean` creates a `JMXConnectorServer` bound to`service:jmx:jmxmp://localhost:9875`. The `serverConnector` bean thus exposes the +local `MBeanServer` to clients through the JMXMP protocol on localhost, port 9875. Note +that the JMXMP protocol is marked as optional by the JSR 160 specification. Currently, +the main open-source JMX implementation, MX4J, and the one provided with the JDK +do not support JMXMP. 
+ +To specify another URL and register the `JMXConnectorServer` itself with the`MBeanServer`, you can use the `serviceUrl` and `ObjectName` properties, respectively, +as the following example shows: + +``` +<bean id="serverConnector" + class="org.springframework.jmx.support.ConnectorServerFactoryBean"> + <property name="objectName" value="connector:name=rmi"/> + <property name="serviceUrl" + value="service:jmx:rmi://localhost/jndi/rmi://localhost:1099/myconnector"/> +</bean> +``` + +If the `ObjectName` property is set, Spring automatically registers your connector +with the `MBeanServer` under that `ObjectName`. The following example shows the full set of +parameters that you can pass to the `ConnectorServerFactoryBean` when creating a`JMXConnector`: + +``` +<bean id="serverConnector" + class="org.springframework.jmx.support.ConnectorServerFactoryBean"> + <property name="objectName" value="connector:name=iiop"/> + <property name="serviceUrl" + value="service:jmx:iiop://localhost/jndi/iiop://localhost:900/myconnector"/> + <property name="threaded" value="true"/> + <property name="daemon" value="true"/> + <property name="environment"> + <map> + <entry key="someKey" value="someValue"/> + </map> + </property> +</bean> +``` + +Note that, when you use a RMI-based connector, you need the lookup service (`tnameserv` or`rmiregistry`) to be started in order for the name registration to complete. If you +use Spring to export remote services for you through RMI, Spring has already +constructed an RMI registry. If not, you can easily start a registry by using the following +snippet of configuration: + +``` +<bean id="registry" class="org.springframework.remoting.rmi.RmiRegistryFactoryBean"> + <property name="port" value="1099"/> +</bean> +``` + +#### 5.4.2. 
Client-side Connectors + +To create an `MBeanServerConnection` to a remote JSR-160-enabled `MBeanServer`, you can use the`MBeanServerConnectionFactoryBean`, as the following example shows: + +``` +<bean id="clientConnector" class="org.springframework.jmx.support.MBeanServerConnectionFactoryBean"> + <property name="serviceUrl" value="service:jmx:rmi://localhost/jndi/rmi://localhost:1099/jmxrmi"/> +</bean> +``` + +#### 5.4.3. JMX over Hessian or SOAP + +JSR-160 permits extensions to the way in which communication is done between the client +and the server. The examples shown in the preceding sections use the mandatory RMI-based implementation +required by the JSR-160 specification (IIOP and JRMP) and the (optional) JMXMP. By using +other providers or JMX implementations (such as [MX4J](http://mx4j.sourceforge.net)) you +can take advantage of protocols such as SOAP or Hessian over simple HTTP or SSL and others, +as the following example shows: + +``` +<bean id="serverConnector" class="org.springframework.jmx.support.ConnectorServerFactoryBean"> + <property name="objectName" value="connector:name=burlap"/> + <property name="serviceUrl" value="service:jmx:burlap://localhost:9874"/> +</bean> +``` + +In the preceding example, we used MX4J 3.0.0. See the official MX4J +documentation for more information. + +### 5.5. Accessing MBeans through Proxies + +Spring JMX lets you create proxies that re-route calls to MBeans that are registered in a +local or remote `MBeanServer`. These proxies provide you with a standard Java interface, +through which you can interact with your MBeans. 
The following code shows how to configure a +proxy for an MBean running in a local `MBeanServer`: + +``` +<bean id="proxy" class="org.springframework.jmx.access.MBeanProxyFactoryBean"> + <property name="objectName" value="bean:name=testBean"/> + <property name="proxyInterface" value="org.springframework.jmx.IJmxTestBean"/> +</bean> +``` + +In the preceding example, you can see that a proxy is created for the MBean registered under the`ObjectName` of `bean:name=testBean`. The set of interfaces that the proxy implements +is controlled by the `proxyInterfaces` property, and the rules for mapping methods and +properties on these interfaces to operations and attributes on the MBean are the same +rules used by the `InterfaceBasedMBeanInfoAssembler`. + +The `MBeanProxyFactoryBean` can create a proxy to any MBean that is accessible through an`MBeanServerConnection`. By default, the local `MBeanServer` is located and used, but +you can override this and provide an `MBeanServerConnection` that points to a remote`MBeanServer` to cater for proxies that point to remote MBeans: + +``` +<bean id="clientConnector" + class="org.springframework.jmx.support.MBeanServerConnectionFactoryBean"> + <property name="serviceUrl" value="service:jmx:rmi://remotehost:9875"/> +</bean> + +<bean id="proxy" class="org.springframework.jmx.access.MBeanProxyFactoryBean"> + <property name="objectName" value="bean:name=testBean"/> + <property name="proxyInterface" value="org.springframework.jmx.IJmxTestBean"/> + <property name="server" ref="clientConnector"/> +</bean> +``` + +In the preceding example, we create an `MBeanServerConnection` that points to a remote machine +that uses the `MBeanServerConnectionFactoryBean`. This `MBeanServerConnection` is then +passed to the `MBeanProxyFactoryBean` through the `server` property. The proxy that is +created forwards all invocations to the `MBeanServer` through this`MBeanServerConnection`. + +### 5.6. 
Notifications + +Spring’s JMX offering includes comprehensive support for JMX notifications. + +#### 5.6.1. Registering Listeners for Notifications + +Spring’s JMX support makes it easy to register any number of`NotificationListeners` with any number of MBeans (this includes MBeans exported by +Spring’s `MBeanExporter` and MBeans registered through some other mechanism). For +example, consider the scenario where one would like to be informed (through a`Notification`) each and every time an attribute of a target MBean changes. The following +example writes notifications to the console: + +``` +package com.example; + +import javax.management.AttributeChangeNotification; +import javax.management.Notification; +import javax.management.NotificationFilter; +import javax.management.NotificationListener; + +public class ConsoleLoggingNotificationListener + implements NotificationListener, NotificationFilter { + + public void handleNotification(Notification notification, Object handback) { + System.out.println(notification); + System.out.println(handback); + } + + public boolean isNotificationEnabled(Notification notification) { + return AttributeChangeNotification.class.isAssignableFrom(notification.getClass()); + } + +} +``` + +The following example adds `ConsoleLoggingNotificationListener` (defined in the preceding +example) to `notificationListenerMappings`: + +``` +<beans> + + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="bean:name=testBean1" value-ref="testBean"/> + </map> + </property> + <property name="notificationListenerMappings"> + <map> + <entry key="bean:name=testBean1"> + <bean class="com.example.ConsoleLoggingNotificationListener"/> + </entry> + </map> + </property> + </bean> + + <bean id="testBean" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> + +</beans> +``` + +With the preceding configuration in 
place, every time a JMX `Notification` is broadcast from +the target MBean (`bean:name=testBean1`), the `ConsoleLoggingNotificationListener` bean +that was registered as a listener through the `notificationListenerMappings` property is +notified. The `ConsoleLoggingNotificationListener` bean can then take whatever action +it deems appropriate in response to the `Notification`. + +You can also use straight bean names as the link between exported beans and listeners, +as the following example shows: + +``` +<beans> + + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="bean:name=testBean1" value-ref="testBean"/> + </map> + </property> + <property name="notificationListenerMappings"> + <map> + <entry key="testBean"> + <bean class="com.example.ConsoleLoggingNotificationListener"/> + </entry> + </map> + </property> + </bean> + + <bean id="testBean" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> + +</beans> +``` + +If you want to register a single `NotificationListener` instance for all of the beans +that the enclosing `MBeanExporter` exports, you can use the special wildcard (`*`) +as the key for an entry in the `notificationListenerMappings` property +map, as the following example shows: + +``` +<property name="notificationListenerMappings"> + <map> + <entry key="*"> + <bean class="com.example.ConsoleLoggingNotificationListener"/> + </entry> + </map> +</property> +``` + +If you need to do the inverse (that is, register a number of distinct listeners against +an MBean), you must instead use the `notificationListeners` list property (in +preference to the `notificationListenerMappings` property). This time, instead of +configuring a `NotificationListener` for a single MBean, we configure`NotificationListenerBean` instances. 
A `NotificationListenerBean` encapsulates a`NotificationListener` and the `ObjectName` (or `ObjectNames`) that it is to be +registered against in an `MBeanServer`. The `NotificationListenerBean` also encapsulates +a number of other properties, such as a `NotificationFilter` and an arbitrary handback +object that can be used in advanced JMX notification scenarios. + +The configuration when using `NotificationListenerBean` instances is not wildly +different to what was presented previously, as the following example shows: + +``` +<beans> + + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="bean:name=testBean1" value-ref="testBean"/> + </map> + </property> + <property name="notificationListeners"> + <list> + <bean class="org.springframework.jmx.export.NotificationListenerBean"> + <constructor-arg> + <bean class="com.example.ConsoleLoggingNotificationListener"/> + </constructor-arg> + <property name="mappedObjectNames"> + <list> + <value>bean:name=testBean1</value> + </list> + </property> + </bean> + </list> + </property> + </bean> + + <bean id="testBean" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> + +</beans> +``` + +The preceding example is equivalent to the first notification example. Assume, then, that +we want to be given a handback object every time a `Notification` is raised and that +we also want to filter out extraneous `Notifications` by supplying a`NotificationFilter`. 
The following example accomplishes these goals: + +``` +<beans> + + <bean id="exporter" class="org.springframework.jmx.export.MBeanExporter"> + <property name="beans"> + <map> + <entry key="bean:name=testBean1" value-ref="testBean1"/> + <entry key="bean:name=testBean2" value-ref="testBean2"/> + </map> + </property> + <property name="notificationListeners"> + <list> + <bean class="org.springframework.jmx.export.NotificationListenerBean"> + <constructor-arg ref="customerNotificationListener"/> + <property name="mappedObjectNames"> + <list> + <!-- handles notifications from two distinct MBeans --> + <value>bean:name=testBean1</value> + <value>bean:name=testBean2</value> + </list> + </property> + <property name="handback"> + <bean class="java.lang.String"> + <constructor-arg value="This could be anything..."/> + </bean> + </property> + <property name="notificationFilter" ref="customerNotificationListener"/> + </bean> + </list> + </property> + </bean> + + <!-- implements both the NotificationListener and NotificationFilter interfaces --> + <bean id="customerNotificationListener" class="com.example.ConsoleLoggingNotificationListener"/> + + <bean id="testBean1" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="TEST"/> + <property name="age" value="100"/> + </bean> + + <bean id="testBean2" class="org.springframework.jmx.JmxTestBean"> + <property name="name" value="ANOTHER TEST"/> + <property name="age" value="200"/> + </bean> + +</beans> +``` + +(For a full discussion of what a handback object is and, +indeed, what a `NotificationFilter` is, see the section of the JMX +specification (1.2) entitled 'The JMX Notification Model'.) + +#### 5.6.2. Publishing Notifications + +Spring provides support not only for registering to receive `Notifications` but also +for publishing `Notifications`. + +| |This section is really only relevant to Spring-managed beans that have<br/>been exposed as MBeans through an `MBeanExporter`. 
Any existing user-defined MBeans should<br/>use the standard JMX APIs for notification publication.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The key interface in Spring’s JMX notification publication support is the`NotificationPublisher` interface (defined in the`org.springframework.jmx.export.notification` package). Any bean that is going to be +exported as an MBean through an `MBeanExporter` instance can implement the related`NotificationPublisherAware` interface to gain access to a `NotificationPublisher`instance. The `NotificationPublisherAware` interface supplies an instance of a`NotificationPublisher` to the implementing bean through a simple setter method, +which the bean can then use to publish `Notifications`. + +As stated in the javadoc of the[`NotificationPublisher`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jmx/export/notification/NotificationPublisher.html)interface, managed beans that publish events through the `NotificationPublisher`mechanism are not responsible for the state management of notification listeners. +Spring’s JMX support takes care of handling all the JMX infrastructure issues. +All you need to do, as an application developer, is implement the`NotificationPublisherAware` interface and start publishing events by using the +supplied `NotificationPublisher` instance. Note that the `NotificationPublisher`is set after the managed bean has been registered with an `MBeanServer`. + +Using a `NotificationPublisher` instance is quite straightforward. 
You create a JMX`Notification` instance (or an instance of an appropriate `Notification` subclass), +populate the notification with the data pertinent to the event that is to be +published, and invoke the `sendNotification(Notification)` on the`NotificationPublisher` instance, passing in the `Notification`. + +In the following example, exported instances of the `JmxTestBean` publish a`NotificationEvent` every time the `add(int, int)` operation is invoked: + +``` +package org.springframework.jmx; + +import org.springframework.jmx.export.notification.NotificationPublisherAware; +import org.springframework.jmx.export.notification.NotificationPublisher; +import javax.management.Notification; + +public class JmxTestBean implements IJmxTestBean, NotificationPublisherAware { + + private String name; + private int age; + private boolean isSuperman; + private NotificationPublisher publisher; + + // other getters and setters omitted for clarity + + public int add(int x, int y) { + int answer = x + y; + this.publisher.sendNotification(new Notification("add", this, 0)); + return answer; + } + + public void dontExposeMe() { + throw new RuntimeException(); + } + + public void setNotificationPublisher(NotificationPublisher notificationPublisher) { + this.publisher = notificationPublisher; + } + +} +``` + +The `NotificationPublisher` interface and the machinery to get it all working is one of +the nicer features of Spring’s JMX support. It does, however, come with the price tag of +coupling your classes to both Spring and JMX. As always, the advice here is to be +pragmatic. If you need the functionality offered by the `NotificationPublisher` and +you can accept the coupling to both Spring and JMX, then do so. + +### 5.7. Further Resources + +This section contains links to further resources about JMX: + +* The [JMX + homepage](https://www.oracle.com/technetwork/java/javase/tech/javamanagement-140525.html) at Oracle. 
+ +* The [JMX + specification](https://jcp.org/aboutJava/communityprocess/final/jsr003/index3.html) (JSR-000003). + +* The [JMX Remote API + specification](https://jcp.org/aboutJava/communityprocess/final/jsr160/index.html) (JSR-000160). + +* The [MX4J homepage](http://mx4j.sourceforge.net/). (MX4J is an open-source implementation of + various JMX specs.) + +## 6. Email + +This section describes how to send email with the Spring Framework. + +Library dependencies + +The following JAR needs to be on the classpath of your application in order to use +the Spring Framework’s email library: + +* The [JavaMail / Jakarta Mail 1.6](https://eclipse-ee4j.github.io/mail/) library + +This library is freely available on the web — for example, in Maven Central as`com.sun.mail:jakarta.mail`. Please make sure to use the latest 1.6.x version +rather than Jakarta Mail 2.0 (which comes with a different package namespace). + +The Spring Framework provides a helpful utility library for sending email that shields +you from the specifics of the underlying mailing system and is responsible for +low-level resource handling on behalf of the client. + +The `org.springframework.mail` package is the root level package for the Spring +Framework’s email support. The central interface for sending emails is the `MailSender`interface. A simple value object that encapsulates the properties of a simple mail such +as `from` and `to` (plus many others) is the `SimpleMailMessage` class. This package +also contains a hierarchy of checked exceptions that provide a higher level of +abstraction over the lower level mail system exceptions, with the root exception being`MailException`. See the [javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/mail/MailException.html)for more information on the rich mail exception hierarchy. 
+ +The `org.springframework.mail.javamail.JavaMailSender` interface adds specialized +JavaMail features, such as MIME message support to the `MailSender` interface +(from which it inherits). `JavaMailSender` also provides a callback interface called`org.springframework.mail.javamail.MimeMessagePreparator` for preparing a `MimeMessage`. + +### 6.1. Usage + +Assume that we have a business interface called `OrderManager`, as the following example shows: + +``` +public interface OrderManager { + + void placeOrder(Order order); + +} +``` + +Further assume that we have a requirement stating that an email message with an +order number needs to be generated and sent to a customer who placed the relevant order. + +#### 6.1.1. Basic `MailSender` and `SimpleMailMessage` Usage + +The following example shows how to use `MailSender` and `SimpleMailMessage` to send an +email when someone places an order: + +``` +import org.springframework.mail.MailException; +import org.springframework.mail.MailSender; +import org.springframework.mail.SimpleMailMessage; + +public class SimpleOrderManager implements OrderManager { + + private MailSender mailSender; + private SimpleMailMessage templateMessage; + + public void setMailSender(MailSender mailSender) { + this.mailSender = mailSender; + } + + public void setTemplateMessage(SimpleMailMessage templateMessage) { + this.templateMessage = templateMessage; + } + + public void placeOrder(Order order) { + + // Do the business calculations... + + // Call the collaborators to persist the order... + + // Create a thread safe "copy" of the template message and customize it + SimpleMailMessage msg = new SimpleMailMessage(this.templateMessage); + msg.setTo(order.getCustomer().getEmailAddress()); + msg.setText( + "Dear " + order.getCustomer().getFirstName() + + order.getCustomer().getLastName() + + ", thank you for placing order. 
Your order number is " + + order.getOrderNumber()); + try { + this.mailSender.send(msg); + } + catch (MailException ex) { + // simply log it and go on... + System.err.println(ex.getMessage()); + } + } + +} +``` + +The following example shows the bean definitions for the preceding code: + +``` +<bean id="mailSender" class="org.springframework.mail.javamail.JavaMailSenderImpl"> + <property name="host" value="mail.mycompany.example"/> +</bean> + +<!-- this is a template message that we can pre-load with default state --> +<bean id="templateMessage" class="org.springframework.mail.SimpleMailMessage"> + <property name="from" value="[email protected]"/> + <property name="subject" value="Your order"/> +</bean> + +<bean id="orderManager" class="com.mycompany.businessapp.support.SimpleOrderManager"> + <property name="mailSender" ref="mailSender"/> + <property name="templateMessage" ref="templateMessage"/> +</bean> +``` + +#### 6.1.2. Using `JavaMailSender` and `MimeMessagePreparator` + +This section describes another implementation of `OrderManager` that uses the `MimeMessagePreparator`callback interface. In the following example, the `mailSender` property is of type`JavaMailSender` so that we are able to use the JavaMail `MimeMessage` class: + +``` +import javax.mail.Message; +import javax.mail.MessagingException; +import javax.mail.internet.InternetAddress; +import javax.mail.internet.MimeMessage; + +import javax.mail.internet.MimeMessage; +import org.springframework.mail.MailException; +import org.springframework.mail.javamail.JavaMailSender; +import org.springframework.mail.javamail.MimeMessagePreparator; + +public class SimpleOrderManager implements OrderManager { + + private JavaMailSender mailSender; + + public void setMailSender(JavaMailSender mailSender) { + this.mailSender = mailSender; + } + + public void placeOrder(final Order order) { + // Do the business calculations... + // Call the collaborators to persist the order... 
+ + MimeMessagePreparator preparator = new MimeMessagePreparator() { + public void prepare(MimeMessage mimeMessage) throws Exception { + mimeMessage.setRecipient(Message.RecipientType.TO, + new InternetAddress(order.getCustomer().getEmailAddress())); + mimeMessage.setFrom(new InternetAddress("[email protected]")); + mimeMessage.setText("Dear " + order.getCustomer().getFirstName() + " " + + order.getCustomer().getLastName() + ", thanks for your order. " + + "Your order number is " + order.getOrderNumber() + "."); + } + }; + + try { + this.mailSender.send(preparator); + } + catch (MailException ex) { + // simply log it and go on... + System.err.println(ex.getMessage()); + } + } + +} +``` + +| |The mail code is a crosscutting concern and could well be a candidate for<br/>refactoring into a [custom Spring AOP aspect](core.html#aop), which could then<br/>be run at appropriate joinpoints on the `OrderManager` target.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The Spring Framework’s mail support ships with the standard JavaMail implementation. +See the relevant javadoc for more information. + +### 6.2. Using the JavaMail `MimeMessageHelper` + +A class that comes in pretty handy when dealing with JavaMail messages is`org.springframework.mail.javamail.MimeMessageHelper`, which shields you from +having to use the verbose JavaMail API. 
Using the `MimeMessageHelper`, it is +pretty easy to create a `MimeMessage`, as the following example shows: + +``` +// of course you would use DI in any real-world cases +JavaMailSenderImpl sender = new JavaMailSenderImpl(); +sender.setHost("mail.host.com"); + +MimeMessage message = sender.createMimeMessage(); +MimeMessageHelper helper = new MimeMessageHelper(message); +helper.setTo("[email protected]"); +helper.setText("Thank you for ordering!"); + +sender.send(message); +``` + +#### 6.2.1. Sending Attachments and Inline Resources + +Multipart email messages allow for both attachments and inline resources. Examples of +inline resources include an image or a stylesheet that you want to use in your message but +that you do not want displayed as an attachment. + +##### Attachments + +The following example shows you how to use the `MimeMessageHelper` to send an email +with a single JPEG image attachment: + +``` +JavaMailSenderImpl sender = new JavaMailSenderImpl(); +sender.setHost("mail.host.com"); + +MimeMessage message = sender.createMimeMessage(); + +// use the true flag to indicate you need a multipart message +MimeMessageHelper helper = new MimeMessageHelper(message, true); +helper.setTo("[email protected]"); + +helper.setText("Check out this image!"); + +// let's attach the infamous windows Sample file (this time copied to c:/) +FileSystemResource file = new FileSystemResource(new File("c:/Sample.jpg")); +helper.addAttachment("CoolImage.jpg", file); + +sender.send(message); +``` + +##### Inline Resources + +The following example shows you how to use the `MimeMessageHelper` to send an email +with an inline image: + +``` +JavaMailSenderImpl sender = new JavaMailSenderImpl(); +sender.setHost("mail.host.com"); + +MimeMessage message = sender.createMimeMessage(); + +// use the true flag to indicate you need a multipart message +MimeMessageHelper helper = new MimeMessageHelper(message, true); +helper.setTo("[email protected]"); + +// use the true flag to indicate the 
text included is HTML
+helper.setText("<html><body><img src='cid:identifier1234'></body></html>", true);
+
+// let's include the infamous windows Sample file (this time copied to c:/)
+FileSystemResource res = new FileSystemResource(new File("c:/Sample.jpg"));
+helper.addInline("identifier1234", res);
+
+sender.send(message);
+```
+
+| |Inline resources are added to the `MimeMessage` by using the specified `Content-ID` (`identifier1234` in the above example). The order in which you add the text<br/>and the resource is very important. Be sure to first add the text and then<br/>the resources. If you are doing it the other way around, it does not work.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 6.2.2. Creating Email Content by Using a Templating Library
+
+The code in the examples shown in the previous sections explicitly created the content of the email message,
+by using method calls such as `message.setText(..)`. This is fine for simple cases, and it
+is okay in the context of the aforementioned examples, where the intent was to show you
+the very basics of the API.
+
+In your typical enterprise application, though, developers often do not create the content
+of email messages by using the previously shown approach for a number of reasons:
+
+* Creating HTML-based email content in Java code is tedious and error prone.
+
+* There is no clear separation between display logic and business logic.
+
+* Changing the display structure of the email content requires writing Java code,
+  recompiling, redeploying, and so on.
+
+Typically, the approach taken to address these issues is to use a template library (such
+as FreeMarker) to define the display structure of email content. 
This leaves your code
+tasked only with creating the data that is to be rendered in the email template and
+sending the email. It is definitely a best practice when the content of your email messages
+becomes even moderately complex, and, with the Spring Framework’s support classes for
+FreeMarker, it becomes quite easy to do.
+
+## 7. Task Execution and Scheduling
+
+The Spring Framework provides abstractions for the asynchronous execution and scheduling of
+tasks with the `TaskExecutor` and `TaskScheduler` interfaces, respectively. Spring also
+features implementations of those interfaces that support thread pools or delegation to
+CommonJ within an application server environment. Ultimately, the use of these
+implementations behind the common interfaces abstracts away the differences between Java
+SE 5, Java SE 6, and Java EE environments.
+
+Spring also features integration classes to support scheduling with the `Timer` (part of the JDK since 1.3) and the Quartz Scheduler ( [https://www.quartz-scheduler.org/](https://www.quartz-scheduler.org/)).
+You can set up both of those schedulers by using a `FactoryBean` with optional references to `Timer` or `Trigger` instances, respectively. Furthermore, a convenience class for both
+the Quartz Scheduler and the `Timer` is available that lets you invoke a method of
+an existing target object (analogous to the normal `MethodInvokingFactoryBean` operation).
+
+### 7.1. The Spring `TaskExecutor` Abstraction
+
+Executors are the JDK name for the concept of thread pools. The “executor” naming is
+due to the fact that there is no guarantee that the underlying implementation is
+actually a pool. An executor may be single-threaded or even synchronous. Spring’s
+abstraction hides implementation details between the Java SE and Java EE environments.
+
+Spring’s `TaskExecutor` interface is identical to the `java.util.concurrent.Executor` interface. 
In fact, originally, its primary reason for existence was to abstract away
+the need for Java 5 when using thread pools. The interface has a single method
+(`execute(Runnable task)`) that accepts a task for execution based on the semantics
+and configuration of the thread pool.
+
+The `TaskExecutor` was originally created to give other Spring components an abstraction
+for thread pooling where needed. Components such as the `ApplicationEventMulticaster`,
+JMS’s `AbstractMessageListenerContainer`, and Quartz integration all use the `TaskExecutor` abstraction to pool threads. However, if your beans need thread pooling
+behavior, you can also use this abstraction for your own needs.
+
+#### 7.1.1. `TaskExecutor` Types
+
+Spring includes a number of pre-built implementations of `TaskExecutor`.
+In all likelihood, you should never need to implement your own.
+The variants that Spring provides are as follows:
+
+* `SyncTaskExecutor`:
+  This implementation does not run invocations asynchronously. Instead, each
+  invocation takes place in the calling thread. It is primarily used in situations
+  where multi-threading is not necessary, such as in simple test cases.
+
+* `SimpleAsyncTaskExecutor`:
+  This implementation does not reuse any threads. Rather, it starts up a new thread
+  for each invocation. However, it does support a concurrency limit that blocks
+  any invocations that are over the limit until a slot has been freed up. If you
+  are looking for true pooling, see `ThreadPoolTaskExecutor`, later in this list.
+
+* `ConcurrentTaskExecutor`:
+  This implementation is an adapter for a `java.util.concurrent.Executor` instance.
+  There is an alternative (`ThreadPoolTaskExecutor`) that exposes the `Executor` configuration parameters as bean properties. There is rarely a need to use `ConcurrentTaskExecutor` directly. However, if the `ThreadPoolTaskExecutor` is not
+  flexible enough for your needs, `ConcurrentTaskExecutor` is an alternative. 
+ +* `ThreadPoolTaskExecutor`: + This implementation is most commonly used. It exposes bean properties for + configuring a `java.util.concurrent.ThreadPoolExecutor` and wraps it in a `TaskExecutor`. + If you need to adapt to a different kind of `java.util.concurrent.Executor`, we + recommend that you use a `ConcurrentTaskExecutor` instead. + +* `WorkManagerTaskExecutor`: + This implementation uses a CommonJ `WorkManager` as its backing service provider + and is the central convenience class for setting up CommonJ-based thread pool + integration on WebLogic or WebSphere within a Spring application context. + +* `DefaultManagedTaskExecutor`: + This implementation uses a JNDI-obtained `ManagedExecutorService` in a JSR-236 + compatible runtime environment (such as a Java EE 7+ application server), + replacing a CommonJ WorkManager for that purpose. + +#### 7.1.2. Using a `TaskExecutor` + +Spring’s `TaskExecutor` implementations are used as simple JavaBeans. In the following example, +we define a bean that uses the `ThreadPoolTaskExecutor` to asynchronously print +out a set of messages: + +``` +import org.springframework.core.task.TaskExecutor; + +public class TaskExecutorExample { + + private class MessagePrinterTask implements Runnable { + + private String message; + + public MessagePrinterTask(String message) { + this.message = message; + } + + public void run() { + System.out.println(message); + } + } + + private TaskExecutor taskExecutor; + + public TaskExecutorExample(TaskExecutor taskExecutor) { + this.taskExecutor = taskExecutor; + } + + public void printMessages() { + for(int i = 0; i < 25; i++) { + taskExecutor.execute(new MessagePrinterTask("Message" + i)); + } + } +} +``` + +As you can see, rather than retrieving a thread from the pool and executing it yourself, +you add your `Runnable` to the queue. Then the `TaskExecutor` uses its internal rules to +decide when the task gets run. 
+ +To configure the rules that the `TaskExecutor` uses, we expose simple bean properties: + +``` +<bean id="taskExecutor" class="org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor"> + <property name="corePoolSize" value="5"/> + <property name="maxPoolSize" value="10"/> + <property name="queueCapacity" value="25"/> +</bean> + +<bean id="taskExecutorExample" class="TaskExecutorExample"> + <constructor-arg ref="taskExecutor"/> +</bean> +``` + +### 7.2. The Spring `TaskScheduler` Abstraction + +In addition to the `TaskExecutor` abstraction, Spring 3.0 introduced a `TaskScheduler`with a variety of methods for scheduling tasks to run at some point in the future. +The following listing shows the `TaskScheduler` interface definition: + +``` +public interface TaskScheduler { + + ScheduledFuture schedule(Runnable task, Trigger trigger); + + ScheduledFuture schedule(Runnable task, Instant startTime); + + ScheduledFuture schedule(Runnable task, Date startTime); + + ScheduledFuture scheduleAtFixedRate(Runnable task, Instant startTime, Duration period); + + ScheduledFuture scheduleAtFixedRate(Runnable task, Date startTime, long period); + + ScheduledFuture scheduleAtFixedRate(Runnable task, Duration period); + + ScheduledFuture scheduleAtFixedRate(Runnable task, long period); + + ScheduledFuture scheduleWithFixedDelay(Runnable task, Instant startTime, Duration delay); + + ScheduledFuture scheduleWithFixedDelay(Runnable task, Date startTime, long delay); + + ScheduledFuture scheduleWithFixedDelay(Runnable task, Duration delay); + + ScheduledFuture scheduleWithFixedDelay(Runnable task, long delay); +} +``` + +The simplest method is the one named `schedule` that takes only a `Runnable` and a `Date`. +That causes the task to run once after the specified time. All of the other methods +are capable of scheduling tasks to run repeatedly. 
The fixed-rate and fixed-delay +methods are for simple, periodic execution, but the method that accepts a `Trigger` is +much more flexible. + +#### 7.2.1. `Trigger` Interface + +The `Trigger` interface is essentially inspired by JSR-236 which, as of Spring 3.0, +was not yet officially implemented. The basic idea of the `Trigger` is that execution +times may be determined based on past execution outcomes or even arbitrary conditions. +If these determinations do take into account the outcome of the preceding execution, +that information is available within a `TriggerContext`. The `Trigger` interface itself +is quite simple, as the following listing shows: + +``` +public interface Trigger { + + Date nextExecutionTime(TriggerContext triggerContext); +} +``` + +The `TriggerContext` is the most important part. It encapsulates all of +the relevant data and is open for extension in the future, if necessary. The`TriggerContext` is an interface (a `SimpleTriggerContext` implementation is used by +default). The following listing shows the available methods for `Trigger` implementations. + +``` +public interface TriggerContext { + + Date lastScheduledExecutionTime(); + + Date lastActualExecutionTime(); + + Date lastCompletionTime(); +} +``` + +#### 7.2.2. `Trigger` Implementations + +Spring provides two implementations of the `Trigger` interface. The most interesting one +is the `CronTrigger`. It enables the scheduling of tasks based on[cron expressions](#scheduling-cron-expression). +For example, the following task is scheduled to run 15 minutes past each hour but only +during the 9-to-5 “business hours” on weekdays: + +``` +scheduler.schedule(task, new CronTrigger("0 15 9-17 * * MON-FRI")); +``` + +The other implementation is a `PeriodicTrigger` that accepts a fixed +period, an optional initial delay value, and a boolean to indicate whether the period +should be interpreted as a fixed-rate or a fixed-delay. 
Since the `TaskScheduler` interface already defines methods for scheduling tasks at a fixed rate or with a
+fixed delay, those methods should be used directly whenever possible. The value of the `PeriodicTrigger` implementation is that you can use it within components that rely on
+the `Trigger` abstraction. For example, it may be convenient to allow periodic triggers,
+cron-based triggers, and even custom trigger implementations to be used interchangeably.
+Such a component could take advantage of dependency injection so that you can configure such `Triggers` externally and, therefore, easily modify or extend them.
+
+#### 7.2.3. `TaskScheduler` implementations
+
+As with Spring’s `TaskExecutor` abstraction, the primary benefit of the `TaskScheduler` arrangement is that an application’s scheduling needs are decoupled from the deployment
+environment. This abstraction level is particularly relevant when deploying to an
+application server environment where threads should not be created directly by the
+application itself. For such scenarios, Spring provides a `TimerManagerTaskScheduler` that delegates to a CommonJ `TimerManager` on WebLogic or WebSphere as well as a more recent `DefaultManagedTaskScheduler` that delegates to a JSR-236 `ManagedScheduledExecutorService` in a Java EE 7+ environment. Both are typically configured with a JNDI lookup.
+
+Whenever external thread management is not a requirement, a simpler alternative is
+a local `ScheduledExecutorService` setup within the application, which can be adapted
+through Spring’s `ConcurrentTaskScheduler`. As a convenience, Spring also provides a `ThreadPoolTaskScheduler`, which internally delegates to a `ScheduledExecutorService` to provide common bean-style configuration along the lines of `ThreadPoolTaskExecutor`.
+These variants work perfectly fine for locally embedded thread pool setups in lenient
+application server environments, as well — in particular on Tomcat and Jetty.
+
+### 7.3. 
Annotation Support for Scheduling and Asynchronous Execution
+
+Spring provides annotation support for both task scheduling and asynchronous method
+execution.
+
+#### 7.3.1. Enable Scheduling Annotations
+
+To enable support for `@Scheduled` and `@Async` annotations, you can add `@EnableScheduling` and `@EnableAsync` to one of your `@Configuration` classes, as the following example shows:
+
+```
+@Configuration
+@EnableAsync
+@EnableScheduling
+public class AppConfig {
+}
+```
+
+You can pick and choose the relevant annotations for your application. For example,
+if you need only support for `@Scheduled`, you can omit `@EnableAsync`. For more
+fine-grained control, you can additionally implement the `SchedulingConfigurer` interface, the `AsyncConfigurer` interface, or both. See the [`SchedulingConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/scheduling/annotation/SchedulingConfigurer.html) and [`AsyncConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/scheduling/annotation/AsyncConfigurer.html) javadoc for full details.
+
+If you prefer XML configuration, you can use the `<task:annotation-driven>` element,
+as the following example shows:
+
+```
+<task:annotation-driven executor="myExecutor" scheduler="myScheduler"/>
+<task:executor id="myExecutor" pool-size="5"/>
+<task:scheduler id="myScheduler" pool-size="10"/>
+```
+
+Note that, with the preceding XML, an executor reference is provided for handling those
+tasks that correspond to methods with the `@Async` annotation, and the scheduler
+reference is provided for managing those methods annotated with `@Scheduled`.
+
+| |The default advice mode for processing `@Async` annotations is `proxy`, which allows<br/>for interception of calls through the proxy only. Local calls within the same class<br/>cannot get intercepted that way. 
For a more advanced mode of interception, consider<br/>switching to `aspectj` mode in combination with compile-time or load-time weaving.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 7.3.2. The `@Scheduled` annotation + +You can add the `@Scheduled` annotation to a method, along with trigger metadata. For +example, the following method is invoked every five seconds (5000 milliseconds) with a +fixed delay, meaning that the period is measured from the completion time of each +preceding invocation. + +``` +@Scheduled(fixedDelay = 5000) +public void doSomething() { + // something that should run periodically +} +``` + +| |By default, milliseconds will be used as the time unit for fixed delay, fixed rate, and<br/>initial delay values. 
If you would like to use a different time unit such as seconds or<br/>minutes, you can configure this via the `timeUnit` attribute in `@Scheduled`.<br/><br/>For example, the previous example can also be written as follows.<br/><br/>```<br/>@Scheduled(fixedDelay = 5, timeUnit = TimeUnit.SECONDS)<br/>public void doSomething() {<br/> // something that should run periodically<br/>}<br/>```|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+If you need a fixed-rate execution, you can use the `fixedRate` attribute within the
+annotation. The following method is invoked every five seconds (measured between the
+successive start times of each invocation).
+
+```
+@Scheduled(fixedRate = 5, timeUnit = TimeUnit.SECONDS)
+public void doSomething() {
+    // something that should run periodically
+}
+```
+
+For fixed-delay and fixed-rate tasks, you can specify an initial delay by indicating the
+amount of time to wait before the first execution of the method, as the following `fixedRate` example shows.
+
+```
+@Scheduled(initialDelay = 1000, fixedRate = 5000)
+public void doSomething() {
+    // something that should run periodically
+}
+```
+
+If simple periodic scheduling is not expressive enough, you can provide a [cron expression](#scheduling-cron-expression). 
+
+The following example runs only on weekdays:
+
+```
+@Scheduled(cron="*/5 * * * * MON-FRI")
+public void doSomething() {
+    // something that should run on weekdays only
+}
+```
+
+| |You can also use the `zone` attribute to specify the time zone in which the cron<br/>expression is resolved.|
+|---|------------------------------------------------------------------------------------------------------------|
+
+Notice that the methods to be scheduled must have void returns and must not accept any
+arguments. If the method needs to interact with other objects from the application
+context, those would typically have been provided through dependency injection.
+
+| |As of Spring Framework 4.3, `@Scheduled` methods are supported on beans of any scope.<br/><br/>Make sure that you are not initializing multiple instances of the same `@Scheduled` annotation class at runtime, unless you do want to schedule callbacks to each such<br/>instance. Related to this, make sure that you do not use `@Configurable` on bean<br/>classes that are annotated with `@Scheduled` and registered as regular Spring beans<br/>with the container. Otherwise, you would get double initialization (once through the<br/>container and once through the `@Configurable` aspect), with the consequence of each `@Scheduled` method being invoked twice.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 7.3.3. 
The `@Async` annotation + +You can provide the `@Async` annotation on a method so that invocation of that method +occurs asynchronously. In other words, the caller returns immediately upon +invocation, while the actual execution of the method occurs in a task that has been +submitted to a Spring `TaskExecutor`. In the simplest case, you can apply the annotation +to a method that returns `void`, as the following example shows: + +``` +@Async +void doSomething() { + // this will be run asynchronously +} +``` + +Unlike the methods annotated with the `@Scheduled` annotation, these methods can expect +arguments, because they are invoked in the “normal” way by callers at runtime rather +than from a scheduled task being managed by the container. For example, the following code is +a legitimate application of the `@Async` annotation: + +``` +@Async +void doSomething(String s) { + // this will be run asynchronously +} +``` + +Even methods that return a value can be invoked asynchronously. However, such methods +are required to have a `Future`-typed return value. This still provides the benefit of +asynchronous execution so that the caller can perform other tasks prior to calling`get()` on that `Future`. 
The following example shows how to use `@Async` on a method +that returns a value: + +``` +@Async +Future<String> returnSomething(int i) { + // this will be run asynchronously +} +``` + +| |`@Async` methods may not only declare a regular `java.util.concurrent.Future` return type<br/>but also Spring’s `org.springframework.util.concurrent.ListenableFuture` or, as of Spring<br/>4.2, JDK 8’s `java.util.concurrent.CompletableFuture`, for richer interaction with the<br/>asynchronous task and for immediate composition with further processing steps.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can not use `@Async` in conjunction with lifecycle callbacks such as`@PostConstruct`. To asynchronously initialize Spring beans, you currently have to use +a separate initializing Spring bean that then invokes the `@Async` annotated method on the +target, as the following example shows: + +``` +public class SampleBeanImpl implements SampleBean { + + @Async + void doSomething() { + // ... 
+ } + +} + +public class SampleBeanInitializer { + + private final SampleBean bean; + + public SampleBeanInitializer(SampleBean bean) { + this.bean = bean; + } + + @PostConstruct + public void initialize() { + bean.doSomething(); + } + +} +``` + +| |There is no direct XML equivalent for `@Async`, since such methods should be designed<br/>for asynchronous execution in the first place, not externally re-declared to be asynchronous.<br/>However, you can manually set up Spring’s `AsyncExecutionInterceptor` with Spring AOP,<br/>in combination with a custom pointcut.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 7.3.4. Executor Qualification with `@Async` + +By default, when specifying `@Async` on a method, the executor that is used is the +one [configured when enabling async support](#scheduling-enable-annotation-support), +i.e. the “annotation-driven” element if you are using XML or your `AsyncConfigurer`implementation, if any. However, you can use the `value` attribute of the `@Async`annotation when you need to indicate that an executor other than the default should be +used when executing a given method. The following example shows how to do so: + +``` +@Async("otherExecutor") +void doSomething(String s) { + // this will be run asynchronously by "otherExecutor" +} +``` + +In this case, `"otherExecutor"` can be the name of any `Executor` bean in the Spring +container, or it may be the name of a qualifier associated with any `Executor` (for example, as +specified with the `<qualifier>` element or Spring’s `@Qualifier` annotation). + +#### 7.3.5. 
Exception Management with `@Async`
+
+When an `@Async` method has a `Future`-typed return value, it is easy to manage
+an exception that was thrown during the method execution, as this exception is
+thrown when calling `get` on the `Future` result. With a `void` return type,
+however, the exception is uncaught and cannot be transmitted. You can provide an `AsyncUncaughtExceptionHandler` to handle such exceptions. The following example shows
+how to do so:
+
+```
+public class MyAsyncUncaughtExceptionHandler implements AsyncUncaughtExceptionHandler {
+
+    @Override
+    public void handleUncaughtException(Throwable ex, Method method, Object... params) {
+        // handle exception
+    }
+}
+```
+
+By default, the exception is merely logged. You can define a custom `AsyncUncaughtExceptionHandler` by using `AsyncConfigurer` or the `<task:annotation-driven/>` XML element.
+
+### 7.4. The `task` Namespace
+
+As of version 3.0, Spring includes an XML namespace for configuring `TaskExecutor` and `TaskScheduler` instances. It also provides a convenient way to configure tasks to be
+scheduled with a trigger.
+
+#### 7.4.1. The 'scheduler' Element
+
+The following element creates a `ThreadPoolTaskScheduler` instance with the
+specified thread pool size:
+
+```
+<task:scheduler id="scheduler" pool-size="10"/>
+```
+
+The value provided for the `id` attribute is used as the prefix for thread names
+within the pool. The `scheduler` element is relatively straightforward. If you do not
+provide a `pool-size` attribute, the default thread pool has only a single thread.
+There are no other configuration options for the scheduler.
+
+#### 7.4.2. The `executor` Element
+
+The following creates a `ThreadPoolTaskExecutor` instance:
+
+```
+<task:executor id="executor" pool-size="10"/>
+```
+
+As with the scheduler shown in the [previous section](#scheduling-task-namespace-scheduler),
+the value provided for the `id` attribute is used as the prefix for thread names within
+the pool. 
As far as the pool size is concerned, the `executor` element supports more +configuration options than the `scheduler` element. For one thing, the thread pool for +a `ThreadPoolTaskExecutor` is itself more configurable. Rather than only a single size, +an executor’s thread pool can have different values for the core and the max size. +If you provide a single value, the executor has a fixed-size thread pool (the core and +max sizes are the same). However, the `executor` element’s `pool-size` attribute also +accepts a range in the form of `min-max`. The following example sets a minimum value of`5` and a maximum value of `25`: + +``` +<task:executor + id="executorWithPoolSizeRange" + pool-size="5-25" + queue-capacity="100"/> +``` + +In the preceding configuration, a `queue-capacity` value has also been provided. +The configuration of the thread pool should also be considered in light of the +executor’s queue capacity. For the full description of the relationship between pool +size and queue capacity, see the documentation for[`ThreadPoolExecutor`](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ThreadPoolExecutor.html). +The main idea is that, when a task is submitted, the executor first tries to use a +free thread if the number of active threads is currently less than the core size. +If the core size has been reached, the task is added to the queue, as long as its +capacity has not yet been reached. Only then, if the queue’s capacity has been +reached, does the executor create a new thread beyond the core size. If the max size +has also been reached, then the executor rejects the task. + +By default, the queue is unbounded, but this is rarely the desired configuration, +because it can lead to `OutOfMemoryErrors` if enough tasks are added to that queue while +all pool threads are busy. Furthermore, if the queue is unbounded, the max size has +no effect at all. 
Since the executor always tries the queue before creating a new +thread beyond the core size, a queue must have a finite capacity for the thread pool to +grow beyond the core size (this is why a fixed-size pool is the only sensible case +when using an unbounded queue). + +Consider the case, as mentioned above, when a task is rejected. By default, when a +task is rejected, a thread pool executor throws a `TaskRejectedException`. However, +the rejection policy is actually configurable. The exception is thrown when using +the default rejection policy, which is the `AbortPolicy` implementation. +For applications where some tasks can be skipped under heavy load, you can instead +configure either `DiscardPolicy` or `DiscardOldestPolicy`. Another option that works +well for applications that need to throttle the submitted tasks under heavy load is +the `CallerRunsPolicy`. Instead of throwing an exception or discarding tasks, +that policy forces the thread that is calling the submit method to run the task itself. +The idea is that such a caller is busy while running that task and not able to submit +other tasks immediately. Therefore, it provides a simple way to throttle the incoming +load while maintaining the limits of the thread pool and queue. Typically, this allows +the executor to “catch up” on the tasks it is handling and thereby frees up some +capacity on the queue, in the pool, or both. You can choose any of these options from an +enumeration of values available for the `rejection-policy` attribute on the `executor`element. + +The following example shows an `executor` element with a number of attributes to specify +various behaviors: + +``` +<task:executor + id="executorWithCallerRunsPolicy" + pool-size="5-25" + queue-capacity="100" + rejection-policy="CALLER_RUNS"/> +``` + +Finally, the `keep-alive` setting determines the time limit (in seconds) for which threads +may remain idle before being stopped. 
If there are more than the core number of threads +currently in the pool, after waiting this amount of time without processing a task, excess +threads get stopped. A time value of zero causes excess threads to stop +immediately after executing a task without remaining follow-up work in the task queue. +The following example sets the `keep-alive` value to two minutes: + +``` +<task:executor + id="executorWithKeepAlive" + pool-size="5-25" + keep-alive="120"/> +``` + +#### 7.4.3. The 'scheduled-tasks' Element + +The most powerful feature of Spring’s task namespace is the support for configuring +tasks to be scheduled within a Spring Application Context. This follows an approach +similar to other “method-invokers” in Spring, such as that provided by the JMS namespace +for configuring message-driven POJOs. Basically, a `ref` attribute can point to any +Spring-managed object, and the `method` attribute provides the name of a method to be +invoked on that object. The following listing shows a simple example: + +``` +<task:scheduled-tasks scheduler="myScheduler"> + <task:scheduled ref="beanA" method="methodA" fixed-delay="5000"/> +</task:scheduled-tasks> + +<task:scheduler id="myScheduler" pool-size="10"/> +``` + +The scheduler is referenced by the outer element, and each individual +task includes the configuration of its trigger metadata. In the preceding example, that +metadata defines a periodic trigger with a fixed delay indicating the number of +milliseconds to wait after each task execution has completed. Another option is`fixed-rate`, indicating how often the method should be run regardless of how long +any previous execution takes. Additionally, for both `fixed-delay` and `fixed-rate` tasks, you can specify an +'initial-delay' parameter, indicating the number of milliseconds to wait +before the first execution of the method. For more control, you can instead provide a `cron` attribute +to provide a [cron expression](#scheduling-cron-expression). 
+The following example shows these other options: + +``` +<task:scheduled-tasks scheduler="myScheduler"> + <task:scheduled ref="beanA" method="methodA" fixed-delay="5000" initial-delay="1000"/> + <task:scheduled ref="beanB" method="methodB" fixed-rate="5000"/> + <task:scheduled ref="beanC" method="methodC" cron="*/5 * * * * MON-FRI"/> +</task:scheduled-tasks> + +<task:scheduler id="myScheduler" pool-size="10"/> +``` + +### 7.5. Cron Expressions + +All Spring cron expressions have to conform to the same format, whether you are using them in[`@Scheduled` annotations](#scheduling-annotation-support-scheduled),[`task:scheduled-tasks` elements](#scheduling-task-namespace-scheduled-tasks), +or someplace else. +A well-formed cron expression, such as `* * * * * *`, consists of six space-separated time and date +fields, each with its own range of valid values: + +``` + ┌───────────── second (0-59) + │ ┌───────────── minute (0 - 59) + │ │ ┌───────────── hour (0 - 23) + │ │ │ ┌───────────── day of the month (1 - 31) + │ │ │ │ ┌───────────── month (1 - 12) (or JAN-DEC) + │ │ │ │ │ ┌───────────── day of the week (0 - 7) + │ │ │ │ │ │ (0 or 7 is Sunday, or MON-SUN) + │ │ │ │ │ │ + * * * * * * +``` + +There are some rules that apply: + +* A field may be an asterisk (`*`), which always stands for “first-last”. + For the day-of-the-month or day-of-the-week fields, a question mark (`?`) may be used instead of an + asterisk. + +* Commas (`,`) are used to separate items of a list. + +* Two numbers separated with a hyphen (`-`) express a range of numbers. + The specified range is inclusive. + +* Following a range (or `*`) with `/` specifies the interval of the number’s value through the range. + +* English names can also be used for the day-of-month and day-of-week fields. + Use the first three letters of the particular day or month (case does not matter). 
+ +* The day-of-month and day-of-week fields can contain a `L` character, which has a different meaning + + * In the day-of-month field, `L` stands for *the last day of the month*. + If followed by a negative offset (that is, `L-n`), it means *`n`th-to-last day of the month*. + + * In the day-of-week field, `L` stands for *the last day of the week*. + If prefixed by a number or three-letter name (`dL` or `DDDL`), it means *the last day of week (`d`or `DDD`) in the month*. + +* The day-of-month field can be `nW`, which stands for *the nearest weekday to day of the month `n`*. + If `n` falls on Saturday, this yields the Friday before it. + If `n` falls on Sunday, this yields the Monday after, which also happens if `n` is `1` and falls on + a Saturday (that is: `1W` stands for *the first weekday of the month*). + +* If the day-of-month field is `LW`, it means *the last weekday of the month*. + +* The day-of-week field can be `d#n` (or `DDD#n`), which stands for *the `n`th day of week `d`(or `DDD`) in the month*. + +Here are some examples: + +| Cron Expression | Meaning | +|----------------------|-------------------------------------------------| +| `0 0 * * * *` | top of every hour of every day | +| `*/10 * * * * *` | every ten seconds | +| `0 0 8-10 * * *` | 8, 9 and 10 o’clock of every day | +| `0 0 6,19 * * *` | 6:00 AM and 7:00 PM every day | +| `0 0/30 8-10 * * *` |8:00, 8:30, 9:00, 9:30, 10:00 and 10:30 every day| +|`0 0 9-17 * * MON-FRI`| on the hour nine-to-five weekdays | +| `0 0 0 25 DEC ?` | every Christmas Day at midnight | +| `0 0 0 L * *` | last day of the month at midnight | +| `0 0 0 L-3 * *` | third-to-last day of the month at midnight | +| `0 0 0 * * 5L` | last Friday of the month at midnight | +| `0 0 0 * * THUL` | last Thursday of the month at midnight | +| `0 0 0 1W * *` | first weekday of the month at midnight | +| `0 0 0 LW * *` | last weekday of the month at midnight | +| `0 0 0 ? 
* 5#2` | the second Friday in the month at midnight | +| `0 0 0 ? * MON#1` | the first Monday in the month at midnight | + +#### 7.5.1. Macros + +Expressions such as `0 0 * * * *` are hard for humans to parse and are, therefore, hard to fix in case of bugs. +To improve readability, Spring supports the following macros, which represent commonly used sequences. +You can use these macros instead of the six-digit value, thus: `@Scheduled(cron = "@hourly")`. + +| Macro | Meaning | +|--------------------------|------------------------------| +|`@yearly` (or `@annually`)| once a year (`0 0 0 1 1 *`) | +| `@monthly` | once a month (`0 0 0 1 * *`) | +| `@weekly` | once a week (`0 0 0 * * 0`) | +|`@daily` (or `@midnight`) |once a day (`0 0 0 * * *`), or| +| `@hourly` |once an hour, (`0 0 * * * *`) | + +### 7.6. Using the Quartz Scheduler + +Quartz uses `Trigger`, `Job`, and `JobDetail` objects to realize scheduling of all kinds +of jobs. For the basic concepts behind Quartz, see[https://www.quartz-scheduler.org/](https://www.quartz-scheduler.org/). For convenience purposes, Spring offers a couple of +classes that simplify using Quartz within Spring-based applications. + +#### 7.6.1. Using the `JobDetailFactoryBean` + +Quartz `JobDetail` objects contain all the information needed to run a job. Spring provides a`JobDetailFactoryBean`, which provides bean-style properties for XML configuration purposes. +Consider the following example: + +``` +<bean name="exampleJob" class="org.springframework.scheduling.quartz.JobDetailFactoryBean"> + <property name="jobClass" value="example.ExampleJob"/> + <property name="jobDataAsMap"> + <map> + <entry key="timeout" value="5"/> + </map> + </property> +</bean> +``` + +The job detail configuration has all the information it needs to run the job (`ExampleJob`). +The timeout is specified in the job data map. 
The job data map is available through the`JobExecutionContext` (passed to you at execution time), but the `JobDetail` also gets +its properties from the job data mapped to properties of the job instance. So, in the following example, +the `ExampleJob` contains a bean property named `timeout`, and the `JobDetail`has it applied automatically: + +``` +package example; + +public class ExampleJob extends QuartzJobBean { + + private int timeout; + + /** + * Setter called after the ExampleJob is instantiated + * with the value from the JobDetailFactoryBean (5) + */ + public void setTimeout(int timeout) { + this.timeout = timeout; + } + + protected void executeInternal(JobExecutionContext ctx) throws JobExecutionException { + // do the actual work + } +} +``` + +All additional properties from the job data map are available to you as well. + +| |By using the `name` and `group` properties, you can modify the name and the group<br/>of the job, respectively. By default, the name of the job matches the bean name<br/>of the `JobDetailFactoryBean` (`exampleJob` in the preceding example above).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 7.6.2. Using the `MethodInvokingJobDetailFactoryBean` + +Often you merely need to invoke a method on a specific object. 
By using the`MethodInvokingJobDetailFactoryBean`, you can do exactly this, as the following example shows: + +``` +<bean id="jobDetail" class="org.springframework.scheduling.quartz.MethodInvokingJobDetailFactoryBean"> + <property name="targetObject" ref="exampleBusinessObject"/> + <property name="targetMethod" value="doIt"/> +</bean> +``` + +The preceding example results in the `doIt` method being called on the`exampleBusinessObject` method, as the following example shows: + +``` +public class ExampleBusinessObject { + + // properties and collaborators + + public void doIt() { + // do the actual work + } +} +``` + +``` +<bean id="exampleBusinessObject" class="examples.ExampleBusinessObject"/> +``` + +By using the `MethodInvokingJobDetailFactoryBean`, you need not create one-line jobs +that merely invoke a method. You need only create the actual business object and +wire up the detail object. + +By default, Quartz Jobs are stateless, resulting in the possibility of jobs interfering +with each other. If you specify two triggers for the same `JobDetail`, it is +possible that, before the first job has finished, the second one starts. If`JobDetail` classes implement the `Stateful` interface, this does not happen. The second +job does not start before the first one has finished. To make jobs resulting from the`MethodInvokingJobDetailFactoryBean` be non-concurrent, set the `concurrent` flag to`false`, as the following example shows: + +``` +<bean id="jobDetail" class="org.springframework.scheduling.quartz.MethodInvokingJobDetailFactoryBean"> + <property name="targetObject" ref="exampleBusinessObject"/> + <property name="targetMethod" value="doIt"/> + <property name="concurrent" value="false"/> +</bean> +``` + +| |By default, jobs will run in a concurrent fashion.| +|---|--------------------------------------------------| + +#### 7.6.3. Wiring up Jobs by Using Triggers and `SchedulerFactoryBean` + +We have created job details and jobs. 
We have also reviewed the convenience bean that lets +you invoke a method on a specific object. Of course, we still need to schedule the +jobs themselves. This is done by using triggers and a `SchedulerFactoryBean`. Several +triggers are available within Quartz, and Spring offers two Quartz `FactoryBean`implementations with convenient defaults: `CronTriggerFactoryBean` and`SimpleTriggerFactoryBean`. + +Triggers need to be scheduled. Spring offers a `SchedulerFactoryBean` that exposes +triggers to be set as properties. `SchedulerFactoryBean` schedules the actual jobs with +those triggers. + +The following listing uses both a `SimpleTriggerFactoryBean` and a `CronTriggerFactoryBean`: + +``` +<bean id="simpleTrigger" class="org.springframework.scheduling.quartz.SimpleTriggerFactoryBean"> + <!-- see the example of method invoking job above --> + <property name="jobDetail" ref="jobDetail"/> + <!-- 10 seconds --> + <property name="startDelay" value="10000"/> + <!-- repeat every 50 seconds --> + <property name="repeatInterval" value="50000"/> +</bean> + +<bean id="cronTrigger" class="org.springframework.scheduling.quartz.CronTriggerFactoryBean"> + <property name="jobDetail" ref="exampleJob"/> + <!-- run every morning at 6 AM --> + <property name="cronExpression" value="0 0 6 * * ?"/> +</bean> +``` + +The preceding example sets up two triggers, one running every 50 seconds with a starting delay of 10 +seconds and one running every morning at 6 AM. To finalize everything, we need to set up the`SchedulerFactoryBean`, as the following example shows: + +``` +<bean class="org.springframework.scheduling.quartz.SchedulerFactoryBean"> + <property name="triggers"> + <list> + <ref bean="cronTrigger"/> + <ref bean="simpleTrigger"/> + </list> + </property> +</bean> +``` + +More properties are available for the `SchedulerFactoryBean`, such as the calendars used by the +job details, properties to customize Quartz with, and a Spring-provided JDBC DataSource. 
See +the [`SchedulerFactoryBean`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/scheduling/quartz/SchedulerFactoryBean.html)javadoc for more information. + +| |`SchedulerFactoryBean` also recognizes a `quartz.properties` file in the classpath,<br/>based on Quartz property keys, as with regular Quartz configuration. Please note that many`SchedulerFactoryBean` settings interact with common Quartz settings in the properties file;<br/>it is therefore not recommended to specify values at both levels. For example, do not set<br/>an "org.quartz.jobStore.class" property if you mean to rely on a Spring-provided DataSource,<br/>or specify an `org.springframework.scheduling.quartz.LocalDataSourceJobStore` variant which<br/>is a full-fledged replacement for the standard `org.quartz.impl.jdbcjobstore.JobStoreTX`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 8. Cache Abstraction + +Since version 3.1, the Spring Framework provides support for transparently adding caching to +an existing Spring application. Similar to the [transaction](data-access.html#transaction)support, the caching abstraction allows consistent use of various caching solutions with +minimal impact on the code. 
+ +In Spring Framework 4.1, the cache abstraction was significantly extended with support +for [JSR-107 annotations](#cache-jsr-107) and more customization options. + +### 8.1. Understanding the Cache Abstraction + +Cache vs Buffer + +The terms, “buffer” and “cache,” tend to be used interchangeably. Note, however, +that they represent different things. Traditionally, a buffer is used as an intermediate +temporary store for data between a fast and a slow entity. As one party would have to wait +for the other (which affects performance), the buffer alleviates this by allowing entire +blocks of data to move at once rather than in small chunks. The data is written and read +only once from the buffer. Furthermore, the buffers are visible to at least one party +that is aware of it. + +A cache, on the other hand, is, by definition, hidden, and neither party is aware that +caching occurs. It also improves performance but does so by letting the same data be +read multiple times in a fast fashion. + +You can find a further explanation of the differences between a buffer and a cache[here](https://en.wikipedia.org/wiki/Cache_(computing)#The_difference_between_buffer_and_cache). + +At its core, the cache abstraction applies caching to Java methods, thus reducing the +number of executions based on the information available in the cache. That is, each time +a targeted method is invoked, the abstraction applies a caching behavior that checks +whether the method has been already invoked for the given arguments. If it has been +invoked, the cached result is returned without having to invoke the actual method. +If the method has not been invoked, then it is invoked, and the result is cached and +returned to the user so that, the next time the method is invoked, the cached result is +returned. This way, expensive methods (whether CPU- or IO-bound) can be invoked only +once for a given set of parameters and the result reused without having to actually +invoke the method again. 
The caching logic is applied transparently without any +interference to the invoker. + +| |This approach works only for methods that are guaranteed to return the same<br/>output (result) for a given input (or arguments) no matter how many times it is invoked.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The caching abstraction provides other cache-related operations, such as the ability +to update the content of the cache or to remove one or all entries. These are useful if +the cache deals with data that can change during the course of the application. + +As with other services in the Spring Framework, the caching service is an abstraction +(not a cache implementation) and requires the use of actual storage to store the cache data — that is, the abstraction frees you from having to write the caching logic but does not +provide the actual data store. This abstraction is materialized by the`org.springframework.cache.Cache` and `org.springframework.cache.CacheManager` interfaces. + +Spring provides [a few implementations](#cache-store-configuration) of that abstraction: +JDK `java.util.concurrent.ConcurrentMap` based caches, [Ehcache 2.x](https://www.ehcache.org/), +Gemfire cache, [Caffeine](https://github.com/ben-manes/caffeine/wiki), and JSR-107 +compliant caches (such as Ehcache 3.x). See [Plugging-in Different Back-end Caches](#cache-plug) for more information on +plugging in other cache stores and providers. 
+ +| |The caching abstraction has no special handling for multi-threaded and<br/>multi-process environments, as such features are handled by the cache implementation.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you have a multi-process environment (that is, an application deployed on several nodes), +you need to configure your cache provider accordingly. Depending on your use cases, a copy +of the same data on several nodes can be enough. However, if you change the data during +the course of the application, you may need to enable other propagation mechanisms. + +Caching a particular item is a direct equivalent of the typical +get-if-not-found-then-proceed-and-put-eventually code blocks +found with programmatic cache interaction. +No locks are applied, and several threads may try to load the same item concurrently. +The same applies to eviction. If several threads are trying to update or evict data +concurrently, you may use stale data. Certain cache providers offer advanced features +in that area. See the documentation of your cache provider for more details. + +To use the cache abstraction, you need to take care of two aspects: + +* Caching declaration: Identify the methods that need to be cached and their policy. + +* Cache configuration: The backing cache where the data is stored and from which it is read. + +### 8.2. Declarative Annotation-based Caching + +For caching declaration, Spring’s caching abstraction provides a set of Java annotations: + +* `@Cacheable`: Triggers cache population. + +* `@CacheEvict`: Triggers cache eviction. + +* `@CachePut`: Updates the cache without interfering with the method execution. + +* `@Caching`: Regroups multiple cache operations to be applied on a method. + +* `@CacheConfig`: Shares some common cache-related settings at class-level. + +#### 8.2.1. 
The `@Cacheable` Annotation + +As the name implies, you can use `@Cacheable` to demarcate methods that are cacheable — that is, methods for which the result is stored in the cache so that, on subsequent +invocations (with the same arguments), the value in the cache is returned without +having to actually invoke the method. In its simplest form, the annotation declaration +requires the name of the cache associated with the annotated method, as the following +example shows: + +``` +@Cacheable("books") +public Book findBook(ISBN isbn) {...} +``` + +In the preceding snippet, the `findBook` method is associated with the cache named `books`. +Each time the method is called, the cache is checked to see whether the invocation has +already been run and does not have to be repeated. While in most cases, only one +cache is declared, the annotation lets multiple names be specified so that more than one +cache is being used. In this case, each of the caches is checked before invoking the +method — if at least one cache is hit, the associated value is returned. + +| |All the other caches that do not contain the value are also updated, even though<br/>the cached method was not actually invoked.| +|---|--------------------------------------------------------------------------------------------------------------------------------| + +The following example uses `@Cacheable` on the `findBook` method with multiple caches: + +``` +@Cacheable({"books", "isbns"}) +public Book findBook(ISBN isbn) {...} +``` + +##### Default Key Generation + +Since caches are essentially key-value stores, each invocation of a cached method +needs to be translated into a suitable key for cache access. The caching abstraction +uses a simple `KeyGenerator` based on the following algorithm: + +* If no params are given, return `SimpleKey.EMPTY`. + +* If only one param is given, return that instance. + +* If more than one param is given, return a `SimpleKey` that contains all parameters. 
+ +This approach works well for most use-cases, as long as parameters have natural keys +and implement valid `hashCode()` and `equals()` methods. If that is not the case, +you need to change the strategy. + +To provide a different default key generator, you need to implement the`org.springframework.cache.interceptor.KeyGenerator` interface. + +| |The default key generation strategy changed with the release of Spring 4.0. Earlier<br/>versions of Spring used a key generation strategy that, for multiple key parameters,<br/>considered only the `hashCode()` of parameters and not `equals()`. This could cause<br/>unexpected key collisions (see [SPR-10237](https://jira.spring.io/browse/SPR-10237)for background). The new `SimpleKeyGenerator` uses a compound key for such scenarios.<br/><br/>If you want to keep using the previous key strategy, you can configure the deprecated`org.springframework.cache.interceptor.DefaultKeyGenerator` class or create a custom<br/>hash-based `KeyGenerator` implementation.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Custom Key Generation Declaration + +Since caching is generic, the target methods are quite likely to have various signatures +that cannot be readily mapped on top of the cache structure. 
This tends to become obvious +when the target method has multiple arguments out of which only some are suitable for +caching (while the rest are used only by the method logic). Consider the following example: + +``` +@Cacheable("books") +public Book findBook(ISBN isbn, boolean checkWarehouse, boolean includeUsed) +``` + +At first glance, while the two `boolean` arguments influence the way the book is found, +they are no use for the cache. Furthermore, what if only one of the two is important +while the other is not? + +For such cases, the `@Cacheable` annotation lets you specify how the key is generated +through its `key` attribute. You can use [SpEL](core.html#expressions) to pick the +arguments of interest (or their nested properties), perform operations, or even +invoke arbitrary methods without having to write any code or implement any interface. +This is the recommended approach over the[default generator](#cache-annotations-cacheable-default-key), since methods tend to be +quite different in signatures as the code base grows. While the default strategy might +work for some methods, it rarely works for all methods. + +The following examples use various SpEL declarations (if you are not familiar with SpEL, +do yourself a favor and read [Spring Expression Language](core.html#expressions)): + +``` +@Cacheable(cacheNames="books", key="#isbn") +public Book findBook(ISBN isbn, boolean checkWarehouse, boolean includeUsed) + +@Cacheable(cacheNames="books", key="#isbn.rawNumber") +public Book findBook(ISBN isbn, boolean checkWarehouse, boolean includeUsed) + +@Cacheable(cacheNames="books", key="T(someType).hash(#isbn)") +public Book findBook(ISBN isbn, boolean checkWarehouse, boolean includeUsed) +``` + +The preceding snippets show how easy it is to select a certain argument, one of its +properties, or even an arbitrary (static) method. 
+ +If the algorithm responsible for generating the key is too specific or if it needs +to be shared, you can define a custom `keyGenerator` on the operation. To do so, +specify the name of the `KeyGenerator` bean implementation to use, as the following +example shows: + +``` +@Cacheable(cacheNames="books", keyGenerator="myKeyGenerator") +public Book findBook(ISBN isbn, boolean checkWarehouse, boolean includeUsed) +``` + +| |The `key` and `keyGenerator` parameters are mutually exclusive and an operation<br/>that specifies both results in an exception.| +|---|--------------------------------------------------------------------------------------------------------------------------------| + +##### Default Cache Resolution + +The caching abstraction uses a simple `CacheResolver` that +retrieves the caches defined at the operation level by using the configured`CacheManager`. + +To provide a different default cache resolver, you need to implement the`org.springframework.cache.interceptor.CacheResolver` interface. + +##### Custom Cache Resolution + +The default cache resolution fits well for applications that work with a +single `CacheManager` and have no complex cache resolution requirements. + +For applications that work with several cache managers, you can set the`cacheManager` to use for each operation, as the following example shows: + +``` +@Cacheable(cacheNames="books", cacheManager="anotherCacheManager") (1) +public Book findBook(ISBN isbn) {...} +``` + +|**1**|Specifying `anotherCacheManager`.| +|-----|---------------------------------| + +You can also replace the `CacheResolver` entirely in a fashion similar to that of +replacing [key generation](#cache-annotations-cacheable-key). The resolution is +requested for every cache operation, letting the implementation actually resolve +the caches to use based on runtime arguments. 
The following example shows how to +specify a `CacheResolver`: + +``` +@Cacheable(cacheResolver="runtimeCacheResolver") (1) +public Book findBook(ISBN isbn) {...} +``` + +|**1**|Specifying the `CacheResolver`.| +|-----|-------------------------------| + +| |Since Spring 4.1, the `value` attribute of the cache annotations are no longer<br/>mandatory, since this particular information can be provided by the `CacheResolver`regardless of the content of the annotation.<br/><br/>Similarly to `key` and `keyGenerator`, the `cacheManager` and `cacheResolver`parameters are mutually exclusive, and an operation specifying both<br/>results in an exception, as a custom `CacheManager` is ignored by the`CacheResolver` implementation. This is probably not what you expect.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Synchronized Caching + +In a multi-threaded environment, certain operations might be concurrently invoked for +the same argument (typically on startup). By default, the cache abstraction does not +lock anything, and the same value may be computed several times, defeating the purpose +of caching. + +For those particular cases, you can use the `sync` attribute to instruct the underlying +cache provider to lock the cache entry while the value is being computed. As a result, +only one thread is busy computing the value, while the others are blocked until the entry +is updated in the cache. 
The following example shows how to use the `sync` attribute: + +``` +@Cacheable(cacheNames="foos", sync=true) (1) +public Foo executeExpensiveOperation(String id) {...} +``` + +|**1**|Using the `sync` attribute.| +|-----|---------------------------| + +| |This is an optional feature, and your favorite cache library may not support it.<br/>All `CacheManager` implementations provided by the core framework support it. See the<br/>documentation of your cache provider for more details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Conditional Caching + +Sometimes, a method might not be suitable for caching all the time (for example, it might +depend on the given arguments). The cache annotations support such use cases through the`condition` parameter, which takes a `SpEL` expression that is evaluated to either `true`or `false`. If `true`, the method is cached. If not, it behaves as if the method is not +cached (that is, the method is invoked every time no matter what values are in the cache +or what arguments are used). For example, the following method is cached only if the +argument `name` has a length shorter than 32: + +``` +@Cacheable(cacheNames="book", condition="#name.length() < 32") (1) +public Book findBook(String name) +``` + +|**1**|Setting a condition on `@Cacheable`.| +|-----|------------------------------------| + +In addition to the `condition` parameter, you can use the `unless` parameter to veto the +adding of a value to the cache. Unlike `condition`, `unless` expressions are evaluated +after the method has been invoked. 
To expand on the previous example, perhaps we only +want to cache paperback books, as the following example does: + +``` +@Cacheable(cacheNames="book", condition="#name.length() < 32", unless="#result.hardback") (1) +public Book findBook(String name) +``` + +|**1**|Using the `unless` attribute to block hardbacks.| +|-----|------------------------------------------------| + +The cache abstraction supports `java.util.Optional` return types. If an `Optional` value +is *present*, it will be stored in the associated cache. If an `Optional` value is not +present, `null` will be stored in the associated cache. `#result` always refers to the +business entity and never a supported wrapper, so the previous example can be rewritten +as follows: + +``` +@Cacheable(cacheNames="book", condition="#name.length() < 32", unless="#result?.hardback") +public Optional<Book> findBook(String name) +``` + +Note that `#result` still refers to `Book` and not `Optional<Book>`. Since it might be`null`, we use SpEL’s [safe navigation operator](core.html#expressions-operator-safe-navigation). + +##### Available Caching SpEL Evaluation Context + +Each `SpEL` expression evaluates against a dedicated [`context`](core.html#expressions-language-ref). +In addition to the built-in parameters, the framework provides dedicated caching-related +metadata, such as the argument names. 
The following table describes the items made +available to the context so that you can use them for key and conditional computations: + +| Name | Location | Description | Example | +|-------------|------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------| +|`methodName` | Root object | The name of the method being invoked | `#root.methodName` | +| `method` | Root object | The method being invoked | `#root.method.name` | +| `target` | Root object | The target object being invoked | `#root.target` | +|`targetClass`| Root object | The class of the target being invoked | `#root.targetClass` | +| `args` | Root object | The arguments (as array) used for invoking the target | `#root.args[0]` | +| `caches` | Root object | Collection of caches against which the current method is run | `#root.caches[0].name` | +|Argument name|Evaluation context| Name of any of the method arguments. If the names are not available<br/>(perhaps due to having no debug information), the argument names are also available under the `#a<#arg>`where `#arg` stands for the argument index (starting from `0`). |`#iban` or `#a0` (you can also use `#p0` or `#p<#arg>` notation as an alias).| +| `result` |Evaluation context|The result of the method call (the value to be cached). Only available in `unless`expressions, `cache put` expressions (to compute the `key`), or `cache evict`expressions (when `beforeInvocation` is `false`). For supported wrappers (such as`Optional`), `#result` refers to the actual object, not the wrapper.| `#result` | + +#### 8.2.2. 
The `@CachePut` Annotation + +When the cache needs to be updated without interfering with the method execution, +you can use the `@CachePut` annotation. That is, the method is always invoked and its +result is placed into the cache (according to the `@CachePut` options). It supports +the same options as `@Cacheable` and should be used for cache population rather than +method flow optimization. The following example uses the `@CachePut` annotation: + +``` +@CachePut(cacheNames="book", key="#isbn") +public Book updateBook(ISBN isbn, BookDescriptor descriptor) +``` + +| |Using `@CachePut` and `@Cacheable` annotations on the same method is generally<br/>strongly discouraged because they have different behaviors. While the latter causes the<br/>method invocation to be skipped by using the cache, the former forces the invocation in<br/>order to run a cache update. This leads to unexpected behavior and, with the exception<br/>of specific corner-cases (such as annotations having conditions that exclude them from each<br/>other), such declarations should be avoided. Note also that such conditions should not rely<br/>on the result object (that is, the `#result` variable), as these are validated up-front to<br/>confirm the exclusion.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.2.3. 
The `@CacheEvict` annotation + +The cache abstraction allows not just population of a cache store but also eviction. +This process is useful for removing stale or unused data from the cache. As opposed to`@Cacheable`, `@CacheEvict` demarcates methods that perform cache +eviction (that is, methods that act as triggers for removing data from the cache). +Similarly to its sibling, `@CacheEvict` requires specifying one or more caches +that are affected by the action, allows a custom cache and key resolution or a +condition to be specified, and features an extra parameter +(`allEntries`) that indicates whether a cache-wide eviction needs to be performed +rather than just an entry eviction (based on the key). The following example evicts +all entries from the `books` cache: + +``` +@CacheEvict(cacheNames="books", allEntries=true) (1) +public void loadBooks(InputStream batch) +``` + +|**1**|Using the `allEntries` attribute to evict all entries from the cache.| +|-----|---------------------------------------------------------------------| + +This option comes in handy when an entire cache region needs to be cleared out. +Rather than evicting each entry (which would take a long time, since it is inefficient), +all the entries are removed in one operation, as the preceding example shows. +Note that the framework ignores any key specified in this scenario as it does not apply +(the entire cache is evicted, not only one entry). + +You can also indicate whether the eviction should occur after (the default) or before +the method is invoked by using the `beforeInvocation` attribute. The former provides the +same semantics as the rest of the annotations: Once the method completes successfully, +an action (in this case, eviction) on the cache is run. If the method does not +run (as it might be cached) or an exception is thrown, the eviction does not occur. +The latter (`beforeInvocation=true`) causes the eviction to always occur before the +method is invoked. 
This is useful in cases where the eviction does not need to be tied +to the method outcome. + +Note that `void` methods can be used with `@CacheEvict` - as the methods act as a +trigger, the return values are ignored (as they do not interact with the cache). This is +not the case with `@Cacheable` which adds data to the cache or updates data in the cache +and, thus, requires a result. + +#### 8.2.4. The `@Caching` Annotation + +Sometimes, multiple annotations of the same type (such as `@CacheEvict` or`@CachePut`) need to be specified — for example, because the condition or the key +expression is different between different caches. `@Caching` lets multiple nested`@Cacheable`, `@CachePut`, and `@CacheEvict` annotations be used on the same method. +The following example uses two `@CacheEvict` annotations: + +``` +@Caching(evict = { @CacheEvict("primary"), @CacheEvict(cacheNames="secondary", key="#p0") }) +public Book importBooks(String deposit, Date date) +``` + +#### 8.2.5. The `@CacheConfig` annotation + +So far, we have seen that caching operations offer many customization options and that +you can set these options for each operation. However, some of the customization options +can be tedious to configure if they apply to all operations of the class. For +instance, specifying the name of the cache to use for every cache operation of the +class can be replaced by a single class-level definition. This is where `@CacheConfig`comes into play. The following examples uses `@CacheConfig` to set the name of the cache: + +``` +@CacheConfig("books") (1) +public class BookRepositoryImpl implements BookRepository { + + @Cacheable + public Book findBook(ISBN isbn) {...} +} +``` + +|**1**|Using `@CacheConfig` to set the name of the cache.| +|-----|--------------------------------------------------| + +`@CacheConfig` is a class-level annotation that allows sharing the cache names, +the custom `KeyGenerator`, the custom `CacheManager`, and the custom `CacheResolver`. 
+Placing this annotation on the class does not turn on any caching operation. + +An operation-level customization always overrides a customization set on `@CacheConfig`. +Therefore, this gives three levels of customizations for each cache operation: + +* Globally configured, available for `CacheManager`, `KeyGenerator`. + +* At the class level, using `@CacheConfig`. + +* At the operation level. + +#### 8.2.6. Enabling Caching Annotations + +It is important to note that even though declaring the cache annotations does not +automatically trigger their actions - like many things in Spring, the feature has to be +declaratively enabled (which means if you ever suspect caching is to blame, you can +disable it by removing only one configuration line rather than all the annotations in +your code). + +To enable caching annotations add the annotation `@EnableCaching` to one of your`@Configuration` classes: + +``` +@Configuration +@EnableCaching +public class AppConfig { +} +``` + +Alternatively, for XML configuration you can use the `cache:annotation-driven` element: + +``` +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:cache="http://www.springframework.org/schema/cache" + xsi:schemaLocation=" + http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/cache https://www.springframework.org/schema/cache/spring-cache.xsd"> + + <cache:annotation-driven/> +</beans> +``` + +Both the `cache:annotation-driven` element and the `@EnableCaching` annotation let you +specify various options that influence the way the caching behavior is added to the +application through AOP. The configuration is intentionally similar with that of[`@Transactional`](data-access.html#tx-annotation-driven-settings). 
+ +| |The default advice mode for processing caching annotations is `proxy`, which allows<br/>for interception of calls through the proxy only. Local calls within the same class<br/>cannot get intercepted that way. For a more advanced mode of interception, consider<br/>switching to `aspectj` mode in combination with compile-time or load-time weaving.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |For more detail about advanced customizations (using Java configuration) that are<br/>required to implement `CachingConfigurer`, see the[javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/cache/annotation/CachingConfigurer.html).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| XML Attribute | Annotation Attribute | Default | Description | 
+|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `cache-manager` |N/A (see the [`CachingConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/cache/annotation/CachingConfigurer.html) javadoc)| `cacheManager` | The name of the cache manager to use. A default `CacheResolver` is initialized behind<br/>the scenes with this cache manager (or `cacheManager` if not set). For more<br/>fine-grained management of the cache resolution, consider setting the 'cache-resolver'<br/>attribute. | +| `cache-resolver` |N/A (see the [`CachingConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/cache/annotation/CachingConfigurer.html) javadoc)|A `SimpleCacheResolver` using the configured `cacheManager`.| The bean name of the CacheResolver that is to be used to resolve the backing caches.<br/>This attribute is not required and needs to be specified only as an alternative to<br/>the 'cache-manager' attribute. 
| +| `key-generator` |N/A (see the [`CachingConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/cache/annotation/CachingConfigurer.html) javadoc)| `SimpleKeyGenerator` | Name of the custom key generator to use. | +| `error-handler` |N/A (see the [`CachingConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/cache/annotation/CachingConfigurer.html) javadoc)| `SimpleCacheErrorHandler` | The name of the custom cache error handler to use. By default, any exception thrown during<br/>a cache related operation is thrown back at the client. | +| `mode` | `mode` | `proxy` |The default mode (`proxy`) processes annotated beans to be proxied by using Spring’s AOP<br/>framework (following proxy semantics, as discussed earlier, applying to method calls<br/>coming in through the proxy only). The alternative mode (`aspectj`) instead weaves the<br/>affected classes with Spring’s AspectJ caching aspect, modifying the target class byte<br/>code to apply to any kind of method call. AspectJ weaving requires `spring-aspects.jar`in the classpath as well as load-time weaving (or compile-time weaving) enabled. (See[Spring configuration](core.html#aop-aj-ltw-spring) for details on how to set up<br/>load-time weaving.)| +|`proxy-target-class`| `proxyTargetClass` | `false` | Applies to proxy mode only. Controls what type of caching proxies are created for<br/>classes annotated with the `@Cacheable` or `@CacheEvict` annotations. If the`proxy-target-class` attribute is set to `true`, class-based proxies are created.<br/>If `proxy-target-class` is `false` or if the attribute is omitted, standard JDK<br/>interface-based proxies are created. (See [Proxying Mechanisms](core.html#aop-proxying)for a detailed examination of the different proxy types.) | +| `order` | `order` | Ordered.LOWEST\_PRECEDENCE | Defines the order of the cache advice that is applied to beans annotated with`@Cacheable` or `@CacheEvict`. 
(For more information about the rules related to<br/>ordering AOP advice, see [Advice Ordering](core.html#aop-ataspectj-advice-ordering).)<br/>No specified ordering means that the AOP subsystem determines the order of the advice. | + +| |`<cache:annotation-driven/>` looks for `@Cacheable/@CachePut/@CacheEvict/@Caching`only on beans in the same application context in which it is defined. This means that,<br/>if you put `<cache:annotation-driven/>` in a `WebApplicationContext` for a`DispatcherServlet`, it checks for beans only in your controllers, not your services.<br/>See [the MVC section](web.html#mvc-servlet) for more information.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Method visibility and cache annotations + +When you use proxies, you should apply the cache annotations only to methods with +public visibility. If you do annotate protected, private, or package-visible methods +with these annotations, no error is raised, but the annotated method does not exhibit +the configured caching settings. Consider using AspectJ (see the rest of this section) +if you need to annotate non-public methods, as it changes the bytecode itself. + +| |Spring recommends that you only annotate concrete classes (and methods of concrete<br/>classes) with the `@Cache*` annotations, as opposed to annotating interfaces.<br/>You certainly can place an `@Cache*` annotation on an interface (or an interface<br/>method), but this works only if you use the proxy mode (`mode="proxy"`). 
If you use the<br/>weaving-based aspect (`mode="aspectj"`), the caching settings are not recognized on<br/>interface-level declarations by the weaving infrastructure.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |In proxy mode (the default), only external method calls coming in through the<br/>proxy are intercepted. This means that self-invocation (in effect, a method within the<br/>target object that calls another method of the target object) does not lead to actual<br/>caching at runtime even if the invoked method is marked with `@Cacheable`. Consider<br/>using the `aspectj` mode in this case. Also, the proxy must be fully initialized to<br/>provide the expected behavior, so you should not rely on this feature in your<br/>initialization code (that is, `@PostConstruct`).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 8.2.7. 
Using Custom Annotations + +Custom annotation and AspectJ + +This feature works only with the proxy-based approach but can be enabled +with a bit of extra effort by using AspectJ. + +The `spring-aspects` module defines an aspect for the standard annotations only. +If you have defined your own annotations, you also need to define an aspect for +those. Check `AnnotationCacheAspect` for an example. + +The caching abstraction lets you use your own annotations to identify what method +triggers cache population or eviction. This is quite handy as a template mechanism, +as it eliminates the need to duplicate cache annotation declarations, which is +especially useful if the key or condition are specified or if the foreign imports +(`org.springframework`) are not allowed in your code base. Similarly to the rest +of the [stereotype](core.html#beans-stereotype-annotations) annotations, you can +use `@Cacheable`, `@CachePut`, `@CacheEvict`, and `@CacheConfig` as[meta-annotations](core.html#beans-meta-annotations) (that is, annotations that +can annotate other annotations). In the following example, we replace a common`@Cacheable` declaration with our own custom annotation: + +``` +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD}) +@Cacheable(cacheNames="books", key="#isbn") +public @interface SlowService { +} +``` + +In the preceding example, we have defined our own `SlowService` annotation, +which itself is annotated with `@Cacheable`. Now we can replace the following code: + +``` +@Cacheable(cacheNames="books", key="#isbn") +public Book findBook(ISBN isbn, boolean checkWarehouse, boolean includeUsed) +``` + +The following example shows the custom annotation with which we can replace the +preceding code: + +``` +@SlowService +public Book findBook(ISBN isbn, boolean checkWarehouse, boolean includeUsed) +``` + +Even though `@SlowService` is not a Spring annotation, the container automatically picks +up its declaration at runtime and understands its meaning. 
Note that, as mentioned [earlier](#cache-annotation-enable), annotation-driven behavior needs to be enabled. + +### 8.3. JCache (JSR-107) Annotations + +Since version 4.1, Spring’s caching abstraction fully supports the JCache standard +(JSR-107) annotations: `@CacheResult`, `@CachePut`, `@CacheRemove`, and `@CacheRemoveAll` as well as the `@CacheDefaults`, `@CacheKey`, and `@CacheValue` companions. +You can use these annotations even without migrating your cache store to JSR-107. +The internal implementation uses Spring’s caching abstraction and provides default `CacheResolver` and `KeyGenerator` implementations that are compliant with the +specification. In other words, if you are already using Spring’s caching abstraction, +you can switch to these standard annotations without changing your cache storage +(or configuration, for that matter). + +#### 8.3.1. Feature Summary + +For those who are familiar with Spring’s caching annotations, the following table +describes the main differences between the Spring annotations and their JSR-107 +counterparts: + +| Spring | JSR-107 | Remark | +|------------------------------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `@Cacheable` | `@CacheResult` | Fairly similar. `@CacheResult` can cache specific exceptions and force the<br/>execution of the method regardless of the content of the cache. | +| `@CachePut` | `@CachePut` |While Spring updates the cache with the result of the method invocation, JCache<br/>requires that it be passed as an argument that is annotated with `@CacheValue`.<br/>Due to this difference, JCache allows updating the cache before or after the<br/>actual method invocation.| +| `@CacheEvict` | `@CacheRemove` | Fairly similar. 
`@CacheRemove` supports conditional eviction when the<br/>method invocation results in an exception. | +|`@CacheEvict(allEntries=true)`|`@CacheRemoveAll`| See `@CacheRemove`. | +| `@CacheConfig` |`@CacheDefaults` | Lets you configure the same concepts, in a similar fashion. | + +JCache has the notion of `javax.cache.annotation.CacheResolver`, which is identical +to the Spring’s `CacheResolver` interface, except that JCache supports only a single +cache. By default, a simple implementation retrieves the cache to use based on the +name declared on the annotation. It should be noted that, if no cache name is +specified on the annotation, a default is automatically generated. See the javadoc +of `@CacheResult#cacheName()` for more information. + +`CacheResolver` instances are retrieved by a `CacheResolverFactory`. It is possible +to customize the factory for each cache operation, as the following example shows: + +``` +@CacheResult(cacheNames="books", cacheResolverFactory=MyCacheResolverFactory.class) (1) +public Book findBook(ISBN isbn) +``` + +|**1**|Customizing the factory for this operation.| +|-----|-------------------------------------------| + +| |For all referenced classes, Spring tries to locate a bean with the given type.<br/>If more than one match exists, a new instance is created and can use the regular<br/>bean lifecycle callbacks, such as dependency injection.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Keys are generated by a `javax.cache.annotation.CacheKeyGenerator` that serves the +same purpose as Spring’s `KeyGenerator`. By default, all method arguments are taken +into account, unless at least one parameter is annotated with `@CacheKey`. This is +similar to Spring’s [custom key generation +declaration](#cache-annotations-cacheable-key). 
For instance, the following are identical operations, one using +Spring’s abstraction and the other using JCache: + +``` +@Cacheable(cacheNames="books", key="#isbn") +public Book findBook(ISBN isbn, boolean checkWarehouse, boolean includeUsed) + +@CacheResult(cacheName="books") +public Book findBook(@CacheKey ISBN isbn, boolean checkWarehouse, boolean includeUsed) +``` + +You can also specify the `CacheKeyResolver` on the operation, similar to how you can +specify the `CacheResolverFactory`. + +JCache can manage exceptions thrown by annotated methods. This can prevent an update of +the cache, but it can also cache the exception as an indicator of the failure instead of +calling the method again. Assume that `InvalidIsbnNotFoundException` is thrown if the +structure of the ISBN is invalid. This is a permanent failure (no book could ever be +retrieved with such a parameter). The following caches the exception so that further +calls with the same, invalid, ISBN throw the cached exception directly instead of +invoking the method again: + +``` +@CacheResult(cacheName="books", exceptionCacheName="failures", + cachedExceptions = InvalidIsbnNotFoundException.class) +public Book findBook(ISBN isbn) +``` + +#### 8.3.2. Enabling JSR-107 Support + +You do not need to do anything specific to enable the JSR-107 support alongside Spring’s +declarative annotation support. Both `@EnableCaching` and the `cache:annotation-driven` XML element automatically enable the JCache support if both the JSR-107 API and the `spring-context-support` module are present in the classpath. + +| |Depending on your use case, the choice is basically yours. You can even mix and<br/>match services by using the JSR-107 API on some and using Spring’s own annotations on 
However, if these services impact the same caches, you should use a consistent<br/>and identical key generation implementation.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 8.4. Declarative XML-based Caching + +If annotations are not an option (perhaps due to having no access to the sources +or no external code), you can use XML for declarative caching. So, instead of +annotating the methods for caching, you can specify the target method and the +caching directives externally (similar to the declarative transaction management[advice](data-access.html#transaction-declarative-first-example)). The example +from the previous section can be translated into the following example: + +``` +<!-- the service we want to make cacheable --> +<bean id="bookService" class="x.y.service.DefaultBookService"/> + +<!-- cache definitions --> +<cache:advice id="cacheAdvice" cache-manager="cacheManager"> + <cache:caching cache="books"> + <cache:cacheable method="findBook" key="#isbn"/> + <cache:cache-evict method="loadBooks" all-entries="true"/> + </cache:caching> +</cache:advice> + +<!-- apply the cacheable behavior to all BookService interfaces --> +<aop:config> + <aop:advisor advice-ref="cacheAdvice" pointcut="execution(* x.y.BookService.*(..))"/> +</aop:config> + +<!-- cache manager definition omitted --> +``` + +In the preceding configuration, the `bookService` is made cacheable. The caching semantics +to apply are encapsulated in the `cache:advice` definition, which causes the `findBooks`method to be used for putting data into the cache and the `loadBooks` method for evicting +data. Both definitions work against the `books` cache. 
+ +The `aop:config` definition applies the cache advice to the appropriate points in the +program by using the AspectJ pointcut expression (more information is available in[Aspect Oriented Programming with Spring](core.html#aop)). In the preceding example, +all methods from the `BookService` are considered and the cache advice is applied to them. + +The declarative XML caching supports all of the annotation-based model, so moving between +the two should be fairly easy. Furthermore, both can be used inside the same application. +The XML-based approach does not touch the target code. However, it is inherently more +verbose. When dealing with classes that have overloaded methods that are targeted for +caching, identifying the proper methods does take an extra effort, since the `method`argument is not a good discriminator. In these cases, you can use the AspectJ pointcut +to cherry pick the target methods and apply the appropriate caching functionality. +However, through XML, it is easier to apply package or group or interface-wide caching +(again, due to the AspectJ pointcut) and to create template-like definitions (as we did +in the preceding example by defining the target cache through the `cache:definitions``cache` attribute). + +### 8.5. Configuring the Cache Storage + +The cache abstraction provides several storage integration options. To use them, you need +to declare an appropriate `CacheManager` (an entity that controls and manages `Cache`instances and that can be used to retrieve these for storage). + +#### 8.5.1. JDK `ConcurrentMap`-based Cache + +The JDK-based `Cache` implementation resides under`org.springframework.cache.concurrent` package. It lets you use `ConcurrentHashMap`as a backing `Cache` store. 
The following example shows how to configure two caches: + +``` +<!-- simple cache manager --> +<bean id="cacheManager" class="org.springframework.cache.support.SimpleCacheManager"> + <property name="caches"> + <set> + <bean class="org.springframework.cache.concurrent.ConcurrentMapCacheFactoryBean" p:name="default"/> + <bean class="org.springframework.cache.concurrent.ConcurrentMapCacheFactoryBean" p:name="books"/> + </set> + </property> +</bean> +``` + +The preceding snippet uses the `SimpleCacheManager` to create a `CacheManager` for the +two nested `ConcurrentMapCache` instances named `default` and `books`. Note that the +names are configured directly for each cache. + +As the cache is created by the application, it is bound to its lifecycle, making it +suitable for basic use cases, tests, or simple applications. The cache scales well +and is very fast, but it does not provide any management, persistence capabilities, +or eviction contracts. + +#### 8.5.2. Ehcache-based Cache + +| |Ehcache 3.x is fully JSR-107 compliant and no dedicated support is required for it.| +|---|-----------------------------------------------------------------------------------| + +The Ehcache 2.x implementation is located in the `org.springframework.cache.ehcache`package. Again, to use it, you need to declare the appropriate `CacheManager`. +The following example shows how to do so: + +``` +<bean id="cacheManager" + class="org.springframework.cache.ehcache.EhCacheCacheManager" p:cache-manager-ref="ehcache"/> + +<!-- EhCache library setup --> +<bean id="ehcache" + class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean" p:config-location="ehcache.xml"/> +``` + +This setup bootstraps the ehcache library inside the Spring IoC (through the `ehcache`bean), which is then wired into the dedicated `CacheManager` implementation. Note that +the entire Ehcache-specific configuration is read from `ehcache.xml`. + +#### 8.5.3. 
Caffeine Cache + +Caffeine is a Java 8 rewrite of Guava’s cache, and its implementation is located in the`org.springframework.cache.caffeine` package and provides access to several features +of Caffeine. + +The following example configures a `CacheManager` that creates the cache on demand: + +``` +<bean id="cacheManager" + class="org.springframework.cache.caffeine.CaffeineCacheManager"/> +``` + +You can also provide the caches to use explicitly. In that case, only those +are made available by the manager. The following example shows how to do so: + +``` +<bean id="cacheManager" class="org.springframework.cache.caffeine.CaffeineCacheManager"> + <property name="cacheNames"> + <set> + <value>default</value> + <value>books</value> + </set> + </property> +</bean> +``` + +The Caffeine `CacheManager` also supports custom `Caffeine` and `CacheLoader`. +See the [Caffeine documentation](https://github.com/ben-manes/caffeine/wiki)for more information about those. + +#### 8.5.4. GemFire-based Cache + +GemFire is a memory-oriented, disk-backed, elastically scalable, continuously available, +active (with built-in pattern-based subscription notifications), globally replicated +database and provides fully-featured edge caching. For further information on how to +use GemFire as a `CacheManager` (and more), see the[Spring Data GemFire reference documentation](https://docs.spring.io/spring-gemfire/docs/current/reference/html/). + +#### 8.5.5. JSR-107 Cache + +Spring’s caching abstraction can also use JSR-107-compliant caches. The JCache +implementation is located in the `org.springframework.cache.jcache` package. + +Again, to use it, you need to declare the appropriate `CacheManager`. +The following example shows how to do so: + +``` +<bean id="cacheManager" + class="org.springframework.cache.jcache.JCacheCacheManager" + p:cache-manager-ref="jCacheManager"/> + +<!-- JSR-107 cache manager setup --> +<bean id="jCacheManager" .../> +``` + +#### 8.5.6. 
Dealing with Caches without a Backing Store + +Sometimes, when switching environments or doing testing, you might have cache +declarations without having an actual backing cache configured. As this is an invalid +configuration, an exception is thrown at runtime, since the caching infrastructure +is unable to find a suitable store. In situations like this, rather than removing the +cache declarations (which can prove tedious), you can wire in a simple dummy cache that +performs no caching — that is, it forces the cached methods to be invoked every time. +The following example shows how to do so: + +``` +<bean id="cacheManager" class="org.springframework.cache.support.CompositeCacheManager"> + <property name="cacheManagers"> + <list> + <ref bean="jdkCache"/> + <ref bean="gemfireCache"/> + </list> + </property> + <property name="fallbackToNoOpCache" value="true"/> +</bean> +``` + +The `CompositeCacheManager` in the preceding chains multiple `CacheManager` instances and, +through the `fallbackToNoOpCache` flag, adds a no-op cache for all the definitions not +handled by the configured cache managers. That is, every cache definition not found in +either `jdkCache` or `gemfireCache` (configured earlier in the example) is handled by +the no-op cache, which does not store any information, causing the target method to be +invoked every time. + +### 8.6. Plugging-in Different Back-end Caches + +Clearly, there are plenty of caching products out there that you can use as a backing +store. For those that do not support JSR-107 you need to provide a `CacheManager` and a`Cache` implementation. This may sound harder than it is, since, in practice, the classes +tend to be simple [adapters](https://en.wikipedia.org/wiki/Adapter_pattern) that map the +caching abstraction framework on top of the storage API, as the `ehcache` classes do. 
+
+Most `CacheManager` classes can use the classes in the `org.springframework.cache.support` package (such as `AbstractCacheManager` which takes
+care of the boiler-plate code, leaving only the actual mapping to be completed).
+
+### 8.7. How can I Set the TTL/TTI/Eviction policy/XXX feature?
+
+Directly through your cache provider. The cache abstraction is an abstraction,
+not a cache implementation. The solution you use might support various data
+policies and different topologies that other solutions do not support (for example,
+the JDK `ConcurrentHashMap` — exposing that in the cache abstraction would be useless
+because there would be no backing support). Such functionality should be controlled
+directly through the backing cache (when configuring it) or through its native API.
+
+## 9. Appendix
+
+### 9.1. XML Schemas
+
+This part of the appendix lists XML schemas related to integration technologies.
+
+#### 9.1.1. The `jee` Schema
+
+The `jee` elements deal with issues related to Java EE (Java Enterprise Edition) configuration,
+such as looking up a JNDI object and defining EJB references.
+
+To use the elements in the `jee` schema, you need to have the following preamble at the top
+of your Spring XML configuration file. 
The text in the following snippet references the +correct schema so that the elements in the `jee` namespace are available to you: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:jee="http://www.springframework.org/schema/jee" + xsi:schemaLocation=" + http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/jee https://www.springframework.org/schema/jee/spring-jee.xsd"> + + <!-- bean definitions here --> + +</beans> +``` + +##### \<jee:jndi-lookup/\> (simple) + +The following example shows how to use JNDI to look up a data source without the `jee` schema: + +``` +<bean id="dataSource" class="org.springframework.jndi.JndiObjectFactoryBean"> + <property name="jndiName" value="jdbc/MyDataSource"/> +</bean> +<bean id="userDao" class="com.foo.JdbcUserDao"> + <!-- Spring will do the cast automatically (as usual) --> + <property name="dataSource" ref="dataSource"/> +</bean> +``` + +The following example shows how to use JNDI to look up a data source with the `jee`schema: + +``` +<jee:jndi-lookup id="dataSource" jndi-name="jdbc/MyDataSource"/> + +<bean id="userDao" class="com.foo.JdbcUserDao"> + <!-- Spring will do the cast automatically (as usual) --> + <property name="dataSource" ref="dataSource"/> +</bean> +``` + +##### `<jee:jndi-lookup/>` (with Single JNDI Environment Setting) + +The following example shows how to use JNDI to look up an environment variable without`jee`: + +``` +<bean id="simple" class="org.springframework.jndi.JndiObjectFactoryBean"> + <property name="jndiName" value="jdbc/MyDataSource"/> + <property name="jndiEnvironment"> + <props> + <prop key="ping">pong</prop> + </props> + </property> +</bean> +``` + +The following example shows how to use JNDI to look up an environment variable with `jee`: + +``` +<jee:jndi-lookup id="simple" 
jndi-name="jdbc/MyDataSource"> + <jee:environment>ping=pong</jee:environment> +</jee:jndi-lookup> +``` + +##### `<jee:jndi-lookup/>` (with Multiple JNDI Environment Settings) + +The following example shows how to use JNDI to look up multiple environment variables +without `jee`: + +``` +<bean id="simple" class="org.springframework.jndi.JndiObjectFactoryBean"> + <property name="jndiName" value="jdbc/MyDataSource"/> + <property name="jndiEnvironment"> + <props> + <prop key="sing">song</prop> + <prop key="ping">pong</prop> + </props> + </property> +</bean> +``` + +The following example shows how to use JNDI to look up multiple environment variables with`jee`: + +``` +<jee:jndi-lookup id="simple" jndi-name="jdbc/MyDataSource"> + <!-- newline-separated, key-value pairs for the environment (standard Properties format) --> + <jee:environment> + sing=song + ping=pong + </jee:environment> +</jee:jndi-lookup> +``` + +##### `<jee:jndi-lookup/>` (Complex) + +The following example shows how to use JNDI to look up a data source and a number of +different properties without `jee`: + +``` +<bean id="simple" class="org.springframework.jndi.JndiObjectFactoryBean"> + <property name="jndiName" value="jdbc/MyDataSource"/> + <property name="cache" value="true"/> + <property name="resourceRef" value="true"/> + <property name="lookupOnStartup" value="false"/> + <property name="expectedType" value="com.myapp.DefaultThing"/> + <property name="proxyInterface" value="com.myapp.Thing"/> +</bean> +``` + +The following example shows how to use JNDI to look up a data source and a number of +different properties with `jee`: + +``` +<jee:jndi-lookup id="simple" + jndi-name="jdbc/MyDataSource" + cache="true" + resource-ref="true" + lookup-on-startup="false" + expected-type="com.myapp.DefaultThing" + proxy-interface="com.myapp.Thing"/> +``` + +##### `<jee:local-slsb/>` (Simple) + +The `<jee:local-slsb/>` element configures a reference to a local EJB Stateless Session Bean. 
+
+The following example shows how to configure a reference to a local EJB Stateless Session Bean
+without `jee`:
+
+```
+<bean id="simple"
+    class="org.springframework.ejb.access.LocalStatelessSessionProxyFactoryBean">
+    <property name="jndiName" value="ejb/RentalServiceBean"/>
+    <property name="businessInterface" value="com.foo.service.RentalService"/>
+</bean>
+```
+
+The following example shows how to configure a reference to a local EJB Stateless Session Bean
+with `jee`:
+
+```
+<jee:local-slsb id="simpleSlsb" jndi-name="ejb/RentalServiceBean"
+        business-interface="com.foo.service.RentalService"/>
+```
+
+##### `<jee:local-slsb/>` (Complex)
+
+The `<jee:local-slsb/>` element configures a reference to a local EJB Stateless Session Bean.
+
+The following example shows how to configure a reference to a local EJB Stateless Session Bean
+and a number of properties without `jee`:
+
+```
+<bean id="complexLocalEjb"
+    class="org.springframework.ejb.access.LocalStatelessSessionProxyFactoryBean">
+    <property name="jndiName" value="ejb/RentalServiceBean"/>
+    <property name="businessInterface" value="com.example.service.RentalService"/>
+    <property name="cacheHome" value="true"/>
+    <property name="lookupHomeOnStartup" value="true"/>
+    <property name="resourceRef" value="true"/>
+</bean>
+```
+
+The following example shows how to configure a reference to a local EJB Stateless Session Bean
+and a number of properties with `jee`:
+
+```
+<jee:local-slsb id="complexLocalEjb"
+        jndi-name="ejb/RentalServiceBean"
+        business-interface="com.foo.service.RentalService"
+        cache-home="true"
+        lookup-home-on-startup="true"
+        resource-ref="true"/>
+```
+
+##### \<jee:remote-slsb/\>
+
+The `<jee:remote-slsb/>` element configures a reference to a `remote` EJB Stateless Session Bean.
+
+The following example shows how to configure a reference to a remote EJB Stateless Session Bean
+without `jee`:
+
+```
+<bean id="complexRemoteEjb"
+    class="org.springframework.ejb.access.SimpleRemoteStatelessSessionProxyFactoryBean">
+    <property name="jndiName" value="ejb/MyRemoteBean"/>
+    <property name="businessInterface" value="com.foo.service.RentalService"/>
+    <property name="cacheHome" value="true"/>
+    <property name="lookupHomeOnStartup" value="true"/>
+    <property name="resourceRef" value="true"/>
+    <property name="homeInterface" value="com.foo.service.RentalService"/>
+    <property name="refreshHomeOnConnectFailure" value="true"/>
+</bean>
+```
+
+The following example shows how to configure a reference to a remote EJB Stateless Session Bean
+with `jee`:
+
+```
+<jee:remote-slsb id="complexRemoteEjb"
+        jndi-name="ejb/MyRemoteBean"
+        business-interface="com.foo.service.RentalService"
+        cache-home="true"
+        lookup-home-on-startup="true"
+        resource-ref="true"
+        home-interface="com.foo.service.RentalService"
+        refresh-home-on-connect-failure="true"/>
+```
+
+#### 9.1.2. The `jms` Schema
+
+The `jms` elements deal with configuring JMS-related beans, such as Spring’s [Message Listener Containers](#jms-mdp). These elements are detailed in the
+section of the [JMS chapter](#jms) entitled [JMS Namespace Support](#jms-namespace). See that chapter for full details on this support
+and the `jms` elements themselves.
+
+In the interest of completeness, to use the elements in the `jms` schema, you need to have
+the following preamble at the top of your Spring XML configuration file. 
The text in the
+following snippet references the correct schema so that the elements in the `jms` namespace
+are available to you:
+
+```
+<?xml version="1.0" encoding="UTF-8"?>
+<beans xmlns="http://www.springframework.org/schema/beans"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xmlns:jms="http://www.springframework.org/schema/jms"
+    xsi:schemaLocation="
+        http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd
+        http://www.springframework.org/schema/jms https://www.springframework.org/schema/jms/spring-jms.xsd">
+
+    <!-- bean definitions here -->
+
+</beans>
+```
+
+#### 9.1.3. Using `<context:mbean-export/>`
+
+This element is detailed in [Configuring Annotation-based MBean Export](#jmx-context-mbeanexport).
+
+#### 9.1.4. The `cache` Schema
+
+You can use the `cache` elements to enable support for Spring’s `@CacheEvict`, `@CachePut`,
+and `@Caching` annotations. It also supports declarative XML-based caching. See [Enabling Caching Annotations](#cache-annotation-enable) and [Declarative XML-based Caching](#cache-declarative-xml) for details.
+
+To use the elements in the `cache` schema, you need to have the following preamble at the
+top of your Spring XML configuration file. 
The text in the following snippet references +the correct schema so that the elements in the `cache` namespace are available to you: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:cache="http://www.springframework.org/schema/cache" + xsi:schemaLocation=" + http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/cache https://www.springframework.org/schema/cache/spring-cache.xsd"> + + <!-- bean definitions here --> + +</beans> +``` diff --git a/docs/en/spring-framework/languages.md b/docs/en/spring-framework/languages.md new file mode 100644 index 0000000000000000000000000000000000000000..b26bda70d649436deca91d2dec93c9a6f4e421fc --- /dev/null +++ b/docs/en/spring-framework/languages.md @@ -0,0 +1,1722 @@ +# Language Support + +## 1. Kotlin + +[Kotlin](https://kotlinlang.org) is a statically typed language that targets the JVM +(and other platforms) which allows writing concise and elegant code while providing +very good [interoperability](https://kotlinlang.org/docs/reference/java-interop.html)with existing libraries written in Java. + +The Spring Framework provides first-class support for Kotlin and lets developers write +Kotlin applications almost as if the Spring Framework was a native Kotlin framework. +Most of the code samples of the reference documentation are +provided in Kotlin in addition to Java. + +The easiest way to build a Spring application with Kotlin is to leverage Spring Boot and +its [dedicated Kotlin support](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-kotlin.html).[This comprehensive tutorial](https://spring.io/guides/tutorials/spring-boot-kotlin/)will teach you how to build Spring Boot applications with Kotlin using [start.spring.io](https://start.spring.io/#!language=kotlin&type=gradle-project). 
+ +Feel free to join the #spring channel of [Kotlin Slack](https://slack.kotlinlang.org/)or ask a question with `spring` and `kotlin` as tags on[Stackoverflow](https://stackoverflow.com/questions/tagged/spring+kotlin) if you need support. + +### 1.1. Requirements + +Spring Framework supports Kotlin 1.3+ and requires[`kotlin-stdlib`](https://search.maven.org/artifact/org.jetbrains.kotlin/kotlin-stdlib)(or one of its variants, such as [`kotlin-stdlib-jdk8`](https://search.maven.org/artifact/org.jetbrains.kotlin/kotlin-stdlib-jdk8)) +and [`kotlin-reflect`](https://search.maven.org/artifact/org.jetbrains.kotlin/kotlin-reflect)to be present on the classpath. They are provided by default if you bootstrap a Kotlin project on[start.spring.io](https://start.spring.io/#!language=kotlin&type=gradle-project). + +| |The [Jackson Kotlin module](https://github.com/FasterXML/jackson-module-kotlin) is required<br/>for serializing or deserializing JSON data for Kotlin classes with Jackson, so make sure to add the`com.fasterxml.jackson.module:jackson-module-kotlin` dependency to your project if you have such need.<br/>It is automatically registered when found in the classpath.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.2. Extensions + +Kotlin [extensions](https://kotlinlang.org/docs/reference/extensions.html) provide the ability +to extend existing classes with additional functionality. The Spring Framework Kotlin APIs +use these extensions to add new Kotlin-specific conveniences to existing Spring APIs. 
+ +The [Spring Framework KDoc API](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/) lists +and documents all available Kotlin extensions and DSLs. + +| |Keep in mind that Kotlin extensions need to be imported to be used. This means,<br/>for example, that the `GenericApplicationContext.registerBean` Kotlin extension<br/>is available only if `org.springframework.context.support.registerBean` is imported.<br/>That said, similar to static imports, an IDE should automatically suggest the import in most cases.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For example, [Kotlin reified type parameters](https://kotlinlang.org/docs/reference/inline-functions.html#reified-type-parameters)provide a workaround for JVM [generics type erasure](https://docs.oracle.com/javase/tutorial/java/generics/erasure.html), +and the Spring Framework provides some extensions to take advantage of this feature. +This allows for a better Kotlin API `RestTemplate`, for the new `WebClient` from Spring +WebFlux, and for various other APIs. 
+ +| |Other libraries, such as Reactor and Spring Data, also provide Kotlin extensions<br/>for their APIs, thus giving a better Kotlin development experience overall.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To retrieve a list of `User` objects in Java, you would normally write the following: + +``` +Flux<User> users = client.get().retrieve().bodyToFlux(User.class) +``` + +With Kotlin and the Spring Framework extensions, you can instead write the following: + +``` +val users = client.get().retrieve().bodyToFlux<User>() +// or (both are equivalent) +val users : Flux<User> = client.get().retrieve().bodyToFlux() +``` + +As in Java, `users` in Kotlin is strongly typed, but Kotlin’s clever type inference allows +for shorter syntax. + +### 1.3. Null-safety + +One of Kotlin’s key features is [null-safety](https://kotlinlang.org/docs/reference/null-safety.html), +which cleanly deals with `null` values at compile time rather than bumping into the famous`NullPointerException` at runtime. This makes applications safer through nullability +declarations and expressing “value or no value” semantics without paying the cost of wrappers, such as `Optional`. +(Kotlin allows using functional constructs with nullable values. See this[comprehensive guide to Kotlin null-safety](https://www.baeldung.com/kotlin-null-safety).) + +Although Java does not let you express null-safety in its type-system, the Spring Framework +provides [null-safety of the whole Spring Framework API](core.html#null-safety)via tooling-friendly annotations declared in the `org.springframework.lang` package. 
+By default, types from Java APIs used in Kotlin are recognized as[platform types](https://kotlinlang.org/docs/reference/java-interop.html#null-safety-and-platform-types), +for which null-checks are relaxed.[Kotlin support for JSR-305 annotations](https://kotlinlang.org/docs/reference/java-interop.html#jsr-305-support)and Spring nullability annotations provide null-safety for the whole Spring Framework API to Kotlin developers, +with the advantage of dealing with `null`-related issues at compile time. + +| |Libraries such as Reactor or Spring Data provide null-safe APIs to leverage this feature.| +|---|-----------------------------------------------------------------------------------------| + +You can configure JSR-305 checks by adding the `-Xjsr305` compiler flag with the following +options: `-Xjsr305={strict|warn|ignore}`. + +For kotlin versions 1.1+, the default behavior is the same as `-Xjsr305=warn`. +The `strict` value is required to have Spring Framework API null-safety taken into account +in Kotlin types inferred from Spring API but should be used with the knowledge that Spring +API nullability declaration could evolve even between minor releases and that more checks may +be added in the future. + +| |Generic type arguments, varargs, and array elements nullability are not supported yet,<br/>but should be in an upcoming release. See [this discussion](https://github.com/Kotlin/KEEP/issues/79)for up-to-date information.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.4. Classes and Interfaces + +The Spring Framework supports various Kotlin constructs, such as instantiating Kotlin classes +through primary constructors, immutable classes data binding, and function optional parameters +with default values. 
+
+Kotlin parameter names are recognized through a dedicated `KotlinReflectionParameterNameDiscoverer`,
+which allows finding interface method parameter names without requiring the Java 8 `-parameters` compiler flag to be enabled during compilation.
+
+You can declare configuration classes as [top level or nested but not inner](https://kotlinlang.org/docs/reference/nested-classes.html),
+since the latter requires a reference to the outer class.
+
+### 1.5. Annotations
+
+The Spring Framework also takes advantage of [Kotlin null-safety](https://kotlinlang.org/docs/reference/null-safety.html) to determine if an HTTP parameter is required without having to explicitly
+define the `required` attribute. That means `@RequestParam name: String?` is treated
+as not required and, conversely, `@RequestParam name: String` is treated as being required.
+This feature is also supported on the Spring Messaging `@Header` annotation.
+
+In a similar fashion, Spring bean injection with `@Autowired`, `@Bean`, or `@Inject` uses
+this information to determine if a bean is required or not.
+
+For example, `@Autowired lateinit var thing: Thing` implies that a bean
+of type `Thing` must be registered in the application context, while `@Autowired lateinit var thing: Thing?` does not raise an error if such a bean does not exist.
+
+Following the same principle, `@Bean fun play(toy: Toy, car: Car?) = Baz(toy, car)` implies
+that a bean of type `Toy` must be registered in the application context, while a bean of
+type `Car` may or may not exist. The same behavior applies to autowired constructor parameters.
+ +| |If you use bean validation on classes with properties or a primary constructor<br/>parameters, you may need to use[annotation use-site targets](https://kotlinlang.org/docs/reference/annotations.html#annotation-use-site-targets),<br/>such as `@field:NotNull` or `@get:Size(min=5, max=15)`, as described in[this Stack Overflow response](https://stackoverflow.com/a/35853200/1092077).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.6. Bean Definition DSL + +Spring Framework supports registering beans in a functional way by using lambdas +as an alternative to XML or Java configuration (`@Configuration` and `@Bean`). In a nutshell, +it lets you register beans with a lambda that acts as a `FactoryBean`. +This mechanism is very efficient, as it does not require any reflection or CGLIB proxies. 
+
+In Java, you can, for example, write the following:
+
+```
+class Foo {}
+
+class Bar {
+    private final Foo foo;
+    public Bar(Foo foo) {
+        this.foo = foo;
+    }
+}
+
+GenericApplicationContext context = new GenericApplicationContext();
+context.registerBean(Foo.class);
+context.registerBean(Bar.class, () -> new Bar(context.getBean(Foo.class)));
+```
+
+In Kotlin, with reified type parameters and `GenericApplicationContext` Kotlin extensions,
+you can instead write the following:
+
+```
+class Foo
+
+class Bar(private val foo: Foo)
+
+val context = GenericApplicationContext().apply {
+    registerBean<Foo>()
+    registerBean { Bar(it.getBean()) }
+}
+```
+
+When the class `Bar` has a single constructor, you can even just specify the bean class,
+the constructor parameters will be autowired by type:
+
+```
+val context = GenericApplicationContext().apply {
+    registerBean<Foo>()
+    registerBean<Bar>()
+}
+```
+
+In order to allow a more declarative approach and cleaner syntax, Spring Framework provides
+a [Kotlin bean definition DSL](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/org.springframework.context.support/-bean-definition-dsl/). It declares an `ApplicationContextInitializer` through a clean declarative API,
+which lets you deal with profiles and `Environment` for customizing
+how beans are registered.
+ +In the following example notice that: + +* Type inference usually allows to avoid specifying the type for bean references like `ref("bazBean")` + +* It is possible to use Kotlin top level functions to declare beans using callable references like `bean(::myRouter)` in this example + +* When specifying `bean<Bar>()` or `bean(::myRouter)`, parameters are autowired by type + +* The `FooBar` bean will be registered only if the `foobar` profile is active + +``` +class Foo +class Bar(private val foo: Foo) +class Baz(var message: String = "") +class FooBar(private val baz: Baz) + +val myBeans = beans { + bean<Foo>() + bean<Bar>() + bean("bazBean") { + Baz().apply { + message = "Hello world" + } + } + profile("foobar") { + bean { FooBar(ref("bazBean")) } + } + bean(::myRouter) +} + +fun myRouter(foo: Foo, bar: Bar, baz: Baz) = router { + // ... +} +``` + +| |This DSL is programmatic, meaning it allows custom registration logic of beans<br/>through an `if` expression, a `for` loop, or any other Kotlin constructs.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can then use this `beans()` function to register beans on the application context, +as the following example shows: + +``` +val context = GenericApplicationContext().apply { + myBeans.initialize(this) + refresh() +} +``` + +| |Spring Boot is based on JavaConfig and[does not yet provide specific support for functional bean definition](https://github.com/spring-projects/spring-boot/issues/8115),<br/>but you can experimentally use functional bean definitions through Spring Boot’s `ApplicationContextInitializer` support.<br/>See [this Stack Overflow answer](https://stackoverflow.com/questions/45935931/how-to-use-functional-bean-definition-kotlin-dsl-with-spring-boot-and-spring-w/46033685#46033685)for more details and up-to-date information. 
See also the experimental Kofu DSL developed in [Spring Fu incubator](https://github.com/spring-projects/spring-fu).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 1.7. Web + +#### 1.7.1. Router DSL + +Spring Framework comes with a Kotlin router DSL available in 3 flavors: + +* WebMvc.fn DSL with [router { }](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/org.springframework.web.servlet.function/router.html) + +* WebFlux.fn [Reactive](web-reactive.html#webflux-fn) DSL with [router { }](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/org.springframework.web.reactive.function.server/router.html) + +* WebFlux.fn [Coroutines](#coroutines) DSL with [coRouter { }](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/org.springframework.web.reactive.function.server/co-router.html) + +These DSL let you write clean and idiomatic Kotlin code to build a `RouterFunction` instance as the following example shows: + +``` +@Configuration +class RouterRouterConfiguration { + + @Bean + fun mainRouter(userHandler: UserHandler) = router { + accept(TEXT_HTML).nest { + GET("/") { ok().render("index") } + GET("/sse") { ok().render("sse") } + GET("/users", userHandler::findAllView) + } + "/api".nest { + accept(APPLICATION_JSON).nest { + GET("/users", userHandler::findAll) + } + accept(TEXT_EVENT_STREAM).nest { 
+ GET("/users", userHandler::stream) + } + } + resources("/**", ClassPathResource("static/")) + } +} +``` + +| |This DSL is programmatic, meaning that it allows custom registration logic of beans<br/>through an `if` expression, a `for` loop, or any other Kotlin constructs. That can be useful<br/>when you need to register routes depending on dynamic data (for example, from a database).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [MiXiT project](https://github.com/mixitconf/mixit/) for a concrete example. + +#### 1.7.2. MockMvc DSL + +A Kotlin DSL is provided via `MockMvc` Kotlin extensions in order to provide a more +idiomatic Kotlin API and to allow better discoverability (no usage of static methods). + +``` +val mockMvc: MockMvc = ... +mockMvc.get("/person/{name}", "Lee") { + secure = true + accept = APPLICATION_JSON + headers { + contentLanguage = Locale.FRANCE + } + principal = Principal { "foo" } +}.andExpect { + status { isOk } + content { contentType(APPLICATION_JSON) } + jsonPath("$.name") { value("Lee") } + content { json("""{"someBoolean": false}""", false) } +}.andDo { + print() +} +``` + +#### 1.7.3. Kotlin Script Templates + +Spring Framework provides a[`ScriptTemplateView`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/servlet/view/script/ScriptTemplateView.html)which supports [JSR-223](https://www.jcp.org/en/jsr/detail?id=223) to render templates by using script engines. + +By leveraging `scripting-jsr223` dependencies, it +is possible to use such feature to render Kotlin-based templates with[kotlinx.html](https://github.com/Kotlin/kotlinx.html) DSL or Kotlin multiline interpolated `String`. 
+ +`build.gradle.kts` + +``` +dependencies { + runtime("org.jetbrains.kotlin:kotlin-scripting-jsr223:${kotlinVersion}") +} +``` + +Configuration is usually done with `ScriptTemplateConfigurer` and `ScriptTemplateViewResolver`beans. + +`KotlinScriptConfiguration.kt` + +``` +@Configuration +class KotlinScriptConfiguration { + + @Bean + fun kotlinScriptConfigurer() = ScriptTemplateConfigurer().apply { + engineName = "kotlin" + setScripts("scripts/render.kts") + renderFunction = "render" + isSharedEngine = false + } + + @Bean + fun kotlinScriptViewResolver() = ScriptTemplateViewResolver().apply { + setPrefix("templates/") + setSuffix(".kts") + } +} +``` + +See the [kotlin-script-templating](https://github.com/sdeleuze/kotlin-script-templating) example +project for more details. + +#### 1.7.4. Kotlin multiplatform serialization + +As of Spring Framework 5.3, [Kotlin multiplatform serialization](https://github.com/Kotlin/kotlinx.serialization) is +supported in Spring MVC, Spring WebFlux and Spring Messaging (RSocket). The builtin support currently only targets JSON format. + +To enable it, follow [those instructions](https://github.com/Kotlin/kotlinx.serialization#setup) to add the related dependency and plugin. +With Spring MVC and WebFlux, both Kotlin serialization and Jackson will be configured by default if they are in the classpath since +Kotlin serialization is designed to serialize only Kotlin classes annotated with `@Serializable`. +With Spring Messaging (RSocket), make sure that neither Jackson, GSON or JSONB are in the classpath if you want automatic configuration, +if Jackson is needed configure `KotlinSerializationJsonMessageConverter` manually. + +### 1.8. Coroutines + +Kotlin [Coroutines](https://kotlinlang.org/docs/reference/coroutines-overview.html) are Kotlin +lightweight threads allowing to write non-blocking code in an imperative way. 
On the language side, +suspending functions provide an abstraction for asynchronous operations, while on the library side [kotlinx.coroutines](https://github.com/Kotlin/kotlinx.coroutines) provides functions like [`async { }`](https://kotlin.github.io/kotlinx.coroutines/kotlinx-coroutines-core/kotlinx.coroutines/async.html) and types like [`Flow`](https://kotlin.github.io/kotlinx.coroutines/kotlinx-coroutines-core/kotlinx.coroutines.flow/-flow/index.html). + +Spring Framework provides support for Coroutines on the following scope: + +* [Deferred](https://kotlin.github.io/kotlinx.coroutines/kotlinx-coroutines-core/kotlinx.coroutines/-deferred/index.html) and [Flow](https://kotlin.github.io/kotlinx.coroutines/kotlinx-coroutines-core/kotlinx.coroutines.flow/-flow/index.html) return values support in Spring MVC and WebFlux annotated `@Controller` + +* Suspending function support in Spring MVC and WebFlux annotated `@Controller` + +* Extensions for WebFlux [client](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/org.springframework.web.reactive.function.client/index.html) and [server](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/org.springframework.web.reactive.function.server/index.html) functional API. + +* WebFlux.fn [coRouter { }](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/org.springframework.web.reactive.function.server/co-router.html) DSL + +* Suspending function and `Flow` support in RSocket `@MessageMapping` annotated methods + +* Extensions for [`RSocketRequester`](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/org.springframework.messaging.rsocket/index.html) + +#### 1.8.1. 
Dependencies + +Coroutines support is enabled when `kotlinx-coroutines-core` and `kotlinx-coroutines-reactor` dependencies are in the classpath: + +`build.gradle.kts` + +``` +dependencies { + + implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:${coroutinesVersion}") + implementation("org.jetbrains.kotlinx:kotlinx-coroutines-reactor:${coroutinesVersion}") +} +``` + +Version `1.4.0` and above are supported. + +#### 1.8.2. How Reactive translates to Coroutines? + +For return values, the translation from Reactive to Coroutines APIs is the following: + +* `fun handler(): Mono<Void>` becomes `suspend fun handler()` + +* `fun handler(): Mono<T>` becomes `suspend fun handler(): T` or `suspend fun handler(): T?` depending on whether the `Mono` can be empty or not (with the advantage of being more statically typed) + +* `fun handler(): Flux<T>` becomes `fun handler(): Flow<T>` + +For input parameters: + +* If laziness is not needed, `fun handler(mono: Mono<T>)` becomes `fun handler(value: T)` since a suspending function can be invoked to get the value parameter. 
+ +* If laziness is needed, `fun handler(mono: Mono<T>)` becomes `fun handler(supplier: suspend () → T)` or `fun handler(supplier: suspend () → T?)` + +[`Flow`](https://kotlin.github.io/kotlinx.coroutines/kotlinx-coroutines-core/kotlinx.coroutines.flow/-flow/index.html) is `Flux` equivalent in Coroutines world, suitable for hot or cold stream, finite or infinite streams, with the following main differences: + +* `Flow` is push-based while `Flux` is push-pull hybrid + +* Backpressure is implemented via suspending functions + +* `Flow` has only a [single suspending `collect` method](https://kotlin.github.io/kotlinx.coroutines/kotlinx-coroutines-core/kotlinx.coroutines.flow/-flow/collect.html) and operators are implemented as [extensions](https://kotlinlang.org/docs/reference/extensions.html) + +* [Operators are easy to implement](https://github.com/Kotlin/kotlinx.coroutines/tree/master/kotlinx-coroutines-core/common/src/flow/operators) thanks to Coroutines + +* Extensions allow to add custom operators to `Flow` + +* Collect operations are suspending functions + +* [`map` operator](https://kotlin.github.io/kotlinx.coroutines/kotlinx-coroutines-core/kotlinx.coroutines.flow/map.html) supports asynchronous operation (no need for `flatMap`) since it takes a suspending function parameter + +Read this blog post about [Going Reactive with Spring, Coroutines and Kotlin Flow](https://spring.io/blog/2019/04/12/going-reactive-with-spring-coroutines-and-kotlin-flow)for more details, including how to run code concurrently with Coroutines. + +#### 1.8.3. Controllers + +Here is an example of a Coroutines `@RestController`. 
+ +``` +@RestController +class CoroutinesRestController(client: WebClient, banner: Banner) { + + @GetMapping("/suspend") + suspend fun suspendingEndpoint(): Banner { + delay(10) + return banner + } + + @GetMapping("/flow") + fun flowEndpoint() = flow { + delay(10) + emit(banner) + delay(10) + emit(banner) + } + + @GetMapping("/deferred") + fun deferredEndpoint() = GlobalScope.async { + delay(10) + banner + } + + @GetMapping("/sequential") + suspend fun sequential(): List<Banner> { + val banner1 = client + .get() + .uri("/suspend") + .accept(MediaType.APPLICATION_JSON) + .awaitExchange() + .awaitBody<Banner>() + val banner2 = client + .get() + .uri("/suspend") + .accept(MediaType.APPLICATION_JSON) + .awaitExchange() + .awaitBody<Banner>() + return listOf(banner1, banner2) + } + + @GetMapping("/parallel") + suspend fun parallel(): List<Banner> = coroutineScope { + val deferredBanner1: Deferred<Banner> = async { + client + .get() + .uri("/suspend") + .accept(MediaType.APPLICATION_JSON) + .awaitExchange() + .awaitBody<Banner>() + } + val deferredBanner2: Deferred<Banner> = async { + client + .get() + .uri("/suspend") + .accept(MediaType.APPLICATION_JSON) + .awaitExchange() + .awaitBody<Banner>() + } + listOf(deferredBanner1.await(), deferredBanner2.await()) + } + + @GetMapping("/error") + suspend fun error() { + throw IllegalStateException() + } + + @GetMapping("/cancel") + suspend fun cancel() { + throw CancellationException() + } + +} +``` + +View rendering with a `@Controller` is also supported. + +``` +@Controller +class CoroutinesViewController(banner: Banner) { + + @GetMapping("/") + suspend fun render(model: Model): String { + delay(10) + model["banner"] = banner + return "index" + } +} +``` + +#### 1.8.4. WebFlux.fn + +Here is an example of Coroutines router defined via the [coRouter { }](https://docs.spring.io/spring-framework/docs/5.3.16/kdoc-api/spring-framework/org.springframework.web.reactive.function.server/co-router.html) DSL and related handlers. 
+ +``` +@Configuration +class RouterConfiguration { + + @Bean + fun mainRouter(userHandler: UserHandler) = coRouter { + GET("/", userHandler::listView) + GET("/api/user", userHandler::listApi) + } +} +``` + +``` +class UserHandler(builder: WebClient.Builder) { + + private val client = builder.baseUrl("...").build() + + suspend fun listView(request: ServerRequest): ServerResponse = + ServerResponse.ok().renderAndAwait("users", mapOf("users" to + client.get().uri("...").awaitExchange().awaitBody<User>())) + + suspend fun listApi(request: ServerRequest): ServerResponse = + ServerResponse.ok().contentType(MediaType.APPLICATION_JSON).bodyAndAwait( + client.get().uri("...").awaitExchange().awaitBody<User>()) +} +``` + +#### 1.8.5. Transactions + +Transactions on Coroutines are supported via the programmatic variant of the Reactive +transaction management provided as of Spring Framework 5.2. + +For suspending functions, a `TransactionalOperator.executeAndAwait` extension is provided. + +``` +import org.springframework.transaction.reactive.executeAndAwait + +class PersonRepository(private val operator: TransactionalOperator) { + + suspend fun initDatabase() = operator.executeAndAwait { + insertPerson1() + insertPerson2() + } + + private suspend fun insertPerson1() { + // INSERT SQL statement + } + + private suspend fun insertPerson2() { + // INSERT SQL statement + } +} +``` + +For Kotlin `Flow`, a `Flow<T>.transactional` extension is provided. + +``` +import org.springframework.transaction.reactive.transactional + +class PersonRepository(private val operator: TransactionalOperator) { + + fun updatePeople() = findPeople().map(::updatePerson).transactional(operator) + + private fun findPeople(): Flow<Person> { + // SELECT SQL statement + } + + private suspend fun updatePerson(person: Person): Person { + // UPDATE SQL statement + } +} +``` + +### 1.9. 
Spring Projects in Kotlin + +This section provides some specific hints and recommendations worth knowing for developing Spring projects +in Kotlin. + +#### 1.9.1. Final by Default + +By default, [all classes in Kotlin are `final`](https://discuss.kotlinlang.org/t/classes-final-by-default/166). +The `open` modifier on a class is the opposite of Java’s `final`: It allows others to inherit from this +class. This also applies to member functions, in that they need to be marked as `open` to be overridden. + +While Kotlin’s JVM-friendly design is generally frictionless with Spring, this specific Kotlin feature +can prevent the application from starting, if this fact is not taken into consideration. This is because +Spring beans (such as `@Configuration` annotated classes which by default need to be extended at runtime for technical +reasons) are normally proxied by CGLIB. The workaround is to add an `open` keyword on each class and +member function of Spring beans that are proxied by CGLIB, which can +quickly become painful and is against the Kotlin principle of keeping code concise and predictable. 
+ +| |It is also possible to avoid CGLIB proxies for configuration classes by using `@Configuration(proxyBeanMethods = false)`.<br/>See [`proxyBeanMethods` Javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/Configuration.html#proxyBeanMethods--) for more details.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Fortunately, Kotlin provides a[`kotlin-spring`](https://kotlinlang.org/docs/reference/compiler-plugins.html#kotlin-spring-compiler-plugin)plugin (a preconfigured version of the `kotlin-allopen` plugin) that automatically opens classes +and their member functions for types that are annotated or meta-annotated with one of the following +annotations: + +* `@Component` + +* `@Async` + +* `@Transactional` + +* `@Cacheable` + +Meta-annotation support means that types annotated with `@Configuration`, `@Controller`,`@RestController`, `@Service`, or `@Repository` are automatically opened since these +annotations are meta-annotated with `@Component`. + +[start.spring.io](https://start.spring.io/#!language=kotlin&type=gradle-project) enables +the `kotlin-spring` plugin by default. So, in practice, you can write your Kotlin beans +without any additional `open` keyword, as in Java. + +| |The Kotlin code samples in Spring Framework documentation do not explicitly specify`open` on the classes and their member functions. 
The samples are written for projects<br/>using the `kotlin-allopen` plugin, since this is the most commonly used setup.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.9.2. Using Immutable Class Instances for Persistence + +In Kotlin, it is convenient and considered to be a best practice to declare read-only properties +within the primary constructor, as in the following example: + +``` +class Person(val name: String, val age: Int) +``` + +You can optionally add [the `data` keyword](https://kotlinlang.org/docs/reference/data-classes.html)to make the compiler automatically derive the following members from all properties declared +in the primary constructor: + +* `equals()` and `hashCode()` + +* `toString()` of the form `"User(name=John, age=42)"` + +* `componentN()` functions that correspond to the properties in their order of declaration + +* `copy()` function + +As the following example shows, this allows for easy changes to individual properties, even if `Person` properties are read-only: + +``` +data class Person(val name: String, val age: Int) + +val jack = Person(name = "Jack", age = 1) +val olderJack = jack.copy(age = 2) +``` + +Common persistence technologies (such as JPA) require a default constructor, preventing this +kind of design. Fortunately, there is a workaround for this[“default constructor hell”](https://stackoverflow.com/questions/32038177/kotlin-with-jpa-default-constructor-hell), +since Kotlin provides a [`kotlin-jpa`](https://kotlinlang.org/docs/reference/compiler-plugins.html#kotlin-jpa-compiler-plugin)plugin that generates synthetic no-arg constructor for classes annotated with JPA annotations. 
+ +If you need to leverage this kind of mechanism for other persistence technologies, you can configure +the [`kotlin-noarg`](https://kotlinlang.org/docs/reference/compiler-plugins.html#how-to-use-no-arg-plugin)plugin. + +| |As of the Kay release train, Spring Data supports Kotlin immutable class instances and<br/>does not require the `kotlin-noarg` plugin if the module uses Spring Data object mappings<br/>(such as MongoDB, Redis, Cassandra, and others).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.9.3. Injecting Dependencies + +Our recommendation is to try to favor constructor injection with `val` read-only (and +non-nullable when possible) [properties](https://kotlinlang.org/docs/reference/properties.html), +as the following example shows: + +``` +@Component +class YourBean( + private val mongoTemplate: MongoTemplate, + private val solrClient: SolrClient +) +``` + +| |Classes with a single constructor have their parameters automatically autowired.<br/>That’s why there is no need for an explicit `@Autowired constructor` in the example shown<br/>above.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you really need to use field injection, you can use the `lateinit var` construct, +as the following example shows: + +``` +@Component +class YourBean { + + @Autowired + lateinit var mongoTemplate: MongoTemplate + + @Autowired + lateinit var solrClient: SolrClient +} +``` + +#### 1.9.4. Injecting Configuration Properties + +In Java, you can inject configuration properties by using annotations (such as `@Value("${property}")`). 
+However, in Kotlin, `$` is a reserved character that is used for[string interpolation](https://kotlinlang.org/docs/reference/idioms.html#string-interpolation). + +Therefore, if you wish to use the `@Value` annotation in Kotlin, you need to escape the `$`character by writing `@Value("\${property}")`. + +| |If you use Spring Boot, you should probably use[`@ConfigurationProperties`](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-external-config.html#boot-features-external-config-typesafe-configuration-properties)instead of `@Value` annotations.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +As an alternative, you can customize the property placeholder prefix by declaring the +following configuration beans: + +``` +@Bean +fun propertyConfigurer() = PropertySourcesPlaceholderConfigurer().apply { + setPlaceholderPrefix("%{") +} +``` + +You can customize existing code (such as Spring Boot actuators or `@LocalServerPort`) +that uses the `${…​}` syntax, with configuration beans, as the following example shows: + +``` +@Bean +fun kotlinPropertyConfigurer() = PropertySourcesPlaceholderConfigurer().apply { + setPlaceholderPrefix("%{") + setIgnoreUnresolvablePlaceholders(true) +} + +@Bean +fun defaultPropertyConfigurer() = PropertySourcesPlaceholderConfigurer() +``` + +#### 1.9.5. Checked Exceptions + +Java and [Kotlin exception handling](https://kotlinlang.org/docs/reference/exceptions.html)are pretty close, with the main difference being that Kotlin treats all exceptions as +unchecked exceptions. However, when using proxied objects (for example classes or methods +annotated with `@Transactional`), checked exceptions thrown will be wrapped by default in +an `UndeclaredThrowableException`. 
+ +To get the original exception thrown like in Java, methods should be annotated with[`@Throws`](https://kotlinlang.org/api/latest/jvm/stdlib/kotlin.jvm/-throws/index.html)to specify explicitly the checked exceptions thrown (for example `@Throws(IOException::class)`). + +#### 1.9.6. Annotation Array Attributes + +Kotlin annotations are mostly similar to Java annotations, but array attributes (which are +extensively used in Spring) behave differently. As explained in the[Kotlin documentation](https://kotlinlang.org/docs/reference/annotations.html) you can omit +the `value` attribute name, unlike other attributes, and specify it as a `vararg` parameter. + +To understand what that means, consider `@RequestMapping` (which is one of the most widely +used Spring annotations) as an example. This Java annotation is declared as follows: + +``` +public @interface RequestMapping { + + @AliasFor("path") + String[] value() default {}; + + @AliasFor("value") + String[] path() default {}; + + RequestMethod[] method() default {}; + + // ... +} +``` + +The typical use case for `@RequestMapping` is to map a handler method to a specific path +and method. In Java, you can specify a single value for the annotation array attribute, +and it is automatically converted to an array. + +That is why one can write`@RequestMapping(value = "/toys", method = RequestMethod.GET)` or`@RequestMapping(path = "/toys", method = RequestMethod.GET)`. + +However, in Kotlin, you must write `@RequestMapping("/toys", method = [RequestMethod.GET])`or `@RequestMapping(path = ["/toys"], method = [RequestMethod.GET])` (square brackets need +to be specified with named array attributes). + +An alternative for this specific `method` attribute (the most common one) is to +use a shortcut annotation, such as `@GetMapping`, `@PostMapping`, and others. 
+ +| |If the `@RequestMapping` `method` attribute is not specified, all HTTP methods will<br/>be matched, not only the `GET` method.| +|---|------------------------------------------------------------------------------------------------------------------------------| + +#### 1.9.7. Testing + +This section addresses testing with the combination of Kotlin and Spring Framework. +The recommended testing framework is [JUnit 5](https://junit.org/junit5/) along with[Mockk](https://mockk.io/) for mocking. + +| |If you are using Spring Boot, see[this related documentation](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-kotlin-testing).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Constructor injection + +As described in the [dedicated section](testing.html#testcontext-junit-jupiter-di#spring-web-reactive), +JUnit 5 allows constructor injection of beans which is pretty useful with Kotlin +in order to use `val` instead of `lateinit var`. You can use[`@TestConstructor(autowireMode = AutowireMode.ALL)`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/TestConstructor.html)to enable autowiring for all parameters. + +``` +@SpringJUnitConfig(TestConfig::class) +@TestConstructor(autowireMode = AutowireMode.ALL) +class OrderServiceIntegrationTests(val orderService: OrderService, + val customerService: CustomerService) { + + // tests that use the injected OrderService and CustomerService +} +``` + +##### `PER_CLASS` Lifecycle + +Kotlin lets you specify meaningful test function names between backticks (```). 
+As of JUnit 5, Kotlin test classes can use the `@TestInstance(TestInstance.Lifecycle.PER_CLASS)`annotation to enable single instantiation of test classes, which allows the use of `@BeforeAll`and `@AfterAll` annotations on non-static methods, which is a good fit for Kotlin. + +You can also change the default behavior to `PER_CLASS` thanks to a `junit-platform.properties`file with a `junit.jupiter.testinstance.lifecycle.default = per_class` property. + +The following example demonstrates `@BeforeAll` and `@AfterAll` annotations on non-static methods: + +``` +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +class IntegrationTests { + + val application = Application(8181) + val client = WebClient.create("http://localhost:8181") + + @BeforeAll + fun beforeAll() { + application.start() + } + + @Test + fun `Find all users on HTML page`() { + client.get().uri("/users") + .accept(TEXT_HTML) + .retrieve() + .bodyToMono<String>() + .test() + .expectNextMatches { it.contains("Foo") } + .verifyComplete() + } + + @AfterAll + fun afterAll() { + application.stop() + } +} +``` + +##### Specification-like Tests + +You can create specification-like tests with JUnit 5 and Kotlin. 
+The following example shows how to do so: + +``` +class SpecificationLikeTests { + + @Nested + @DisplayName("a calculator") + inner class Calculator { + val calculator = SampleCalculator() + + @Test + fun `should return the result of adding the first number to the second number`() { + val sum = calculator.sum(2, 4) + assertEquals(6, sum) + } + + @Test + fun `should return the result of subtracting the second number from the first number`() { + val subtract = calculator.subtract(4, 2) + assertEquals(2, subtract) + } + } +} +``` + +##### `WebTestClient` Type Inference Issue in Kotlin + +Due to a [type inference issue](https://youtrack.jetbrains.com/issue/KT-5464), you must +use the Kotlin `expectBody` extension (such as `.expectBody<String>().isEqualTo("toys")`), +since it provides a workaround for the Kotlin issue with the Java API. + +See also the related [SPR-16057](https://jira.spring.io/browse/SPR-16057) issue. + +### 1.10. Getting Started + +The easiest way to learn how to build a Spring application with Kotlin is to follow[the dedicated tutorial](https://spring.io/guides/tutorials/spring-boot-kotlin/). + +#### 1.10.1. `start.spring.io` + +The easiest way to start a new Spring Framework project in Kotlin is to create a new Spring +Boot 2 project on [start.spring.io](https://start.spring.io/#!language=kotlin&type=gradle-project). + +#### 1.10.2. Choosing the Web Flavor + +Spring Framework now comes with two different web stacks: [Spring MVC](web.html#mvc) and[Spring WebFlux](web-reactive.html#spring-web-reactive). + +Spring WebFlux is recommended if you want to create applications that will deal with latency, +long-lived connections, streaming scenarios or if you want to use the web functional +Kotlin DSL. + +For other use cases, especially if you are using blocking technologies such as JPA, Spring +MVC and its annotation-based programming model is the recommended choice. + +### 1.11. 
Resources + +We recommend the following resources for people learning how to build applications with +Kotlin and the Spring Framework: + +* [Kotlin language reference](https://kotlinlang.org/docs/reference/) + +* [Kotlin Slack](https://slack.kotlinlang.org/) (with a dedicated #spring channel) + +* [Stackoverflow, with `spring` and `kotlin` tags](https://stackoverflow.com/questions/tagged/spring+kotlin) + +* [Try Kotlin in your browser](https://play.kotlinlang.org/) + +* [Kotlin blog](https://blog.jetbrains.com/kotlin/) + +* [Awesome Kotlin](https://kotlin.link/) + +#### 1.11.1. Examples + +The following Github projects offer examples that you can learn from and possibly even extend: + +* [spring-boot-kotlin-demo](https://github.com/sdeleuze/spring-boot-kotlin-demo): Regular Spring Boot and Spring Data JPA project + +* [mixit](https://github.com/mixitconf/mixit): Spring Boot 2, WebFlux, and Reactive Spring Data MongoDB + +* [spring-kotlin-functional](https://github.com/sdeleuze/spring-kotlin-functional): Standalone WebFlux and functional bean definition DSL + +* [spring-kotlin-fullstack](https://github.com/sdeleuze/spring-kotlin-fullstack): WebFlux Kotlin fullstack example with Kotlin2js for frontend instead of JavaScript or TypeScript + +* [spring-petclinic-kotlin](https://github.com/spring-petclinic/spring-petclinic-kotlin): Kotlin version of the Spring PetClinic Sample Application + +* [spring-kotlin-deepdive](https://github.com/sdeleuze/spring-kotlin-deepdive): A step-by-step migration guide for Boot 1.0 and Java to Boot 2.0 and Kotlin + +* [spring-cloud-gcp-kotlin-app-sample](https://github.com/spring-cloud/spring-cloud-gcp/tree/master/spring-cloud-gcp-kotlin-samples/spring-cloud-gcp-kotlin-app-sample): Spring Boot with Google Cloud Platform Integrations + +#### 1.11.2. 
Issues + +The following list categorizes the pending issues related to Spring and Kotlin support: + +* Spring Framework + + * [Unable to use WebTestClient with mock server in Kotlin](https://github.com/spring-projects/spring-framework/issues/20606) + + * [Support null-safety at generics, varargs and array elements level](https://github.com/spring-projects/spring-framework/issues/20496) + +* Kotlin + + * [Parent issue for Spring Framework support](https://youtrack.jetbrains.com/issue/KT-6380) + + * [Kotlin requires type inference where Java doesn’t](https://youtrack.jetbrains.com/issue/KT-5464) + + * [Smart cast regression with open classes](https://youtrack.jetbrains.com/issue/KT-20283) + + * [Impossible to pass not all SAM argument as function](https://youtrack.jetbrains.com/issue/KT-14984) + + * [Support JSR 223 bindings directly via script variables](https://youtrack.jetbrains.com/issue/KT-15125) + + * [Kotlin properties do not override Java-style getters and setters](https://youtrack.jetbrains.com/issue/KT-6653) + +## 2. Apache Groovy + +Groovy is a powerful, optionally typed, and dynamic language, with static-typing and static +compilation capabilities. It offers a concise syntax and integrates smoothly with any +existing Java application. + +The Spring Framework provides a dedicated `ApplicationContext` that supports a Groovy-based +Bean Definition DSL. For more details, see[The Groovy Bean Definition DSL](core.html#groovy-bean-definition-dsl). + +Further support for Groovy, including beans written in Groovy, refreshable script beans, +and more is available in [Dynamic Language Support](#dynamic-language). + +## 3. Dynamic Language Support + +Spring provides comprehensive support for using classes and objects that have been +defined by using a dynamic language (such as Groovy) with Spring. 
This support lets +you write any number of classes in a supported dynamic language and have the Spring +container transparently instantiate, configure, and dependency inject the resulting +objects. + +Spring’s scripting support primarily targets Groovy and BeanShell. Beyond those +specifically supported languages, the JSR-223 scripting mechanism is supported +for integration with any JSR-223 capable language provider (as of Spring 4.2), +e.g. JRuby. + +You can find fully working examples of where this dynamic language support can be +immediately useful in [Scenarios](#dynamic-language-scenarios). + +### 3.1. A First Example + +The bulk of this chapter is concerned with describing the dynamic language support in +detail. Before diving into all of the ins and outs of the dynamic language support, +we look at a quick example of a bean defined in a dynamic language. The dynamic +language for this first bean is Groovy. (The basis of this example was taken from the +Spring test suite. If you want to see equivalent examples in any of the other +supported languages, take a look at the source code). + +The next example shows the `Messenger` interface, which the Groovy bean is going to +implement. Note that this interface is defined in plain Java. Dependent objects that +are injected with a reference to the `Messenger` do not know that the underlying +implementation is a Groovy script. The following listing shows the `Messenger` interface: + +``` +package org.springframework.scripting; + +public interface Messenger { + + String getMessage(); +} +``` + +The following example defines a class that has a dependency on the `Messenger` interface: + +``` +package org.springframework.scripting; + +public class DefaultBookingService implements BookingService { + + private Messenger messenger; + + public void setMessenger(Messenger messenger) { + this.messenger = messenger; + } + + public void processBooking() { + // use the injected Messenger object... 
+ } +} +``` + +The following example implements the `Messenger` interface in Groovy: + +``` +// from the file 'Messenger.groovy' +package org.springframework.scripting.groovy; + +// import the Messenger interface (written in Java) that is to be implemented +import org.springframework.scripting.Messenger + +// define the implementation in Groovy +class GroovyMessenger implements Messenger { + + String message +} +``` + +| |To use the custom dynamic language tags to define dynamic-language-backed beans, you<br/>need to have the XML Schema preamble at the top of your Spring XML configuration file.<br/>You also need to use a Spring `ApplicationContext` implementation as your IoC<br/>container. Using the dynamic-language-backed beans with a plain `BeanFactory`implementation is supported, but you have to manage the plumbing of the Spring internals<br/>to do so.<br/><br/>For more information on schema-based configuration, see [XML Schema-based Configuration](#xsd-schemas-lang).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Finally, the following example shows the bean definitions that effect the injection of the +Groovy-defined `Messenger` implementation into an instance of the`DefaultBookingService` class: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:lang="http://www.springframework.org/schema/lang" + xsi:schemaLocation=" + 
http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/lang https://www.springframework.org/schema/lang/spring-lang.xsd"> + + <!-- this is the bean definition for the Groovy-backed Messenger implementation --> + <lang:groovy id="messenger" script-source="classpath:Messenger.groovy"> + <lang:property name="message" value="I Can Do The Frug" /> + </lang:groovy> + + <!-- an otherwise normal bean that will be injected by the Groovy-backed Messenger --> + <bean id="bookingService" class="x.y.DefaultBookingService"> + <property name="messenger" ref="messenger" /> + </bean> + +</beans> +``` + +The `bookingService` bean (a `DefaultBookingService`) can now use its private `messenger`member variable as normal, because the `Messenger` instance that was injected into it is +a `Messenger` instance. There is nothing special going on here — just plain Java and +plain Groovy. + +Hopefully, the preceding XML snippet is self-explanatory, but do not worry unduly if it is not. +Keep reading for the in-depth detail on the whys and wherefores of the preceding configuration. + +### 3.2. Defining Beans that Are Backed by Dynamic Languages + +This section describes exactly how you define Spring-managed beans in any of the +supported dynamic languages. + +Note that this chapter does not attempt to explain the syntax and idioms of the supported +dynamic languages. For example, if you want to use Groovy to write certain of the classes +in your application, we assume that you already know Groovy. If you need further details +about the dynamic languages themselves, see [Further Resources](#dynamic-language-resources) at the end of +this chapter. + +#### 3.2.1. Common Concepts + +The steps involved in using dynamic-language-backed beans are as follows: + +1. Write the test for the dynamic language source code (naturally). + +2. Then write the dynamic language source code itself. + +3. 
Define your dynamic-language-backed beans by using the appropriate `<lang:language/>`element in the XML configuration (you can define such beans programmatically by + using the Spring API, although you will have to consult the source code for + directions on how to do this, as this chapter does not cover this type of advanced configuration). + Note that this is an iterative step. You need at least one bean definition for each dynamic + language source file (although multiple bean definitions can reference the same source file). + +The first two steps (testing and writing your dynamic language source files) are beyond +the scope of this chapter. See the language specification and reference manual +for your chosen dynamic language and crack on with developing your dynamic language +source files. You first want to read the rest of this chapter, though, as +Spring’s dynamic language support does make some (small) assumptions about the contents +of your dynamic language source files. + +##### The \<lang:language/\> element + +The final step in the list in the [preceding section](#dynamic-language-beans-concepts)involves defining dynamic-language-backed bean definitions, one for each bean that you +want to configure (this is no different from normal JavaBean configuration). However, +instead of specifying the fully qualified class name of the class that is to be +instantiated and configured by the container, you can use the `<lang:language/>`element to define the dynamic language-backed bean. + +Each of the supported languages has a corresponding `<lang:language/>` element: + +* `<lang:groovy/>` (Groovy) + +* `<lang:bsh/>` (BeanShell) + +* `<lang:std/>` (JSR-223, e.g. with JRuby) + +The exact attributes and child elements that are available for configuration depends on +exactly which language the bean has been defined in (the language-specific sections +later in this chapter detail this). 
+ +##### Refreshable Beans + +One of the (and perhaps the single) most compelling value adds of the dynamic language +support in Spring is the “refreshable bean” feature. + +A refreshable bean is a dynamic-language-backed bean. With a small amount of +configuration, a dynamic-language-backed bean can monitor changes in its underlying +source file resource and then reload itself when the dynamic language source file is +changed (for example, when you edit and save changes to the file on the file system). + +This lets you deploy any number of dynamic language source files as part of an +application, configure the Spring container to create beans backed by dynamic +language source files (using the mechanisms described in this chapter), and (later, +as requirements change or some other external factor comes into play) edit a dynamic +language source file and have any change they make be reflected in the bean that is +backed by the changed dynamic language source file. There is no need to shut down a +running application (or redeploy in the case of a web application). The +dynamic-language-backed bean so amended picks up the new state and logic from the +changed dynamic language source file. + +| |This feature is off by default.| +|---|-------------------------------| + +Now we can take a look at an example to see how easy it is to start using refreshable +beans. To turn on the refreshable beans feature, you have to specify exactly one +additional attribute on the `<lang:language/>` element of your bean definition. 
So, +if we stick with [the example](#dynamic-language-a-first-example) from earlier in +this chapter, the following example shows what we would change in the Spring XML +configuration to effect refreshable beans: + +``` +<beans> + + <!-- this bean is now 'refreshable' due to the presence of the 'refresh-check-delay' attribute --> + <lang:groovy id="messenger" + refresh-check-delay="5000" <!-- switches refreshing on with 5 seconds between checks --> + script-source="classpath:Messenger.groovy"> + <lang:property name="message" value="I Can Do The Frug" /> + </lang:groovy> + + <bean id="bookingService" class="x.y.DefaultBookingService"> + <property name="messenger" ref="messenger" /> + </bean> + +</beans> +``` + +That really is all you have to do. The `refresh-check-delay` attribute defined on the`messenger` bean definition is the number of milliseconds after which the bean is +refreshed with any changes made to the underlying dynamic language source file. +You can turn off the refresh behavior by assigning a negative value to the`refresh-check-delay` attribute. Remember that, by default, the refresh behavior is +disabled. If you do not want the refresh behavior, do not define the attribute. + +If we then run the following application, we can exercise the refreshable feature. +(Please excuse the “jumping-through-hoops-to-pause-the-execution” shenanigans +in this next slice of code.) The `System.in.read()` call is only there so that the +execution of the program pauses while you (the developer in this scenario) go off +and edit the underlying dynamic language source file so that the refresh triggers +on the dynamic-language-backed bean when the program resumes execution. 
+ +The following listing shows this sample application: + +``` +import org.springframework.context.ApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; +import org.springframework.scripting.Messenger; + +public final class Boot { + + public static void main(final String[] args) throws Exception { + ApplicationContext ctx = new ClassPathXmlApplicationContext("beans.xml"); + Messenger messenger = (Messenger) ctx.getBean("messenger"); + System.out.println(messenger.getMessage()); + // pause execution while I go off and make changes to the source file... + System.in.read(); + System.out.println(messenger.getMessage()); + } +} +``` + +Assume then, for the purposes of this example, that all calls to the `getMessage()`method of `Messenger` implementations have to be changed such that the message is +surrounded by quotation marks. The following listing shows the changes that you +(the developer) should make to the `Messenger.groovy` source file when the +execution of the program is paused: + +``` +package org.springframework.scripting + +class GroovyMessenger implements Messenger { + + private String message = "Bingo" + + public String getMessage() { + // change the implementation to surround the message in quotes + return "'" + this.message + "'" + } + + public void setMessage(String message) { + this.message = message + } +} +``` + +When the program runs, the output before the input pause will be `I Can Do The Frug`. +After the change to the source file is made and saved and the program resumes execution, +the result of calling the `getMessage()` method on the dynamic-language-backed`Messenger` implementation is `'I Can Do The Frug'` (notice the inclusion of the +additional quotation marks). + +Changes to a script do not trigger a refresh if the changes occur within the window of +the `refresh-check-delay` value. Changes to the script are not actually picked up until +a method is called on the dynamic-language-backed bean. 
It is only when a method is +called on a dynamic-language-backed bean that it checks to see if its underlying script +source has changed. Any exceptions that relate to refreshing the script (such as +encountering a compilation error or finding that the script file has been deleted) +results in a fatal exception being propagated to the calling code. + +The refreshable bean behavior described earlier does not apply to dynamic language +source files defined with the `<lang:inline-script/>` element notation (see[Inline Dynamic Language Source Files](#dynamic-language-beans-inline)). Additionally, it applies only to beans where +changes to the underlying source file can actually be detected (for example, by code +that checks the last modified date of a dynamic language source file that exists on the +file system). + +##### Inline Dynamic Language Source Files + +The dynamic language support can also cater to dynamic language source files that are +embedded directly in Spring bean definitions. More specifically, the`<lang:inline-script/>` element lets you define dynamic language source immediately +inside a Spring configuration file. An example might clarify how the inline script +feature works: + +``` +<lang:groovy id="messenger"> + <lang:inline-script> + +package org.springframework.scripting.groovy; + +import org.springframework.scripting.Messenger + +class GroovyMessenger implements Messenger { + String message +} + + </lang:inline-script> + <lang:property name="message" value="I Can Do The Frug" /> +</lang:groovy> +``` + +If we put to one side the issues surrounding whether it is good practice to define +dynamic language source inside a Spring configuration file, the `<lang:inline-script/>`element can be useful in some scenarios. For instance, we might want to quickly add a +Spring `Validator` implementation to a Spring MVC `Controller`. This is but a moment’s +work using inline source. 
(See [Scripted Validators](#dynamic-language-scenarios-validators) for such an +example.) + +##### Understanding Constructor Injection in the Context of Dynamic-language-backed Beans + +There is one very important thing to be aware of with regard to Spring’s dynamic +language support. Namely, you can not (currently) supply constructor arguments +to dynamic-language-backed beans (and, hence, constructor-injection is not available for +dynamic-language-backed beans). In the interests of making this special handling of +constructors and properties 100% clear, the following mixture of code and configuration +does not work: + +An approach that cannot work + +``` +// from the file 'Messenger.groovy' +package org.springframework.scripting.groovy; + +import org.springframework.scripting.Messenger + +class GroovyMessenger implements Messenger { + + GroovyMessenger() {} + + // this constructor is not available for Constructor Injection + GroovyMessenger(String message) { + this.message = message; + } + + String message + + String anotherMessage +} +``` + +``` +<lang:groovy id="badMessenger" + script-source="classpath:Messenger.groovy"> + <!-- this next constructor argument will not be injected into the GroovyMessenger --> + <!-- in fact, this isn't even allowed according to the schema --> + <constructor-arg value="This will not work" /> + + <!-- only property values are injected into the dynamic-language-backed object --> + <lang:property name="anotherMessage" value="Passed straight through to the dynamic-language-backed object" /> + +</lang> +``` + +In practice this limitation is not as significant as it first appears, since setter +injection is the injection style favored by the overwhelming majority of developers +(we leave the discussion as to whether that is a good thing to another day). + +#### 3.2.2. Groovy Beans + +This section describes how to use beans defined in Groovy in Spring. 
+ +The Groovy homepage includes the following description: + +“Groovy is an agile dynamic language for the Java 2 Platform that has many of the +features that people like so much in languages like Python, Ruby and Smalltalk, making +them available to Java developers using a Java-like syntax.” + +If you have read this chapter straight from the top, you have already[seen an example](#dynamic-language-a-first-example) of a Groovy-dynamic-language-backed +bean. Now consider another example (again using an example from the Spring test suite): + +``` +package org.springframework.scripting; + +public interface Calculator { + + int add(int x, int y); +} +``` + +The following example implements the `Calculator` interface in Groovy: + +``` +// from the file 'calculator.groovy' +package org.springframework.scripting.groovy + +class GroovyCalculator implements Calculator { + + int add(int x, int y) { + x + y + } +} +``` + +The following bean definition uses the calculator defined in Groovy: + +``` +<!-- from the file 'beans.xml' --> +<beans> + <lang:groovy id="calculator" script-source="classpath:calculator.groovy"/> +</beans> +``` + +Finally, the following small application exercises the preceding configuration: + +``` +package org.springframework.scripting; + +import org.springframework.context.ApplicationContext; +import org.springframework.context.support.ClassPathXmlApplicationContext; + +public class Main { + + public static void main(String[] args) { + ApplicationContext ctx = new ClassPathXmlApplicationContext("beans.xml"); + Calculator calc = ctx.getBean("calculator", Calculator.class); + System.out.println(calc.add(2, 8)); + } +} +``` + +The resulting output from running the above program is (unsurprisingly) `10`. +(For more interesting examples, see the dynamic language showcase project for a more +complex example or see the examples [Scenarios](#dynamic-language-scenarios) later in this chapter). + +You must not define more than one class per Groovy source file. 
While this is perfectly +legal in Groovy, it is (arguably) a bad practice. In the interests of a consistent +approach, you should (in the opinion of the Spring team) respect the standard Java +conventions of one (public) class per source file. + +##### Customizing Groovy Objects by Using a Callback + +The `GroovyObjectCustomizer` interface is a callback that lets you hook additional +creation logic into the process of creating a Groovy-backed bean. For example, +implementations of this interface could invoke any required initialization methods, +set some default property values, or specify a custom `MetaClass`. The following listing +shows the `GroovyObjectCustomizer` interface definition: + +``` +public interface GroovyObjectCustomizer { + + void customize(GroovyObject goo); +} +``` + +The Spring Framework instantiates an instance of your Groovy-backed bean and then +passes the created `GroovyObject` to the specified `GroovyObjectCustomizer` (if one +has been defined). You can do whatever you like with the supplied `GroovyObject`reference. We expect that most people want to set a custom `MetaClass` with this +callback, and the following example shows how to do so: + +``` +public final class SimpleMethodTracingCustomizer implements GroovyObjectCustomizer { + + public void customize(GroovyObject goo) { + DelegatingMetaClass metaClass = new DelegatingMetaClass(goo.getMetaClass()) { + + public Object invokeMethod(Object object, String methodName, Object[] arguments) { + System.out.println("Invoking '" + methodName + "'."); + return super.invokeMethod(object, methodName, arguments); + } + }; + metaClass.initialize(); + goo.setMetaClass(metaClass); + } + +} +``` + +A full discussion of meta-programming in Groovy is beyond the scope of the Spring +reference manual. See the relevant section of the Groovy reference manual or do a +search online. Plenty of articles address this topic. 
Actually, making use of a`GroovyObjectCustomizer` is easy if you use the Spring namespace support, as the +following example shows: + +``` +<!-- define the GroovyObjectCustomizer just like any other bean --> +<bean id="tracingCustomizer" class="example.SimpleMethodTracingCustomizer"/> + + <!-- ... and plug it into the desired Groovy bean via the 'customizer-ref' attribute --> + <lang:groovy id="calculator" + script-source="classpath:org/springframework/scripting/groovy/Calculator.groovy" + customizer-ref="tracingCustomizer"/> +``` + +If you do not use the Spring namespace support, you can still use the`GroovyObjectCustomizer` functionality, as the following example shows: + +``` +<bean id="calculator" class="org.springframework.scripting.groovy.GroovyScriptFactory"> + <constructor-arg value="classpath:org/springframework/scripting/groovy/Calculator.groovy"/> + <!-- define the GroovyObjectCustomizer (as an inner bean) --> + <constructor-arg> + <bean id="tracingCustomizer" class="example.SimpleMethodTracingCustomizer"/> + </constructor-arg> +</bean> + +<bean class="org.springframework.scripting.support.ScriptFactoryPostProcessor"/> +``` + +| |You may also specify a Groovy `CompilationCustomizer` (such as an `ImportCustomizer`)<br/>or even a full Groovy `CompilerConfiguration` object in the same place as Spring’s`GroovyObjectCustomizer`. 
Furthermore, you may set a common `GroovyClassLoader` with custom<br/>configuration for your beans at the `ConfigurableApplicationContext.setClassLoader` level;<br/>this also leads to shared `GroovyClassLoader` usage and is therefore recommendable in case of<br/>a large number of scripted beans (avoiding an isolated `GroovyClassLoader` instance per bean).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.2.3. BeanShell Beans + +This section describes how to use BeanShell beans in Spring. + +The [BeanShell homepage](https://beanshell.github.io/intro.html) includes the following +description: + +``` +BeanShell is a small, free, embeddable Java source interpreter with dynamic language +features, written in Java. BeanShell dynamically runs standard Java syntax and +extends it with common scripting conveniences such as loose types, commands, and method +closures like those in Perl and JavaScript. +``` + +In contrast to Groovy, BeanShell-backed bean definitions require some (small) additional +configuration. The implementation of the BeanShell dynamic language support in Spring is +interesting, because Spring creates a JDK dynamic proxy that implements all of the +interfaces that are specified in the `script-interfaces` attribute value of the`<lang:bsh>` element (this is why you must supply at least one interface in the value +of the attribute, and, consequently, program to interfaces when you use BeanShell-backed +beans). 
This means that every method call on a BeanShell-backed object goes through the +JDK dynamic proxy invocation mechanism. + +Now we can show a fully working example of using a BeanShell-based bean that implements +the `Messenger` interface that was defined earlier in this chapter. We again show the +definition of the `Messenger` interface: + +``` +package org.springframework.scripting; + +public interface Messenger { + + String getMessage(); +} +``` + +The following example shows the BeanShell “implementation” (we use the term loosely here) +of the `Messenger` interface: + +``` +String message; + +String getMessage() { + return message; +} + +void setMessage(String aMessage) { + message = aMessage; +} +``` + +The following example shows the Spring XML that defines an “instance” of the above +“class” (again, we use these terms very loosely here): + +``` +<lang:bsh id="messageService" script-source="classpath:BshMessenger.bsh" + script-interfaces="org.springframework.scripting.Messenger"> + + <lang:property name="message" value="Hello World!" /> +</lang:bsh> +``` + +See [Scenarios](#dynamic-language-scenarios) for some scenarios where you might want to use +BeanShell-based beans. + +### 3.3. Scenarios + +The possible scenarios where defining Spring managed beans in a scripting language would +be beneficial are many and varied. This section describes two possible use cases for the +dynamic language support in Spring. + +#### 3.3.1. Scripted Spring MVC Controllers + +One group of classes that can benefit from using dynamic-language-backed beans is that +of Spring MVC controllers. In pure Spring MVC applications, the navigational flow +through a web application is, to a large extent, determined by code encapsulated within +your Spring MVC controllers. 
As the navigational flow and other presentation layer logic +of a web application needs to be updated to respond to support issues or changing +business requirements, it may well be easier to effect any such required changes by +editing one or more dynamic language source files and seeing those changes being +immediately reflected in the state of a running application. + +Remember that, in the lightweight architectural model espoused by projects such as +Spring, you typically aim to have a really thin presentation layer, with all +the meaty business logic of an application being contained in the domain and service +layer classes. Developing Spring MVC controllers as dynamic-language-backed beans lets +you change presentation layer logic by editing and saving text files. Any +changes to such dynamic language source files is (depending on the configuration) +automatically reflected in the beans that are backed by dynamic language source files. + +| |To effect this automatic “pickup” of any changes to dynamic-language-backed<br/>beans, you have to enable the “refreshable beans” functionality. 
See[Refreshable Beans](#dynamic-language-refreshable-beans) for a full treatment of this feature.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows an `org.springframework.web.servlet.mvc.Controller` implemented +by using the Groovy dynamic language: + +``` +// from the file '/WEB-INF/groovy/FortuneController.groovy' +package org.springframework.showcase.fortune.web + +import org.springframework.showcase.fortune.service.FortuneService +import org.springframework.showcase.fortune.domain.Fortune +import org.springframework.web.servlet.ModelAndView +import org.springframework.web.servlet.mvc.Controller + +import javax.servlet.http.HttpServletRequest +import javax.servlet.http.HttpServletResponse + +class FortuneController implements Controller { + + @Property FortuneService fortuneService + + ModelAndView handleRequest(HttpServletRequest request, + HttpServletResponse httpServletResponse) { + return new ModelAndView("tell", "fortune", this.fortuneService.tellFortune()) + } +} +``` + +``` +<lang:groovy id="fortune" + refresh-check-delay="3000" + script-source="/WEB-INF/groovy/FortuneController.groovy"> + <lang:property name="fortuneService" ref="fortuneService"/> +</lang:groovy> +``` + +#### 3.3.2. Scripted Validators + +Another area of application development with Spring that may benefit from the +flexibility afforded by dynamic-language-backed beans is that of validation. It can +be easier to express complex validation logic by using a loosely typed dynamic language +(that may also have support for inline regular expressions) as opposed to regular Java. + +Again, developing validators as dynamic-language-backed beans lets you change +validation logic by editing and saving a simple text file. 
Any such changes are
(depending on the configuration) automatically reflected
+ +You are not limited to advising scripted beans. You can also write aspects themselves +in a supported dynamic language and use such beans to advise other Spring beans. +This really would be an advanced use of the dynamic language support though. + +#### 3.4.2. Scoping + +In case it is not immediately obvious, scripted beans can be scoped in the same way as +any other bean. The `scope` attribute on the various `<lang:language/>` elements lets +you control the scope of the underlying scripted bean, as it does with a regular +bean. (The default scope is [singleton](core.html#beans-factory-scopes-singleton), +as it is with “regular” beans.) + +The following example uses the `scope` attribute to define a Groovy bean scoped as +a [prototype](core.html#beans-factory-scopes-prototype): + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:lang="http://www.springframework.org/schema/lang" + xsi:schemaLocation=" + http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/lang https://www.springframework.org/schema/lang/spring-lang.xsd"> + + <lang:groovy id="messenger" script-source="classpath:Messenger.groovy" scope="prototype"> + <lang:property name="message" value="I Can Do The RoboCop" /> + </lang:groovy> + + <bean id="bookingService" class="x.y.DefaultBookingService"> + <property name="messenger" ref="messenger" /> + </bean> + +</beans> +``` + +See [Bean Scopes](core.html#beans-factory-scopes) in [The IoC Container](core.html#beans)for a full discussion of the scoping support in the Spring Framework. + +#### 3.4.3. The `lang` XML schema + +The `lang` elements in Spring XML configuration deal with exposing objects that have been +written in a dynamic language (such as Groovy or BeanShell) as beans in the Spring container. 
+ +These elements (and the dynamic language support) are comprehensively covered in[Dynamic Language Support](#dynamic-language). See that section +for full details on this support and the `lang` elements. + +To use the elements in the `lang` schema, you need to have the following preamble at the +top of your Spring XML configuration file. The text in the following snippet references +the correct schema so that the tags in the `lang` namespace are available to you: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:lang="http://www.springframework.org/schema/lang" + xsi:schemaLocation=" + http://www.springframework.org/schema/beans https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/lang https://www.springframework.org/schema/lang/spring-lang.xsd"> + + <!-- bean definitions here --> + +</beans> +``` + +### 3.5. Further Resources + +The following links go to further resources about the various dynamic languages referenced +in this chapter: + +* The [Groovy](https://www.groovy-lang.org/) homepage + +* The [BeanShell](https://beanshell.github.io/intro.html) homepage + +* The [JRuby](https://www.jruby.org) homepage diff --git a/docs/en/spring-framework/overview.md b/docs/en/spring-framework/overview.md new file mode 100644 index 0000000000000000000000000000000000000000..55a15b90ba4dde11c28430902abf8319fc079b99 --- /dev/null +++ b/docs/en/spring-framework/overview.md @@ -0,0 +1,141 @@ +# Spring Framework Overview + +Spring makes it easy to create Java enterprise applications. It provides everything you +need to embrace the Java language in an enterprise environment, with support for Groovy +and Kotlin as alternative languages on the JVM, and with the flexibility to create many +kinds of architectures depending on an application’s needs. 
As of Spring Framework 5.1, +Spring requires JDK 8+ (Java SE 8+) and provides out-of-the-box support for JDK 11 LTS. +Java SE 8 update 60 is suggested as the minimum patch release for Java 8, but it is +generally recommended to use a recent patch release. + +Spring supports a wide range of application scenarios. In a large enterprise, applications +often exist for a long time and have to run on a JDK and application server whose upgrade +cycle is beyond developer control. Others may run as a single jar with the server embedded, +possibly in a cloud environment. Yet others may be standalone applications (such as batch +or integration workloads) that do not need a server. + +Spring is open source. It has a large and active community that provides continuous feedback +based on a diverse range of real-world use cases. This has helped Spring to successfully +evolve over a very long time. + +## 1. What We Mean by "Spring" + +The term "Spring" means different things in different contexts. It can be used to refer to +the Spring Framework project itself, which is where it all started. Over time, other Spring +projects have been built on top of the Spring Framework. Most often, when people say +"Spring", they mean the entire family of projects. This reference documentation focuses on +the foundation: the Spring Framework itself. + +The Spring Framework is divided into modules. Applications can choose which modules they need. +At the heart are the modules of the core container, including a configuration model and a +dependency injection mechanism. Beyond that, the Spring Framework provides foundational +support for different application architectures, including messaging, transactional data and +persistence, and web. It also includes the Servlet-based Spring MVC web framework and, in +parallel, the Spring WebFlux reactive web framework. + +A note about modules: Spring’s framework jars allow for deployment to JDK 9’s module path +("Jigsaw"). 
For use in Jigsaw-enabled applications, the Spring Framework 5 jars come with +"Automatic-Module-Name" manifest entries which define stable language-level module names +("spring.core", "spring.context", etc.) independent from jar artifact names (the jars follow +the same naming pattern with "-" instead of ".", e.g. "spring-core" and "spring-context"). +Of course, Spring’s framework jars keep working fine on the classpath on both JDK 8 and 9+. + +## 2. History of Spring and the Spring Framework + +Spring came into being in 2003 as a response to the complexity of the early[J2EE](https://en.wikipedia.org/wiki/Java_Platform,_Enterprise_Edition) specifications. +While some consider Java EE and Spring to be in competition, Spring is, in fact, complementary +to Java EE. The Spring programming model does not embrace the Java EE platform specification; +rather, it integrates with carefully selected individual specifications from the EE umbrella: + +* Servlet API ([JSR 340](https://jcp.org/en/jsr/detail?id=340)) + +* WebSocket API ([JSR 356](https://www.jcp.org/en/jsr/detail?id=356)) + +* Concurrency Utilities ([JSR 236](https://www.jcp.org/en/jsr/detail?id=236)) + +* JSON Binding API ([JSR 367](https://jcp.org/en/jsr/detail?id=367)) + +* Bean Validation ([JSR 303](https://jcp.org/en/jsr/detail?id=303)) + +* JPA ([JSR 338](https://jcp.org/en/jsr/detail?id=338)) + +* JMS ([JSR 914](https://jcp.org/en/jsr/detail?id=914)) + +* as well as JTA/JCA setups for transaction coordination, if necessary. + +The Spring Framework also supports the Dependency Injection +([JSR 330](https://www.jcp.org/en/jsr/detail?id=330)) and Common Annotations +([JSR 250](https://jcp.org/en/jsr/detail?id=250)) specifications, which application developers +may choose to use instead of the Spring-specific mechanisms provided by the Spring Framework. + +As of Spring Framework 5.0, Spring requires the Java EE 7 level (e.g. 
Servlet 3.1+, JPA 2.1+) +as a minimum - while at the same time providing out-of-the-box integration with newer APIs +at the Java EE 8 level (e.g. Servlet 4.0, JSON Binding API) when encountered at runtime. +This keeps Spring fully compatible with e.g. Tomcat 8 and 9, WebSphere 9, and JBoss EAP 7. + +Over time, the role of Java EE in application development has evolved. In the early days of +Java EE and Spring, applications were created to be deployed to an application server. +Today, with the help of Spring Boot, applications are created in a devops- and +cloud-friendly way, with the Servlet container embedded and trivial to change. +As of Spring Framework 5, a WebFlux application does not even use the Servlet API directly +and can run on servers (such as Netty) that are not Servlet containers. + +Spring continues to innovate and to evolve. Beyond the Spring Framework, there are other +projects, such as Spring Boot, Spring Security, Spring Data, Spring Cloud, Spring Batch, +among others. It’s important to remember that each project has its own source code repository, +issue tracker, and release cadence. See [spring.io/projects](https://spring.io/projects) for +the complete list of Spring projects. + +## 3. Design Philosophy + +When you learn about a framework, it’s important to know not only what it does but what +principles it follows. Here are the guiding principles of the Spring Framework: + +* Provide choice at every level. Spring lets you defer design decisions as late as possible. + For example, you can switch persistence providers through configuration without changing + your code. The same is true for many other infrastructure concerns and integration with + third-party APIs. + +* Accommodate diverse perspectives. Spring embraces flexibility and is not opinionated + about how things should be done. It supports a wide range of application needs with + different perspectives. + +* Maintain strong backward compatibility. 
Spring’s evolution has been carefully managed + to force few breaking changes between versions. Spring supports a carefully chosen range + of JDK versions and third-party libraries to facilitate maintenance of applications and + libraries that depend on Spring. + +* Care about API design. The Spring team puts a lot of thought and time into making APIs + that are intuitive and that hold up across many versions and many years. + +* Set high standards for code quality. The Spring Framework puts a strong emphasis on + meaningful, current, and accurate javadoc. It is one of very few projects that can claim + clean code structure with no circular dependencies between packages. + +## 4. Feedback and Contributions + +For how-to questions or diagnosing or debugging issues, we suggest using Stack Overflow. Click [here](https://stackoverflow.com/questions/tagged/spring+or+spring-mvc+or+spring-aop+or+spring-jdbc+or+spring-r2dbc+or+spring-transactions+or+spring-annotations+or+spring-jms+or+spring-el+or+spring-test+or+spring+or+spring-remoting+or+spring-orm+or+spring-jmx+or+spring-cache+or+spring-webflux+or+spring-rsocket?tab=Newest) for a list of the suggested tags to use on Stack Overflow. If you’re fairly certain that +there is a problem in the Spring Framework or would like to suggest a feature, please use +the [GitHub Issues](https://github.com/spring-projects/spring-framework/issues). + +If you have a solution in mind or a suggested fix, you can submit a pull request on [GitHub](https://github.com/spring-projects/spring-framework). However, please keep in mind +that, for all but the most trivial issues, we expect a ticket to be filed in the issue +tracker, where discussions take place and leave a record for future reference. + +For more details see the guidelines at the [CONTRIBUTING](https://github.com/spring-projects/spring-framework/tree/main/CONTRIBUTING.md) page, +located at the top level of the project. + +## 5. 
Getting Started + +If you are just getting started with Spring, you may want to begin using the Spring +Framework by creating a [Spring Boot](https://projects.spring.io/spring-boot/)-based +application. Spring Boot provides a quick (and opinionated) way to create a +production-ready Spring-based application. It is based on the Spring Framework, favors +convention over configuration, and is designed to get you up and running as quickly +as possible. + +You can use [start.spring.io](https://start.spring.io/) to generate a basic project or follow +one of the ["Getting Started" guides](https://spring.io/guides), such as [Getting Started Building a RESTful Web Service](https://spring.io/guides/gs/rest-service/). +As well as being easier to digest, these guides are very task focused, and most of them +are based on Spring Boot. They also cover other projects from the Spring portfolio that +you might want to consider when solving a particular problem. diff --git a/docs/en/spring-framework/testing.md b/docs/en/spring-framework/testing.md new file mode 100644 index 0000000000000000000000000000000000000000..e8aafba4b86156e117305194dccd54e7c2755498 --- /dev/null +++ b/docs/en/spring-framework/testing.md @@ -0,0 +1,8617 @@ +# Testing + +This chapter covers Spring’s support for integration testing and best practices for unit +testing. The Spring team advocates test-driven development (TDD). The Spring team has +found that the correct use of inversion of control (IoC) certainly does make both unit +and integration testing easier (in that the presence of setter methods and appropriate +constructors on classes makes them easier to wire together in a test without having to +set up service locator registries and similar structures). + +## 1. Introduction to Spring Testing + +Testing is an integral part of enterprise software development. 
This chapter focuses on +the value added by the IoC principle to [unit testing](#unit-testing) and on the benefits +of the Spring Framework’s support for [integration testing](#integration-testing). (A +thorough treatment of testing in the enterprise is beyond the scope of this reference +manual.) + +## 2. Unit Testing + +Dependency injection should make your code less dependent on the container than it would +be with traditional Java EE development. The POJOs that make up your application should +be testable in JUnit or TestNG tests, with objects instantiated by using the `new` operator, without Spring or any other container. You can use [mock objects](#mock-objects) (in conjunction with other valuable testing techniques) to test your code in isolation. +If you follow the architecture recommendations for Spring, the resulting clean layering +and componentization of your codebase facilitate easier unit testing. For example, +you can test service layer objects by stubbing or mocking DAO or repository interfaces, +without needing to access persistent data while running unit tests. + +True unit tests typically run extremely quickly, as there is no runtime infrastructure to +set up. Emphasizing true unit tests as part of your development methodology can boost +your productivity. You may not need this section of the testing chapter to help you write +effective unit tests for your IoC-based applications. For certain unit testing scenarios, +however, the Spring Framework provides mock objects and testing support classes, which +are described in this chapter. + +### 2.1. Mock Objects + +Spring includes a number of packages dedicated to mocking: + +* [Environment](#mock-objects-env) + +* [JNDI](#mock-objects-jndi) + +* [Servlet API](#mock-objects-servlet) + +* [Spring Web Reactive](#mock-objects-web-reactive) + +#### 2.1.1. 
Environment + +The `org.springframework.mock.env` package contains mock implementations of the`Environment` and `PropertySource` abstractions (see[Bean Definition Profiles](core.html#beans-definition-profiles)and [`PropertySource` Abstraction](core.html#beans-property-source-abstraction)).`MockEnvironment` and `MockPropertySource` are useful for developing +out-of-container tests for code that depends on environment-specific properties. + +#### 2.1.2. JNDI + +The `org.springframework.mock.jndi` package contains a partial implementation of the JNDI +SPI, which you can use to set up a simple JNDI environment for test suites or stand-alone +applications. If, for example, JDBC `DataSource` instances get bound to the same JNDI +names in test code as they do in a Java EE container, you can reuse both application code +and configuration in testing scenarios without modification. + +| |The mock JNDI support in the `org.springframework.mock.jndi` package is<br/>officially deprecated as of Spring Framework 5.2 in favor of complete solutions from third<br/>parties such as [Simple-JNDI](https://github.com/h-thurow/Simple-JNDI).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.3. Servlet API + +The `org.springframework.mock.web` package contains a comprehensive set of Servlet API +mock objects that are useful for testing web contexts, controllers, and filters. These +mock objects are targeted at usage with Spring’s Web MVC framework and are generally more +convenient to use than dynamic mock objects (such as [EasyMock](http://easymock.org/)) +or alternative Servlet API mock objects (such as [MockObjects](http://www.mockobjects.com)). 
+ +| |Since Spring Framework 5.0, the mock objects in `org.springframework.mock.web` are<br/>based on the Servlet 4.0 API.| +|---|--------------------------------------------------------------------------------------------------------------------| + +The Spring MVC Test framework builds on the mock Servlet API objects to provide an +integration testing framework for Spring MVC. See [MockMvc](#spring-mvc-test-framework). + +#### 2.1.4. Spring Web Reactive + +The `org.springframework.mock.http.server.reactive` package contains mock implementations +of `ServerHttpRequest` and `ServerHttpResponse` for use in WebFlux applications. The`org.springframework.mock.web.server` package contains a mock `ServerWebExchange` that +depends on those mock request and response objects. + +Both `MockServerHttpRequest` and `MockServerHttpResponse` extend from the same abstract +base classes as server-specific implementations and share behavior with them. For +example, a mock request is immutable once created, but you can use the `mutate()` method +from `ServerHttpRequest` to create a modified instance. + +In order for the mock response to properly implement the write contract and return a +write completion handle (that is, `Mono<Void>`), it by default uses a `Flux` with`cache().then()`, which buffers the data and makes it available for assertions in tests. +Applications can set a custom write function (for example, to test an infinite stream). + +The [WebTestClient](#webtestclient) builds on the mock request and response to provide support for +testing WebFlux applications without an HTTP server. The client can also be used for +end-to-end tests with a running server. + +### 2.2. Unit Testing Support Classes + +Spring includes a number of classes that can help with unit testing. They fall into two +categories: + +* [General Testing Utilities](#unit-testing-utilities) + +* [Spring MVC Testing Utilities](#unit-testing-spring-mvc) + +#### 2.2.1. 
General Testing Utilities + +The `org.springframework.test.util` package contains several general purpose utilities +for use in unit and integration testing. + +`ReflectionTestUtils` is a collection of reflection-based utility methods. You can use +these methods in testing scenarios where you need to change the value of a constant, set +a non-`public` field, invoke a non-`public` setter method, or invoke a non-`public`configuration or lifecycle callback method when testing application code for use cases +such as the following: + +* ORM frameworks (such as JPA and Hibernate) that condone `private` or `protected` field + access as opposed to `public` setter methods for properties in a domain entity. + +* Spring’s support for annotations (such as `@Autowired`, `@Inject`, and `@Resource`), + that provide dependency injection for `private` or `protected` fields, setter methods, + and configuration methods. + +* Use of annotations such as `@PostConstruct` and `@PreDestroy` for lifecycle callback + methods. + +[`AopTestUtils`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/util/AopTestUtils.html) is a collection of +AOP-related utility methods. You can use these methods to obtain a reference to the +underlying target object hidden behind one or more Spring proxies. For example, if you +have configured a bean as a dynamic mock by using a library such as EasyMock or Mockito, +and the mock is wrapped in a Spring proxy, you may need direct access to the underlying +mock to configure expectations on it and perform verifications. For Spring’s core AOP +utilities, see [`AopUtils`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/aop/support/AopUtils.html) and[`AopProxyUtils`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/aop/framework/AopProxyUtils.html). + +#### 2.2.2. 
Spring MVC Testing Utilities + +The `org.springframework.test.web` package contains[`ModelAndViewAssert`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/web/ModelAndViewAssert.html), which you +can use in combination with JUnit, TestNG, or any other testing framework for unit tests +that deal with Spring MVC `ModelAndView` objects. + +| |Unit testing Spring MVC Controllers<br/><br/>To unit test your Spring MVC `Controller` classes as POJOs, use `ModelAndViewAssert`combined with `MockHttpServletRequest`, `MockHttpSession`, and so on from Spring’s[Servlet API mocks](#mock-objects-servlet). For thorough integration testing of your<br/>Spring MVC and REST `Controller` classes in conjunction with your `WebApplicationContext`configuration for Spring MVC, use the[Spring MVC Test Framework](#spring-mvc-test-framework) instead.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## 3. Integration Testing + +This section (most of the rest of this chapter) covers integration testing for Spring +applications. It includes the following topics: + +* [Overview](#integration-testing-overview) + +* [Goals of Integration Testing](#integration-testing-goals) + +* [JDBC Testing Support](#integration-testing-support-jdbc) + +* [Annotations](#integration-testing-annotations) + +* [Spring TestContext Framework](#testcontext-framework) + +* [MockMvc](#spring-mvc-test-framework) + +### 3.1. 
Overview + +It is important to be able to perform some integration testing without requiring +deployment to your application server or connecting to other enterprise infrastructure. +Doing so lets you test things such as: + +* The correct wiring of your Spring IoC container contexts. + +* Data access using JDBC or an ORM tool. This can include such things as the correctness + of SQL statements, Hibernate queries, JPA entity mappings, and so forth. + +The Spring Framework provides first-class support for integration testing in the`spring-test` module. The name of the actual JAR file might include the release version +and might also be in the long `org.springframework.test` form, depending on where you get +it from (see the [section on Dependency Management](core.html#dependency-management)for an explanation). This library includes the `org.springframework.test` package, which +contains valuable classes for integration testing with a Spring container. This testing +does not rely on an application server or other deployment environment. Such tests are +slower to run than unit tests but much faster than the equivalent Selenium tests or +remote tests that rely on deployment to an application server. + +Unit and integration testing support is provided in the form of the annotation-driven[Spring TestContext Framework](#testcontext-framework). The TestContext framework is +agnostic of the actual testing framework in use, which allows instrumentation of tests +in various environments, including JUnit, TestNG, and others. + +### 3.2. Goals of Integration Testing + +Spring’s integration testing support has the following primary goals: + +* To manage [Spring IoC container caching](#testing-ctx-management) between tests. + +* To provide [Dependency Injection of test fixture instances](#testing-fixture-di). + +* To provide [transaction management](#testing-tx) appropriate to integration testing. 
+ +* To supply [Spring-specific base classes](#testing-support-classes) that assist + developers in writing integration tests. + +The next few sections describe each goal and provide links to implementation and +configuration details. + +#### 3.2.1. Context Management and Caching + +The Spring TestContext Framework provides consistent loading of Spring`ApplicationContext` instances and `WebApplicationContext` instances as well as caching +of those contexts. Support for the caching of loaded contexts is important, because +startup time can become an issue — not because of the overhead of Spring itself, but +because the objects instantiated by the Spring container take time to instantiate. For +example, a project with 50 to 100 Hibernate mapping files might take 10 to 20 seconds to +load the mapping files, and incurring that cost before running every test in every test +fixture leads to slower overall test runs that reduce developer productivity. + +Test classes typically declare either an array of resource locations for XML or Groovy +configuration metadata — often in the classpath — or an array of component classes that +is used to configure the application. These locations or classes are the same as or +similar to those specified in `web.xml` or other configuration files for production +deployments. + +By default, once loaded, the configured `ApplicationContext` is reused for each test. +Thus, the setup cost is incurred only once per test suite, and subsequent test execution +is much faster. In this context, the term “test suite” means all tests run in the same +JVM — for example, all tests run from an Ant, Maven, or Gradle build for a given project +or module. 
In the unlikely case that a test corrupts the application context and requires +reloading (for example, by modifying a bean definition or the state of an application +object) the TestContext framework can be configured to reload the configuration and +rebuild the application context before executing the next test. + +See [Context Management](#testcontext-ctx-management) and [Context Caching](#testcontext-ctx-management-caching) with the +TestContext framework. + +#### 3.2.2. Dependency Injection of Test Fixtures + +When the TestContext framework loads your application context, it can optionally +configure instances of your test classes by using Dependency Injection. This provides a +convenient mechanism for setting up test fixtures by using preconfigured beans from your +application context. A strong benefit here is that you can reuse application contexts +across various testing scenarios (for example, for configuring Spring-managed object +graphs, transactional proxies, `DataSource` instances, and others), thus avoiding the +need to duplicate complex test fixture setup for individual test cases. + +As an example, consider a scenario where we have a class (`HibernateTitleRepository`) +that implements data access logic for a `Title` domain entity. We want to write +integration tests that test the following areas: + +* The Spring configuration: Basically, is everything related to the configuration of the`HibernateTitleRepository` bean correct and present? + +* The Hibernate mapping file configuration: Is everything mapped correctly and are the + correct lazy-loading settings in place? + +* The logic of the `HibernateTitleRepository`: Does the configured instance of this class + perform as anticipated? + +See dependency injection of test fixtures with the[TestContext framework](#testcontext-fixture-di). + +#### 3.2.3. Transaction Management + +One common issue in tests that access a real database is their effect on the state of the +persistence store. 
Even when you use a development database, changes to the state may +affect future tests. Also, many operations — such as inserting or modifying persistent +data — cannot be performed (or verified) outside of a transaction. + +The TestContext framework addresses this issue. By default, the framework creates and +rolls back a transaction for each test. You can write code that can assume the existence +of a transaction. If you call transactionally proxied objects in your tests, they behave +correctly, according to their configured transactional semantics. In addition, if a test +method deletes the contents of selected tables while running within the transaction +managed for the test, the transaction rolls back by default, and the database returns to +its state prior to execution of the test. Transactional support is provided to a test by +using a `PlatformTransactionManager` bean defined in the test’s application context. + +If you want a transaction to commit (unusual, but occasionally useful when you want a +particular test to populate or modify the database), you can tell the TestContext +framework to cause the transaction to commit instead of roll back by using the[`@Commit`](#integration-testing-annotations) annotation. + +See transaction management with the [TestContext framework](#testcontext-tx). + +#### 3.2.4. Support Classes for Integration Testing + +The Spring TestContext Framework provides several `abstract` support classes that +simplify the writing of integration tests. These base test classes provide well-defined +hooks into the testing framework as well as convenient instance variables and methods, +which let you access: + +* The `ApplicationContext`, for performing explicit bean lookups or testing the state of + the context as a whole. + +* A `JdbcTemplate`, for executing SQL statements to query the database. 
You can use such + queries to confirm database state both before and after execution of database-related + application code, and Spring ensures that such queries run in the scope of the same + transaction as the application code. When used in conjunction with an ORM tool, be sure + to avoid [false positives](#testcontext-tx-false-positives). + +In addition, you may want to create your own custom, application-wide superclass with +instance variables and methods specific to your project. + +See support classes for the [TestContext framework](#testcontext-support-classes). + +### 3.3. JDBC Testing Support + +The `org.springframework.test.jdbc` package contains `JdbcTestUtils`, which is a +collection of JDBC-related utility functions intended to simplify standard database +testing scenarios. Specifically, `JdbcTestUtils` provides the following static utility +methods. + +* `countRowsInTable(..)`: Counts the number of rows in the given table. + +* `countRowsInTableWhere(..)`: Counts the number of rows in the given table by using the + provided `WHERE` clause. + +* `deleteFromTables(..)`: Deletes all rows from the specified tables. + +* `deleteFromTableWhere(..)`: Deletes rows from the given table by using the provided`WHERE` clause. + +* `dropTables(..)`: Drops the specified tables. 
+ +| |[`AbstractTransactionalJUnit4SpringContextTests`](#testcontext-support-classes-junit4)and [`AbstractTransactionalTestNGSpringContextTests`](#testcontext-support-classes-testng)provide convenience methods that delegate to the aforementioned methods in`JdbcTestUtils`.<br/><br/>The `spring-jdbc` module provides support for configuring and launching an embedded<br/>database, which you can use in integration tests that interact with a database.<br/>For details, see [Embedded Database<br/>Support](data-access.html#jdbc-embedded-database-support) and [Testing Data Access<br/>Logic with an Embedded Database](data-access.html#jdbc-embedded-database-dao-testing).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.4. Annotations + +This section covers annotations that you can use when you test Spring applications. +It includes the following topics: + +* [Spring Testing Annotations](#integration-testing-annotations-spring) + +* [Standard Annotation Support](#integration-testing-annotations-standard) + +* [Spring JUnit 4 Testing Annotations](#integration-testing-annotations-junit4) + +* [Spring JUnit Jupiter Testing Annotations](#integration-testing-annotations-junit-jupiter) + +* [Meta-Annotation Support for Testing](#integration-testing-annotations-meta) + +#### 3.4.1. 
Spring Testing Annotations + +The Spring Framework provides the following set of Spring-specific annotations that you +can use in your unit and integration tests in conjunction with the TestContext framework. +See the corresponding javadoc for further information, including default attribute +values, attribute aliases, and other details. + +Spring’s testing annotations include the following: + +* [`@BootstrapWith`](#spring-testing-annotation-bootstrapwith) + +* [`@ContextConfiguration`](#spring-testing-annotation-contextconfiguration) + +* [`@WebAppConfiguration`](#spring-testing-annotation-webappconfiguration) + +* [`@ContextHierarchy`](#spring-testing-annotation-contexthierarchy) + +* [`@ActiveProfiles`](#spring-testing-annotation-activeprofiles) + +* [`@TestPropertySource`](#spring-testing-annotation-testpropertysource) + +* [`@DynamicPropertySource`](#spring-testing-annotation-dynamicpropertysource) + +* [`@DirtiesContext`](#spring-testing-annotation-dirtiescontext) + +* [`@TestExecutionListeners`](#spring-testing-annotation-testexecutionlisteners) + +* [`@RecordApplicationEvents`](#spring-testing-annotation-recordapplicationevents) + +* [`@Commit`](#spring-testing-annotation-commit) + +* [`@Rollback`](#spring-testing-annotation-rollback) + +* [`@BeforeTransaction`](#spring-testing-annotation-beforetransaction) + +* [`@AfterTransaction`](#spring-testing-annotation-aftertransaction) + +* [`@Sql`](#spring-testing-annotation-sql) + +* [`@SqlConfig`](#spring-testing-annotation-sqlconfig) + +* [`@SqlMergeMode`](#spring-testing-annotation-sqlmergemode) + +* [`@SqlGroup`](#spring-testing-annotation-sqlgroup) + +##### `@BootstrapWith` + +`@BootstrapWith` is a class-level annotation that you can use to configure how the Spring +TestContext Framework is bootstrapped. Specifically, you can use `@BootstrapWith` to +specify a custom `TestContextBootstrapper`. See the section on[bootstrapping the TestContext framework](#testcontext-bootstrapping) for further details. 
+ +##### `@ContextConfiguration` + +`@ContextConfiguration` defines class-level metadata that is used to determine how to +load and configure an `ApplicationContext` for integration tests. Specifically,`@ContextConfiguration` declares the application context resource `locations` or the +component `classes` used to load the context. + +Resource locations are typically XML configuration files or Groovy scripts located in the +classpath, while component classes are typically `@Configuration` classes. However, +resource locations can also refer to files and scripts in the file system, and component +classes can be `@Component` classes, `@Service` classes, and so on. See[Component Classes](#testcontext-ctx-management-javaconfig-component-classes) for further details. + +The following example shows a `@ContextConfiguration` annotation that refers to an XML +file: + +Java + +``` +@ContextConfiguration("/test-config.xml") (1) +class XmlApplicationContextTests { + // class body... +} +``` + +|**1**|Referring to an XML file.| +|-----|-------------------------| + +Kotlin + +``` +@ContextConfiguration("/test-config.xml") (1) +class XmlApplicationContextTests { + // class body... +} +``` + +|**1**|Referring to an XML file.| +|-----|-------------------------| + +The following example shows a `@ContextConfiguration` annotation that refers to a class: + +Java + +``` +@ContextConfiguration(classes = TestConfig.class) (1) +class ConfigClassApplicationContextTests { + // class body... +} +``` + +|**1**|Referring to a class.| +|-----|---------------------| + +Kotlin + +``` +@ContextConfiguration(classes = [TestConfig::class]) (1) +class ConfigClassApplicationContextTests { + // class body... +} +``` + +|**1**|Referring to a class.| +|-----|---------------------| + +As an alternative or in addition to declaring resource locations or component classes, +you can use `@ContextConfiguration` to declare `ApplicationContextInitializer` classes. 
+The following example shows such a case: + +Java + +``` +@ContextConfiguration(initializers = CustomContextInitializer.class) (1) +class ContextInitializerTests { + // class body... +} +``` + +|**1**|Declaring an initializer class.| +|-----|-------------------------------| + +Kotlin + +``` +@ContextConfiguration(initializers = [CustomContextInitializer::class]) (1) +class ContextInitializerTests { + // class body... +} +``` + +|**1**|Declaring an initializer class.| +|-----|-------------------------------| + +You can optionally use `@ContextConfiguration` to declare the `ContextLoader` strategy as +well. Note, however, that you typically do not need to explicitly configure the loader, +since the default loader supports `initializers` and either resource `locations` or +component `classes`. + +The following example uses both a location and a loader: + +Java + +``` +@ContextConfiguration(locations = "/test-context.xml", loader = CustomContextLoader.class) (1) +class CustomLoaderXmlApplicationContextTests { + // class body... +} +``` + +|**1**|Configuring both a location and a custom loader.| +|-----|------------------------------------------------| + +Kotlin + +``` +@ContextConfiguration("/test-context.xml", loader = CustomContextLoader::class) (1) +class CustomLoaderXmlApplicationContextTests { + // class body... 
+} +``` + +|**1**|Configuring both a location and a custom loader.| +|-----|------------------------------------------------| + +| |`@ContextConfiguration` provides support for inheriting resource locations or<br/>configuration classes as well as context initializers that are declared by superclasses<br/>or enclosing classes.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [Context Management](#testcontext-ctx-management),[`@Nested` test class configuration](#testcontext-junit-jupiter-nested-test-configuration), and the `@ContextConfiguration`javadocs for further details. + +##### `@WebAppConfiguration` + +`@WebAppConfiguration` is a class-level annotation that you can use to declare that the`ApplicationContext` loaded for an integration test should be a `WebApplicationContext`. +The mere presence of `@WebAppConfiguration` on a test class ensures that a`WebApplicationContext` is loaded for the test, using the default value of`"file:src/main/webapp"` for the path to the root of the web application (that is, the +resource base path). The resource base path is used behind the scenes to create a`MockServletContext`, which serves as the `ServletContext` for the test’s`WebApplicationContext`. + +The following example shows how to use the `@WebAppConfiguration` annotation: + +Java + +``` +@ContextConfiguration +@WebAppConfiguration (1) +class WebAppTests { + // class body... +} +``` + +Kotlin + +``` +@ContextConfiguration +@WebAppConfiguration (1) +class WebAppTests { + // class body... +} +``` + +|**1**|The `@WebAppConfiguration` annotation.| +|-----|--------------------------------------| + +To override the default, you can specify a different base resource path by using the +implicit `value` attribute. Both `classpath:` and `file:` resource prefixes are +supported. 
If no resource prefix is supplied, the path is assumed to be a file system +resource. The following example shows how to specify a classpath resource: + +Java + +``` +@ContextConfiguration +@WebAppConfiguration("classpath:test-web-resources") (1) +class WebAppTests { + // class body... +} +``` + +|**1**|Specifying a classpath resource.| +|-----|--------------------------------| + +Kotlin + +``` +@ContextConfiguration +@WebAppConfiguration("classpath:test-web-resources") (1) +class WebAppTests { + // class body... +} +``` + +|**1**|Specifying a classpath resource.| +|-----|--------------------------------| + +Note that `@WebAppConfiguration` must be used in conjunction with`@ContextConfiguration`, either within a single test class or within a test class +hierarchy. See the[`@WebAppConfiguration`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/web/WebAppConfiguration.html)javadoc for further details. + +##### `@ContextHierarchy` + +`@ContextHierarchy` is a class-level annotation that is used to define a hierarchy of`ApplicationContext` instances for integration tests. `@ContextHierarchy` should be +declared with a list of one or more `@ContextConfiguration` instances, each of which +defines a level in the context hierarchy. The following examples demonstrate the use of`@ContextHierarchy` within a single test class (`@ContextHierarchy` can also be used +within a test class hierarchy): + +Java + +``` +@ContextHierarchy({ + @ContextConfiguration("/parent-config.xml"), + @ContextConfiguration("/child-config.xml") +}) +class ContextHierarchyTests { + // class body... +} +``` + +Kotlin + +``` +@ContextHierarchy( + ContextConfiguration("/parent-config.xml"), + ContextConfiguration("/child-config.xml")) +class ContextHierarchyTests { + // class body... 
+} +``` + +Java + +``` +@WebAppConfiguration +@ContextHierarchy({ + @ContextConfiguration(classes = AppConfig.class), + @ContextConfiguration(classes = WebConfig.class) +}) +class WebIntegrationTests { + // class body... +} +``` + +Kotlin + +``` +@WebAppConfiguration +@ContextHierarchy( + ContextConfiguration(classes = [AppConfig::class]), + ContextConfiguration(classes = [WebConfig::class])) +class WebIntegrationTests { + // class body... +} +``` + +If you need to merge or override the configuration for a given level of the context +hierarchy within a test class hierarchy, you must explicitly name that level by supplying +the same value to the `name` attribute in `@ContextConfiguration` at each corresponding +level in the class hierarchy. See [Context Hierarchies](#testcontext-ctx-management-ctx-hierarchies) and the[`@ContextHierarchy`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/ContextHierarchy.html) javadoc +for further examples. + +##### `@ActiveProfiles` + +`@ActiveProfiles` is a class-level annotation that is used to declare which bean +definition profiles should be active when loading an `ApplicationContext` for an +integration test. + +The following example indicates that the `dev` profile should be active: + +Java + +``` +@ContextConfiguration +@ActiveProfiles("dev") (1) +class DeveloperTests { + // class body... +} +``` + +|**1**|Indicate that the `dev` profile should be active.| +|-----|-------------------------------------------------| + +Kotlin + +``` +@ContextConfiguration +@ActiveProfiles("dev") (1) +class DeveloperTests { + // class body... 
+} +``` + +|**1**|Indicate that the `dev` profile should be active.| +|-----|-------------------------------------------------| + +The following example indicates that both the `dev` and the `integration` profiles should +be active: + +Java + +``` +@ContextConfiguration +@ActiveProfiles({"dev", "integration"}) (1) +class DeveloperIntegrationTests { + // class body... +} +``` + +|**1**|Indicate that the `dev` and `integration` profiles should be active.| +|-----|--------------------------------------------------------------------| + +Kotlin + +``` +@ContextConfiguration +@ActiveProfiles(["dev", "integration"]) (1) +class DeveloperIntegrationTests { + // class body... +} +``` + +|**1**|Indicate that the `dev` and `integration` profiles should be active.| +|-----|--------------------------------------------------------------------| + +| |`@ActiveProfiles` provides support for inheriting active bean definition profiles<br/>declared by superclasses and enclosing classes by default. You can also resolve active<br/>bean definition profiles programmatically by implementing a custom[`ActiveProfilesResolver`](#testcontext-ctx-management-env-profiles-ActiveProfilesResolver)and registering it by using the `resolver` attribute of `@ActiveProfiles`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [Context Configuration with Environment Profiles](#testcontext-ctx-management-env-profiles),[`@Nested` test class configuration](#testcontext-junit-jupiter-nested-test-configuration), and 
the[`@ActiveProfiles`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/ActiveProfiles.html) javadoc for +examples and further details. + +##### `@TestPropertySource` + +`@TestPropertySource` is a class-level annotation that you can use to configure the +locations of properties files and inlined properties to be added to the set of`PropertySources` in the `Environment` for an `ApplicationContext` loaded for an +integration test. + +The following example demonstrates how to declare a properties file from the classpath: + +Java + +``` +@ContextConfiguration +@TestPropertySource("/test.properties") (1) +class MyIntegrationTests { + // class body... +} +``` + +|**1**|Get properties from `test.properties` in the root of the classpath.| +|-----|-------------------------------------------------------------------| + +Kotlin + +``` +@ContextConfiguration +@TestPropertySource("/test.properties") (1) +class MyIntegrationTests { + // class body... +} +``` + +|**1**|Get properties from `test.properties` in the root of the classpath.| +|-----|-------------------------------------------------------------------| + +The following example demonstrates how to declare inlined properties: + +Java + +``` +@ContextConfiguration +@TestPropertySource(properties = { "timezone = GMT", "port: 4242" }) (1) +class MyIntegrationTests { + // class body... +} +``` + +|**1**|Declare `timezone` and `port` properties.| +|-----|-----------------------------------------| + +Kotlin + +``` +@ContextConfiguration +@TestPropertySource(properties = ["timezone = GMT", "port: 4242"]) (1) +class MyIntegrationTests { + // class body... +} +``` + +|**1**|Declare `timezone` and `port` properties.| +|-----|-----------------------------------------| + +See [Context Configuration with Test Property Sources](#testcontext-ctx-management-property-sources) for examples and further details. 
+ +##### `@DynamicPropertySource` + +`@DynamicPropertySource` is a method-level annotation that you can use to register*dynamic* properties to be added to the set of `PropertySources` in the `Environment` for +an `ApplicationContext` loaded for an integration test. Dynamic properties are useful +when you do not know the value of the properties upfront – for example, if the properties +are managed by an external resource such as for a container managed by the[Testcontainers](https://www.testcontainers.org/) project. + +The following example demonstrates how to register a dynamic property: + +Java + +``` +@ContextConfiguration +class MyIntegrationTests { + + static MyExternalServer server = // ... + + @DynamicPropertySource (1) + static void dynamicProperties(DynamicPropertyRegistry registry) { (2) + registry.add("server.port", server::getPort); (3) + } + + // tests ... +} +``` + +|**1**| Annotate a `static` method with `@DynamicPropertySource`. | +|-----|---------------------------------------------------------------------------------| +|**2**| Accept a `DynamicPropertyRegistry` as an argument. | +|**3**|Register a dynamic `server.port` property to be retrieved lazily from the server.| + +Kotlin + +``` +@ContextConfiguration +class MyIntegrationTests { + + companion object { + + @JvmStatic + val server: MyExternalServer = // ... + + @DynamicPropertySource (1) + @JvmStatic + fun dynamicProperties(registry: DynamicPropertyRegistry) { (2) + registry.add("server.port", server::getPort) (3) + } + } + + // tests ... +} +``` + +|**1**| Annotate a `static` method with `@DynamicPropertySource`. | +|-----|---------------------------------------------------------------------------------| +|**2**| Accept a `DynamicPropertyRegistry` as an argument. | +|**3**|Register a dynamic `server.port` property to be retrieved lazily from the server.| + +See [Context Configuration with Dynamic Property Sources](#testcontext-ctx-management-dynamic-property-sources) for further details. 
+ +##### `@DirtiesContext` + +`@DirtiesContext` indicates that the underlying Spring `ApplicationContext` has been +dirtied during the execution of a test (that is, the test modified or corrupted it in +some manner — for example, by changing the state of a singleton bean) and should be +closed. When an application context is marked as dirty, it is removed from the testing +framework’s cache and closed. As a consequence, the underlying Spring container is +rebuilt for any subsequent test that requires a context with the same configuration +metadata. + +You can use `@DirtiesContext` as both a class-level and a method-level annotation within +the same class or class hierarchy. In such scenarios, the `ApplicationContext` is marked +as dirty before or after any such annotated method as well as before or after the current +test class, depending on the configured `methodMode` and `classMode`. + +The following examples explain when the context would be dirtied for various +configuration scenarios: + +* Before the current test class, when declared on a class with class mode set to`BEFORE_CLASS`. + + Java + + ``` + @DirtiesContext(classMode = BEFORE_CLASS) (1) + class FreshContextTests { + // some tests that require a new Spring container + } + ``` + + |**1**|Dirty the context before the current test class.| + |-----|------------------------------------------------| + + Kotlin + + ``` + @DirtiesContext(classMode = BEFORE_CLASS) (1) + class FreshContextTests { + // some tests that require a new Spring container + } + ``` + + |**1**|Dirty the context before the current test class.| + |-----|------------------------------------------------| + +* After the current test class, when declared on a class with class mode set to`AFTER_CLASS` (i.e., the default class mode). 
+ + Java + + ``` + @DirtiesContext (1) + class ContextDirtyingTests { + // some tests that result in the Spring container being dirtied + } + ``` + + |**1**|Dirty the context after the current test class.| + |-----|-----------------------------------------------| + + Kotlin + + ``` + @DirtiesContext (1) + class ContextDirtyingTests { + // some tests that result in the Spring container being dirtied + } + ``` + + |**1**|Dirty the context after the current test class.| + |-----|-----------------------------------------------| + +* Before each test method in the current test class, when declared on a class with class + mode set to `BEFORE_EACH_TEST_METHOD.` + + Java + + ``` + @DirtiesContext(classMode = BEFORE_EACH_TEST_METHOD) (1) + class FreshContextTests { + // some tests that require a new Spring container + } + ``` + + |**1**|Dirty the context before each test method.| + |-----|------------------------------------------| + + Kotlin + + ``` + @DirtiesContext(classMode = BEFORE_EACH_TEST_METHOD) (1) + class FreshContextTests { + // some tests that require a new Spring container + } + ``` + + |**1**|Dirty the context before each test method.| + |-----|------------------------------------------| + +* After each test method in the current test class, when declared on a class with class + mode set to `AFTER_EACH_TEST_METHOD.` + + Java + + ``` + @DirtiesContext(classMode = AFTER_EACH_TEST_METHOD) (1) + class ContextDirtyingTests { + // some tests that result in the Spring container being dirtied + } + ``` + + |**1**|Dirty the context after each test method.| + |-----|-----------------------------------------| + + Kotlin + + ``` + @DirtiesContext(classMode = AFTER_EACH_TEST_METHOD) (1) + class ContextDirtyingTests { + // some tests that result in the Spring container being dirtied + } + ``` + + |**1**|Dirty the context after each test method.| + |-----|-----------------------------------------| + +* Before the current test, when declared on a method with the method mode 
set to`BEFORE_METHOD`. + + Java + + ``` + @DirtiesContext(methodMode = BEFORE_METHOD) (1) + @Test + void testProcessWhichRequiresFreshAppCtx() { + // some logic that requires a new Spring container + } + ``` + + |**1**|Dirty the context before the current test method.| + |-----|-------------------------------------------------| + + Kotlin + + ``` + @DirtiesContext(methodMode = BEFORE_METHOD) (1) + @Test + fun testProcessWhichRequiresFreshAppCtx() { + // some logic that requires a new Spring container + } + ``` + + |**1**|Dirty the context before the current test method.| + |-----|-------------------------------------------------| + +* After the current test, when declared on a method with the method mode set to`AFTER_METHOD` (i.e., the default method mode). + + Java + + ``` + @DirtiesContext (1) + @Test + void testProcessWhichDirtiesAppCtx() { + // some logic that results in the Spring container being dirtied + } + ``` + + |**1**|Dirty the context after the current test method.| + |-----|------------------------------------------------| + + Kotlin + + ``` + @DirtiesContext (1) + @Test + fun testProcessWhichDirtiesAppCtx() { + // some logic that results in the Spring container being dirtied + } + ``` + + |**1**|Dirty the context after the current test method.| + |-----|------------------------------------------------| + +If you use `@DirtiesContext` in a test whose context is configured as part of a context +hierarchy with `@ContextHierarchy`, you can use the `hierarchyMode` flag to control how +the context cache is cleared. By default, an exhaustive algorithm is used to clear the +context cache, including not only the current level but also all other context +hierarchies that share an ancestor context common to the current test. All`ApplicationContext` instances that reside in a sub-hierarchy of the common ancestor +context are removed from the context cache and closed. 
If the exhaustive algorithm is +overkill for a particular use case, you can specify the simpler current level algorithm, +as the following example shows. + +Java + +``` +@ContextHierarchy({ + @ContextConfiguration("/parent-config.xml"), + @ContextConfiguration("/child-config.xml") +}) +class BaseTests { + // class body... +} + +class ExtendedTests extends BaseTests { + + @Test + @DirtiesContext(hierarchyMode = CURRENT_LEVEL) (1) + void test() { + // some logic that results in the child context being dirtied + } +} +``` + +|**1**|Use the current-level algorithm.| +|-----|--------------------------------| + +Kotlin + +``` +@ContextHierarchy( + ContextConfiguration("/parent-config.xml"), + ContextConfiguration("/child-config.xml")) +open class BaseTests { + // class body... +} + +class ExtendedTests : BaseTests() { + + @Test + @DirtiesContext(hierarchyMode = CURRENT_LEVEL) (1) + fun test() { + // some logic that results in the child context being dirtied + } +} +``` + +|**1**|Use the current-level algorithm.| +|-----|--------------------------------| + +For further details regarding the `EXHAUSTIVE` and `CURRENT_LEVEL` algorithms, see the[`DirtiesContext.HierarchyMode`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/annotation/DirtiesContext.HierarchyMode.html)javadoc. + +##### `@TestExecutionListeners` + +`@TestExecutionListeners` defines class-level metadata for configuring the`TestExecutionListener` implementations that should be registered with the`TestContextManager`. Typically, `@TestExecutionListeners` is used in conjunction with`@ContextConfiguration`. + +The following example shows how to register two `TestExecutionListener` implementations: + +Java + +``` +@ContextConfiguration +@TestExecutionListeners({CustomTestExecutionListener.class, AnotherTestExecutionListener.class}) (1) +class CustomTestExecutionListenerTests { + // class body... 
+} +``` + +|**1**|Register two `TestExecutionListener` implementations.| +|-----|-----------------------------------------------------| + +Kotlin + +``` +@ContextConfiguration +@TestExecutionListeners(CustomTestExecutionListener::class, AnotherTestExecutionListener::class) (1) +class CustomTestExecutionListenerTests { + // class body... +} +``` + +|**1**|Register two `TestExecutionListener` implementations.| +|-----|-----------------------------------------------------| + +By default, `@TestExecutionListeners` provides support for inheriting listeners from +superclasses or enclosing classes. See[`@Nested` test class configuration](#testcontext-junit-jupiter-nested-test-configuration) and the[`@TestExecutionListeners`javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/TestExecutionListeners.html) for an example and further details. + +##### `@RecordApplicationEvents` + +`@RecordApplicationEvents` is a class-level annotation that is used to instruct the*Spring TestContext Framework* to record all application events that are published in the`ApplicationContext` during the execution of a single test. + +The recorded events can be accessed via the `ApplicationEvents` API within tests. + +See [Application Events](#testcontext-application-events) and the[`@RecordApplicationEvents`javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/event/RecordApplicationEvents.html) for an example and further details. + +##### `@Commit` + +`@Commit` indicates that the transaction for a transactional test method should be +committed after the test method has completed. You can use `@Commit` as a direct +replacement for `@Rollback(false)` to more explicitly convey the intent of the code. +Analogous to `@Rollback`, `@Commit` can also be declared as a class-level or method-level +annotation. 
+ +The following example shows how to use the `@Commit` annotation: + +Java + +``` +@Commit (1) +@Test +void testProcessWithoutRollback() { + // ... +} +``` + +|**1**|Commit the result of the test to the database.| +|-----|----------------------------------------------| + +Kotlin + +``` +@Commit (1) +@Test +fun testProcessWithoutRollback() { + // ... +} +``` + +|**1**|Commit the result of the test to the database.| +|-----|----------------------------------------------| + +##### `@Rollback` + +`@Rollback` indicates whether the transaction for a transactional test method should be +rolled back after the test method has completed. If `true`, the transaction is rolled +back. Otherwise, the transaction is committed (see also[`@Commit`](#spring-testing-annotation-commit)). Rollback for integration tests in the Spring +TestContext Framework defaults to `true` even if `@Rollback` is not explicitly declared. + +When declared as a class-level annotation, `@Rollback` defines the default rollback +semantics for all test methods within the test class hierarchy. When declared as a +method-level annotation, `@Rollback` defines rollback semantics for the specific test +method, potentially overriding class-level `@Rollback` or `@Commit` semantics. + +The following example causes a test method’s result to not be rolled back (that is, the +result is committed to the database): + +Java + +``` +@Rollback(false) (1) +@Test +void testProcessWithoutRollback() { + // ... +} +``` + +|**1**|Do not roll back the result.| +|-----|----------------------------| + +Kotlin + +``` +@Rollback(false) (1) +@Test +fun testProcessWithoutRollback() { + // ... +} +``` + +|**1**|Do not roll back the result.| +|-----|----------------------------| + +##### `@BeforeTransaction` + +`@BeforeTransaction` indicates that the annotated `void` method should be run before a +transaction is started, for test methods that have been configured to run within a +transaction by using Spring’s `@Transactional` annotation. 
`@BeforeTransaction` methods +are not required to be `public` and may be declared on Java 8-based interface default +methods. + +The following example shows how to use the `@BeforeTransaction` annotation: + +Java + +``` +@BeforeTransaction (1) +void beforeTransaction() { + // logic to be run before a transaction is started +} +``` + +|**1**|Run this method before a transaction.| +|-----|-------------------------------------| + +Kotlin + +``` +@BeforeTransaction (1) +fun beforeTransaction() { + // logic to be run before a transaction is started +} +``` + +|**1**|Run this method before a transaction.| +|-----|-------------------------------------| + +##### `@AfterTransaction` + +`@AfterTransaction` indicates that the annotated `void` method should be run after a +transaction is ended, for test methods that have been configured to run within a +transaction by using Spring’s `@Transactional` annotation. `@AfterTransaction` methods +are not required to be `public` and may be declared on Java 8-based interface default +methods. + +Java + +``` +@AfterTransaction (1) +void afterTransaction() { + // logic to be run after a transaction has ended +} +``` + +|**1**|Run this method after a transaction.| +|-----|------------------------------------| + +Kotlin + +``` +@AfterTransaction (1) +fun afterTransaction() { + // logic to be run after a transaction has ended +} +``` + +|**1**|Run this method after a transaction.| +|-----|------------------------------------| + +##### `@Sql` + +`@Sql` is used to annotate a test class or test method to configure SQL scripts to be run +against a given database during integration tests. 
The following example shows how to use +it: + +Java + +``` +@Test +@Sql({"/test-schema.sql", "/test-user-data.sql"}) (1) +void userTest() { + // run code that relies on the test schema and test data +} +``` + +|**1**|Run two scripts for this test.| +|-----|------------------------------| + +Kotlin + +``` +@Test +@Sql("/test-schema.sql", "/test-user-data.sql") (1) +fun userTest() { + // run code that relies on the test schema and test data +} +``` + +|**1**|Run two scripts for this test.| +|-----|------------------------------| + +See [Executing SQL scripts declaratively with @Sql](#testcontext-executing-sql-declaratively) for further details. + +##### `@SqlConfig` + +`@SqlConfig` defines metadata that is used to determine how to parse and run SQL scripts +configured with the `@Sql` annotation. The following example shows how to use it: + +Java + +``` +@Test +@Sql( + scripts = "/test-user-data.sql", + config = @SqlConfig(commentPrefix = "`", separator = "@@") (1) +) +void userTest() { + // run code that relies on the test data +} +``` + +|**1**|Set the comment prefix and the separator in SQL scripts.| +|-----|--------------------------------------------------------| + +Kotlin + +``` +@Test +@Sql("/test-user-data.sql", config = SqlConfig(commentPrefix = "`", separator = "@@")) (1) +fun userTest() { + // run code that relies on the test data +} +``` + +|**1**|Set the comment prefix and the separator in SQL scripts.| +|-----|--------------------------------------------------------| + +##### `@SqlMergeMode` + +`@SqlMergeMode` is used to annotate a test class or test method to configure whether +method-level `@Sql` declarations are merged with class-level `@Sql` declarations. If`@SqlMergeMode` is not declared on a test class or test method, the `OVERRIDE` merge mode +will be used by default. With the `OVERRIDE` mode, method-level `@Sql` declarations will +effectively override class-level `@Sql` declarations. 
+ +Note that a method-level `@SqlMergeMode` declaration overrides a class-level declaration. + +The following example shows how to use `@SqlMergeMode` at the class level. + +Java + +``` +@SpringJUnitConfig(TestConfig.class) +@Sql("/test-schema.sql") +@SqlMergeMode(MERGE) (1) +class UserTests { + + @Test + @Sql("/user-test-data-001.sql") + void standardUserProfile() { + // run code that relies on test data set 001 + } +} +``` + +|**1**|Set the `@Sql` merge mode to `MERGE` for all test methods in the class.| +|-----|-----------------------------------------------------------------------| + +Kotlin + +``` +@SpringJUnitConfig(TestConfig::class) +@Sql("/test-schema.sql") +@SqlMergeMode(MERGE) (1) +class UserTests { + + @Test + @Sql("/user-test-data-001.sql") + fun standardUserProfile() { + // run code that relies on test data set 001 + } +} +``` + +|**1**|Set the `@Sql` merge mode to `MERGE` for all test methods in the class.| +|-----|-----------------------------------------------------------------------| + +The following example shows how to use `@SqlMergeMode` at the method level. 
+
+Java
+
+```
+@SpringJUnitConfig(TestConfig.class)
+@Sql("/test-schema.sql")
+class UserTests {
+
+    @Test
+    @Sql("/user-test-data-001.sql")
+    @SqlMergeMode(MERGE) (1)
+    void standardUserProfile() {
+        // run code that relies on test data set 001
+    }
+}
+```
+
+|**1**|Set the `@Sql` merge mode to `MERGE` for a specific test method.|
+|-----|----------------------------------------------------------------|
+
+Kotlin
+
+```
+@SpringJUnitConfig(TestConfig::class)
+@Sql("/test-schema.sql")
+class UserTests {
+
+    @Test
+    @Sql("/user-test-data-001.sql")
+    @SqlMergeMode(MERGE) (1)
+    fun standardUserProfile() {
+        // run code that relies on test data set 001
+    }
+}
+```
+
+|**1**|Set the `@Sql` merge mode to `MERGE` for a specific test method.|
+|-----|----------------------------------------------------------------|
+
+##### `@SqlGroup`
+
+`@SqlGroup` is a container annotation that aggregates several `@Sql` annotations. You can
+use `@SqlGroup` natively to declare several nested `@Sql` annotations, or you can use it
+in conjunction with Java 8’s support for repeatable annotations, where `@Sql` can be
+declared several times on the same class or method, implicitly generating this container
+annotation. The following example shows how to declare an SQL group:
+
+Java
+
+```
+@Test
+@SqlGroup({ (1)
+    @Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`")),
+    @Sql("/test-user-data.sql")
+})
+void userTest() {
+    // run code that uses the test schema and test data
+}
+```
+
+|**1**|Declare a group of SQL scripts.|
+|-----|-------------------------------|
+
+Kotlin
+
+```
+@Test
+@SqlGroup( (1)
+    Sql("/test-schema.sql", config = SqlConfig(commentPrefix = "`")),
+    Sql("/test-user-data.sql"))
+fun userTest() {
+    // run code that uses the test schema and test data
+}
+```
+
+|**1**|Declare a group of SQL scripts.|
+|-----|-------------------------------|
+
+#### 3.4.2. 
Standard Annotation Support + +The following annotations are supported with standard semantics for all configurations of +the Spring TestContext Framework. Note that these annotations are not specific to tests +and can be used anywhere in the Spring Framework. + +* `@Autowired` + +* `@Qualifier` + +* `@Value` + +* `@Resource` (javax.annotation) if JSR-250 is present + +* `@ManagedBean` (javax.annotation) if JSR-250 is present + +* `@Inject` (javax.inject) if JSR-330 is present + +* `@Named` (javax.inject) if JSR-330 is present + +* `@PersistenceContext` (javax.persistence) if JPA is present + +* `@PersistenceUnit` (javax.persistence) if JPA is present + +* `@Required` + +* `@Transactional` (org.springframework.transaction.annotation)*with [limited attribute support](#testcontext-tx-attribute-support)* + +| |JSR-250 Lifecycle Annotations<br/><br/>In the Spring TestContext Framework, you can use `@PostConstruct` and `@PreDestroy` with<br/>standard semantics on any application components configured in the `ApplicationContext`.<br/>However, these lifecycle annotations have limited usage within an actual test class.<br/><br/>If a method within a test class is annotated with `@PostConstruct`, that method runs<br/>before any before methods of the underlying test framework (for example, methods<br/>annotated with JUnit Jupiter’s `@BeforeEach`), and that applies for every test method in<br/>the test class. On the other hand, if a method within a test class is annotated with`@PreDestroy`, that method never runs. 
Therefore, within a test class, we recommend that<br/>you use test lifecycle callbacks from the underlying test framework instead of`@PostConstruct` and `@PreDestroy`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.4.3. Spring JUnit 4 Testing Annotations + +The following annotations are supported only when used in conjunction with the[SpringRunner](#testcontext-junit4-runner), [Spring’s JUnit 4 +rules](#testcontext-junit4-rules), or [Spring’s JUnit 4 support classes](#testcontext-support-classes-junit4): + +* [`@IfProfileValue`](#integration-testing-annotations-junit4-ifprofilevalue) + +* [`@ProfileValueSourceConfiguration`](#integration-testing-annotations-junit4-profilevaluesourceconfiguration) + +* [`@Timed`](#integration-testing-annotations-junit4-timed) + +* [`@Repeat`](#integration-testing-annotations-junit4-repeat) + +##### `@IfProfileValue` + +`@IfProfileValue` indicates that the annotated test is enabled for a specific testing +environment. If the configured `ProfileValueSource` returns a matching `value` for the +provided `name`, the test is enabled. Otherwise, the test is disabled and, effectively, +ignored. 
The absence of `@IfProfileValue` means the test is implicitly enabled. This is analogous to the semantics of JUnit 4’s `@Ignore` annotation, except that the presence of `@Ignore` always disables a test.
+`@Timed` indicates that the annotated test method must finish execution in a specified
+time period (in milliseconds). If the test execution time exceeds the specified time
+period, the test fails. 
+ +The time period includes running the test method itself, any repetitions of the test (see`@Repeat`), as well as any setting up or tearing down of the test fixture. The following +example shows how to use it: + +Java + +``` +@Timed(millis = 1000) (1) +public void testProcessWithOneSecondTimeout() { + // some logic that should not take longer than 1 second to run +} +``` + +|**1**|Set the time period for the test to one second.| +|-----|-----------------------------------------------| + +Kotlin + +``` +@Timed(millis = 1000) (1) +fun testProcessWithOneSecondTimeout() { + // some logic that should not take longer than 1 second to run +} +``` + +|**1**|Set the time period for the test to one second.| +|-----|-----------------------------------------------| + +Spring’s `@Timed` annotation has different semantics than JUnit 4’s `@Test(timeout=…​)`support. Specifically, due to the manner in which JUnit 4 handles test execution timeouts +(that is, by executing the test method in a separate `Thread`), `@Test(timeout=…​)`preemptively fails the test if the test takes too long. Spring’s `@Timed`, on the other +hand, does not preemptively fail the test but rather waits for the test to complete +before failing. + +##### `@Repeat` + +`@Repeat` indicates that the annotated test method must be run repeatedly. The number of +times that the test method is to be run is specified in the annotation. + +The scope of execution to be repeated includes execution of the test method itself as +well as any setting up or tearing down of the test fixture. When used with the[`SpringMethodRule`](#testcontext-junit4-rules), the scope additionally includes +preparation of the test instance by `TestExecutionListener` implementations. The +following example shows how to use the `@Repeat` annotation: + +Java + +``` +@Repeat(10) (1) +@Test +public void testProcessRepeatedly() { + // ... 
+} +``` + +|**1**|Repeat this test ten times.| +|-----|---------------------------| + +Kotlin + +``` +@Repeat(10) (1) +@Test +fun testProcessRepeatedly() { + // ... +} +``` + +|**1**|Repeat this test ten times.| +|-----|---------------------------| + +#### 3.4.4. Spring JUnit Jupiter Testing Annotations + +The following annotations are supported when used in conjunction with the[`SpringExtension`](#testcontext-junit-jupiter-extension) and JUnit Jupiter +(that is, the programming model in JUnit 5): + +* [`@SpringJUnitConfig`](#integration-testing-annotations-junit-jupiter-springjunitconfig) + +* [`@SpringJUnitWebConfig`](#integration-testing-annotations-junit-jupiter-springjunitwebconfig) + +* [`@TestConstructor`](#integration-testing-annotations-testconstructor) + +* [`@NestedTestConfiguration`](#integration-testing-annotations-nestedtestconfiguration) + +* [`@EnabledIf`](#integration-testing-annotations-junit-jupiter-enabledif) + +* [`@DisabledIf`](#integration-testing-annotations-junit-jupiter-disabledif) + +##### `@SpringJUnitConfig` + +`@SpringJUnitConfig` is a composed annotation that combines`@ExtendWith(SpringExtension.class)` from JUnit Jupiter with `@ContextConfiguration` from +the Spring TestContext Framework. It can be used at the class level as a drop-in +replacement for `@ContextConfiguration`. With regard to configuration options, the only +difference between `@ContextConfiguration` and `@SpringJUnitConfig` is that component +classes may be declared with the `value` attribute in `@SpringJUnitConfig`. + +The following example shows how to use the `@SpringJUnitConfig` annotation to specify a +configuration class: + +Java + +``` +@SpringJUnitConfig(TestConfig.class) (1) +class ConfigurationClassJUnitJupiterSpringTests { + // class body... 
+} +``` + +|**1**|Specify the configuration class.| +|-----|--------------------------------| + +Kotlin + +``` +@SpringJUnitConfig(TestConfig::class) (1) +class ConfigurationClassJUnitJupiterSpringTests { + // class body... +} +``` + +|**1**|Specify the configuration class.| +|-----|--------------------------------| + +The following example shows how to use the `@SpringJUnitConfig` annotation to specify the +location of a configuration file: + +Java + +``` +@SpringJUnitConfig(locations = "/test-config.xml") (1) +class XmlJUnitJupiterSpringTests { + // class body... +} +``` + +|**1**|Specify the location of a configuration file.| +|-----|---------------------------------------------| + +Kotlin + +``` +@SpringJUnitConfig(locations = ["/test-config.xml"]) (1) +class XmlJUnitJupiterSpringTests { + // class body... +} +``` + +|**1**|Specify the location of a configuration file.| +|-----|---------------------------------------------| + +See [Context Management](#testcontext-ctx-management) as well as the javadoc for[`@SpringJUnitConfig`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/junit/jupiter/SpringJUnitConfig.html)and `@ContextConfiguration` for further details. + +##### `@SpringJUnitWebConfig` + +`@SpringJUnitWebConfig` is a composed annotation that combines`@ExtendWith(SpringExtension.class)` from JUnit Jupiter with `@ContextConfiguration` and`@WebAppConfiguration` from the Spring TestContext Framework. You can use it at the class +level as a drop-in replacement for `@ContextConfiguration` and `@WebAppConfiguration`. +With regard to configuration options, the only difference between `@ContextConfiguration`and `@SpringJUnitWebConfig` is that you can declare component classes by using the`value` attribute in `@SpringJUnitWebConfig`. In addition, you can override the `value`attribute from `@WebAppConfiguration` only by using the `resourcePath` attribute in`@SpringJUnitWebConfig`. 
+ +The following example shows how to use the `@SpringJUnitWebConfig` annotation to specify +a configuration class: + +Java + +``` +@SpringJUnitWebConfig(TestConfig.class) (1) +class ConfigurationClassJUnitJupiterSpringWebTests { + // class body... +} +``` + +|**1**|Specify the configuration class.| +|-----|--------------------------------| + +Kotlin + +``` +@SpringJUnitWebConfig(TestConfig::class) (1) +class ConfigurationClassJUnitJupiterSpringWebTests { + // class body... +} +``` + +|**1**|Specify the configuration class.| +|-----|--------------------------------| + +The following example shows how to use the `@SpringJUnitWebConfig` annotation to specify the +location of a configuration file: + +Java + +``` +@SpringJUnitWebConfig(locations = "/test-config.xml") (1) +class XmlJUnitJupiterSpringWebTests { + // class body... +} +``` + +|**1**|Specify the location of a configuration file.| +|-----|---------------------------------------------| + +Kotlin + +``` +@SpringJUnitWebConfig(locations = ["/test-config.xml"]) (1) +class XmlJUnitJupiterSpringWebTests { + // class body... +} +``` + +|**1**|Specify the location of a configuration file.| +|-----|---------------------------------------------| + +See [Context Management](#testcontext-ctx-management) as well as the javadoc for[`@SpringJUnitWebConfig`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/junit/jupiter/web/SpringJUnitWebConfig.html),[`@ContextConfiguration`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/ContextConfiguration.html), and[`@WebAppConfiguration`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/web/WebAppConfiguration.html)for further details. 
+ +##### `@TestConstructor` + +`@TestConstructor` is a type-level annotation that is used to configure how the parameters +of a test class constructor are autowired from components in the test’s`ApplicationContext`. + +If `@TestConstructor` is not present or meta-present on a test class, the default *test +constructor autowire mode* will be used. See the tip below for details on how to change +the default mode. Note, however, that a local declaration of `@Autowired` on a +constructor takes precedence over both `@TestConstructor` and the default mode. + +| |Changing the default test constructor autowire mode<br/><br/>The default *test constructor autowire mode* can be changed by setting the`spring.test.constructor.autowire.mode` JVM system property to `all`. Alternatively, the<br/>default mode may be set via the[`SpringProperties`](appendix.html#appendix-spring-properties) mechanism.<br/><br/>As of Spring Framework 5.3, the default mode may also be configured as a[JUnit Platform configuration parameter](https://junit.org/junit5/docs/current/user-guide/#running-tests-config-params).<br/><br/>If the `spring.test.constructor.autowire.mode` property is not set, test class<br/>constructors will not be automatically autowired.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |As of Spring Framework 5.2, `@TestConstructor` is only supported in 
conjunction<br/>with the `SpringExtension` for use with JUnit Jupiter. Note that the `SpringExtension` is<br/>often automatically registered for you – for example, when using annotations such as`@SpringJUnitConfig` and `@SpringJUnitWebConfig` or various test-related annotations from<br/>Spring Boot Test.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `@NestedTestConfiguration` + +`@NestedTestConfiguration` is a type-level annotation that is used to configure how +Spring test configuration annotations are processed within enclosing class hierarchies +for inner test classes. + +If `@NestedTestConfiguration` is not present or meta-present on a test class, in its +super type hierarchy, or in its enclosing class hierarchy, the default *enclosing +configuration inheritance mode* will be used. See the tip below for details on how to +change the default mode. + +| |Changing the default enclosing configuration inheritance mode<br/><br/>The default *enclosing configuration inheritance mode* is `INHERIT`, but it can be<br/>changed by setting the `spring.test.enclosing.configuration` JVM system property to`OVERRIDE`. 
Alternatively, the default mode may be set via the[`SpringProperties`](appendix.html#appendix-spring-properties) mechanism.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The [Spring TestContext Framework](#testcontext-framework) honors `@NestedTestConfiguration` semantics for the +following annotations. + +* [`@BootstrapWith`](#spring-testing-annotation-bootstrapwith) + +* [`@ContextConfiguration`](#spring-testing-annotation-contextconfiguration) + +* [`@WebAppConfiguration`](#spring-testing-annotation-webappconfiguration) + +* [`@ContextHierarchy`](#spring-testing-annotation-contexthierarchy) + +* [`@ActiveProfiles`](#spring-testing-annotation-activeprofiles) + +* [`@TestPropertySource`](#spring-testing-annotation-testpropertysource) + +* [`@DynamicPropertySource`](#spring-testing-annotation-dynamicpropertysource) + +* [`@DirtiesContext`](#spring-testing-annotation-dirtiescontext) + +* [`@TestExecutionListeners`](#spring-testing-annotation-testexecutionlisteners) + +* [`@RecordApplicationEvents`](#spring-testing-annotation-recordapplicationevents) + +* [`@Transactional`](#testcontext-tx) + +* [`@Commit`](#spring-testing-annotation-commit) + +* [`@Rollback`](#spring-testing-annotation-rollback) + +* [`@Sql`](#spring-testing-annotation-sql) + +* [`@SqlConfig`](#spring-testing-annotation-sqlconfig) + +* [`@SqlMergeMode`](#spring-testing-annotation-sqlmergemode) + +* [`@TestConstructor`](#integration-testing-annotations-testconstructor) + +| |The use of `@NestedTestConfiguration` typically only makes sense in conjunction<br/>with `@Nested` test classes in JUnit Jupiter; however, there may be other testing<br/>frameworks with support 
for Spring and nested test classes that make use of this<br/>annotation.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [`@Nested` test class configuration](#testcontext-junit-jupiter-nested-test-configuration) for an example and further +details. + +##### `@EnabledIf` + +`@EnabledIf` is used to signal that the annotated JUnit Jupiter test class or test method +is enabled and should be run if the supplied `expression` evaluates to `true`. +Specifically, if the expression evaluates to `Boolean.TRUE` or a `String` equal to `true`(ignoring case), the test is enabled. When applied at the class level, all test methods +within that class are automatically enabled by default as well. + +Expressions can be any of the following: + +* [Spring Expression Language](core.html#expressions) (SpEL) expression. For example:`@EnabledIf("#{systemProperties['os.name'].toLowerCase().contains('mac')}")` + +* Placeholder for a property available in the Spring [`Environment`](core.html#beans-environment). + For example: `@EnabledIf("${smoke.tests.enabled}")` + +* Text literal. For example: `@EnabledIf("true")` + +Note, however, that a text literal that is not the result of dynamic resolution of a +property placeholder is of zero practical value, since `@EnabledIf("false")` is +equivalent to `@Disabled` and `@EnabledIf("true")` is logically meaningless. + +You can use `@EnabledIf` as a meta-annotation to create custom composed annotations. 
For +example, you can create a custom `@EnabledOnMac` annotation as follows: + +Java + +``` +@Target({ElementType.TYPE, ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@EnabledIf( + expression = "#{systemProperties['os.name'].toLowerCase().contains('mac')}", + reason = "Enabled on Mac OS" +) +public @interface EnabledOnMac {} +``` + +Kotlin + +``` +@Target(AnnotationTarget.TYPE, AnnotationTarget.FUNCTION) +@Retention(AnnotationRetention.RUNTIME) +@EnabledIf( + expression = "#{systemProperties['os.name'].toLowerCase().contains('mac')}", + reason = "Enabled on Mac OS" +) +annotation class EnabledOnMac {} +``` + +##### `@DisabledIf` + +`@DisabledIf` is used to signal that the annotated JUnit Jupiter test class or test +method is disabled and should not be run if the supplied `expression` evaluates to`true`. Specifically, if the expression evaluates to `Boolean.TRUE` or a `String` equal +to `true` (ignoring case), the test is disabled. When applied at the class level, all +test methods within that class are automatically disabled as well. + +Expressions can be any of the following: + +* [Spring Expression Language](core.html#expressions) (SpEL) expression. For example:`@DisabledIf("#{systemProperties['os.name'].toLowerCase().contains('mac')}")` + +* Placeholder for a property available in the Spring [`Environment`](core.html#beans-environment). + For example: `@DisabledIf("${smoke.tests.disabled}")` + +* Text literal. For example: `@DisabledIf("true")` + +Note, however, that a text literal that is not the result of dynamic resolution of a +property placeholder is of zero practical value, since `@DisabledIf("true")` is +equivalent to `@Disabled` and `@DisabledIf("false")` is logically meaningless. + +You can use `@DisabledIf` as a meta-annotation to create custom composed annotations. 
For +example, you can create a custom `@DisabledOnMac` annotation as follows: + +Java + +``` +@Target({ElementType.TYPE, ElementType.METHOD}) +@Retention(RetentionPolicy.RUNTIME) +@DisabledIf( + expression = "#{systemProperties['os.name'].toLowerCase().contains('mac')}", + reason = "Disabled on Mac OS" +) +public @interface DisabledOnMac {} +``` + +Kotlin + +``` +@Target(AnnotationTarget.TYPE, AnnotationTarget.FUNCTION) +@Retention(AnnotationRetention.RUNTIME) +@DisabledIf( + expression = "#{systemProperties['os.name'].toLowerCase().contains('mac')}", + reason = "Disabled on Mac OS" +) +annotation class DisabledOnMac {} +``` + +#### 3.4.5. Meta-Annotation Support for Testing + +You can use most test-related annotations as[meta-annotations](core.html#beans-meta-annotations) to create custom composed +annotations and reduce configuration duplication across a test suite. + +You can use each of the following as a meta-annotation in conjunction with the[TestContext framework](#testcontext-framework). 
+ +* `@BootstrapWith` + +* `@ContextConfiguration` + +* `@ContextHierarchy` + +* `@ActiveProfiles` + +* `@TestPropertySource` + +* `@DirtiesContext` + +* `@WebAppConfiguration` + +* `@TestExecutionListeners` + +* `@Transactional` + +* `@BeforeTransaction` + +* `@AfterTransaction` + +* `@Commit` + +* `@Rollback` + +* `@Sql` + +* `@SqlConfig` + +* `@SqlMergeMode` + +* `@SqlGroup` + +* `@Repeat` *(only supported on JUnit 4)* + +* `@Timed` *(only supported on JUnit 4)* + +* `@IfProfileValue` *(only supported on JUnit 4)* + +* `@ProfileValueSourceConfiguration` *(only supported on JUnit 4)* + +* `@SpringJUnitConfig` *(only supported on JUnit Jupiter)* + +* `@SpringJUnitWebConfig` *(only supported on JUnit Jupiter)* + +* `@TestConstructor` *(only supported on JUnit Jupiter)* + +* `@NestedTestConfiguration` *(only supported on JUnit Jupiter)* + +* `@EnabledIf` *(only supported on JUnit Jupiter)* + +* `@DisabledIf` *(only supported on JUnit Jupiter)* + +Consider the following example: + +Java + +``` +@RunWith(SpringRunner.class) +@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"}) +@ActiveProfiles("dev") +@Transactional +public class OrderRepositoryTests { } + +@RunWith(SpringRunner.class) +@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"}) +@ActiveProfiles("dev") +@Transactional +public class UserRepositoryTests { } +``` + +Kotlin + +``` +@RunWith(SpringRunner::class) +@ContextConfiguration("/app-config.xml", "/test-data-access-config.xml") +@ActiveProfiles("dev") +@Transactional +class OrderRepositoryTests { } + +@RunWith(SpringRunner::class) +@ContextConfiguration("/app-config.xml", "/test-data-access-config.xml") +@ActiveProfiles("dev") +@Transactional +class UserRepositoryTests { } +``` + +If we discover that we are repeating the preceding configuration across our JUnit 4-based +test suite, we can reduce the duplication by introducing a custom composed annotation +that centralizes the common test configuration for Spring, 
as follows: + +Java + +``` +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"}) +@ActiveProfiles("dev") +@Transactional +public @interface TransactionalDevTestConfig { } +``` + +Kotlin + +``` +@Target(AnnotationTarget.TYPE) +@Retention(AnnotationRetention.RUNTIME) +@ContextConfiguration("/app-config.xml", "/test-data-access-config.xml") +@ActiveProfiles("dev") +@Transactional +annotation class TransactionalDevTestConfig { } +``` + +Then we can use our custom `@TransactionalDevTestConfig` annotation to simplify the +configuration of individual JUnit 4 based test classes, as follows: + +Java + +``` +@RunWith(SpringRunner.class) +@TransactionalDevTestConfig +public class OrderRepositoryTests { } + +@RunWith(SpringRunner.class) +@TransactionalDevTestConfig +public class UserRepositoryTests { } +``` + +Kotlin + +``` +@RunWith(SpringRunner::class) +@TransactionalDevTestConfig +class OrderRepositoryTests + +@RunWith(SpringRunner::class) +@TransactionalDevTestConfig +class UserRepositoryTests +``` + +If we write tests that use JUnit Jupiter, we can reduce code duplication even further, +since annotations in JUnit 5 can also be used as meta-annotations. 
Consider the following +example: + +Java + +``` +@ExtendWith(SpringExtension.class) +@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"}) +@ActiveProfiles("dev") +@Transactional +class OrderRepositoryTests { } + +@ExtendWith(SpringExtension.class) +@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"}) +@ActiveProfiles("dev") +@Transactional +class UserRepositoryTests { } +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +@ContextConfiguration("/app-config.xml", "/test-data-access-config.xml") +@ActiveProfiles("dev") +@Transactional +class OrderRepositoryTests { } + +@ExtendWith(SpringExtension::class) +@ContextConfiguration("/app-config.xml", "/test-data-access-config.xml") +@ActiveProfiles("dev") +@Transactional +class UserRepositoryTests { } +``` + +If we discover that we are repeating the preceding configuration across our JUnit +Jupiter-based test suite, we can reduce the duplication by introducing a custom composed +annotation that centralizes the common test configuration for Spring and JUnit Jupiter, +as follows: + +Java + +``` +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +@ExtendWith(SpringExtension.class) +@ContextConfiguration({"/app-config.xml", "/test-data-access-config.xml"}) +@ActiveProfiles("dev") +@Transactional +public @interface TransactionalDevTestConfig { } +``` + +Kotlin + +``` +@Target(AnnotationTarget.TYPE) +@Retention(AnnotationRetention.RUNTIME) +@ExtendWith(SpringExtension::class) +@ContextConfiguration("/app-config.xml", "/test-data-access-config.xml") +@ActiveProfiles("dev") +@Transactional +annotation class TransactionalDevTestConfig { } +``` + +Then we can use our custom `@TransactionalDevTestConfig` annotation to simplify the +configuration of individual JUnit Jupiter based test classes, as follows: + +Java + +``` +@TransactionalDevTestConfig +class OrderRepositoryTests { } + +@TransactionalDevTestConfig +class UserRepositoryTests { } +``` + +Kotlin + +``` 
+Since JUnit Jupiter supports the use of `@Test`, `@RepeatedTest`, `@ParameterizedTest`,
+and others as meta-annotations, you can also create custom composed annotations at the
+test method level.
The TestContext framework also places a great +deal of importance on convention over configuration, with reasonable defaults that you +can override through annotation-based configuration. + +In addition to generic testing infrastructure, the TestContext framework provides +explicit support for JUnit 4, JUnit Jupiter (AKA JUnit 5), and TestNG. For JUnit 4 and +TestNG, Spring provides `abstract` support classes. Furthermore, Spring provides a custom +JUnit `Runner` and custom JUnit `Rules` for JUnit 4 and a custom `Extension` for JUnit +Jupiter that let you write so-called POJO test classes. POJO test classes are not +required to extend a particular class hierarchy, such as the `abstract` support classes. + +The following section provides an overview of the internals of the TestContext framework. +If you are interested only in using the framework and are not interested in extending it +with your own custom listeners or custom loaders, feel free to go directly to the +configuration ([context management](#testcontext-ctx-management),[dependency injection](#testcontext-fixture-di), [transaction +management](#testcontext-tx)), [support classes](#testcontext-support-classes), and[annotation support](#integration-testing-annotations) sections. + +#### 3.5.1. Key Abstractions + +The core of the framework consists of the `TestContextManager` class and the`TestContext`, `TestExecutionListener`, and `SmartContextLoader` interfaces. A`TestContextManager` is created for each test class (for example, for the execution of +all test methods within a single test class in JUnit Jupiter). The `TestContextManager`, +in turn, manages a `TestContext` that holds the context of the current test. The`TestContextManager` also updates the state of the `TestContext` as the test progresses +and delegates to `TestExecutionListener` implementations, which instrument the actual +test execution by providing dependency injection, managing transactions, and so on. 
A`SmartContextLoader` is responsible for loading an `ApplicationContext` for a given test +class. See the [javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/package-summary.html) and the +Spring test suite for further information and examples of various implementations. + +##### `TestContext` + +`TestContext` encapsulates the context in which a test is run (agnostic of the +actual testing framework in use) and provides context management and caching support for +the test instance for which it is responsible. The `TestContext` also delegates to a`SmartContextLoader` to load an `ApplicationContext` if requested. + +##### `TestContextManager` + +`TestContextManager` is the main entry point into the Spring TestContext Framework and is +responsible for managing a single `TestContext` and signaling events to each registered`TestExecutionListener` at well-defined test execution points: + +* Prior to any “before class” or “before all” methods of a particular testing framework. + +* Test instance post-processing. + +* Prior to any “before” or “before each” methods of a particular testing framework. + +* Immediately before execution of the test method but after test setup. + +* Immediately after execution of the test method but before test tear down. + +* After any “after” or “after each” methods of a particular testing framework. + +* After any “after class” or “after all” methods of a particular testing framework. + +##### `TestExecutionListener` + +`TestExecutionListener` defines the API for reacting to test-execution events published by +the `TestContextManager` with which the listener is registered. See [`TestExecutionListener` Configuration](#testcontext-tel-config). + +##### Context Loaders + +`ContextLoader` is a strategy interface for loading an `ApplicationContext` for an +integration test managed by the Spring TestContext Framework. 
You should implement`SmartContextLoader` instead of this interface to provide support for component classes, +active bean definition profiles, test property sources, context hierarchies, and`WebApplicationContext` support. + +`SmartContextLoader` is an extension of the `ContextLoader` interface that supersedes the +original minimal `ContextLoader` SPI. Specifically, a `SmartContextLoader` can choose to +process resource locations, component classes, or context initializers. Furthermore, a`SmartContextLoader` can set active bean definition profiles and test property sources in +the context that it loads. + +Spring provides the following implementations: + +* `DelegatingSmartContextLoader`: One of two default loaders, it delegates internally to + an `AnnotationConfigContextLoader`, a `GenericXmlContextLoader`, or a`GenericGroovyXmlContextLoader`, depending either on the configuration declared for the + test class or on the presence of default locations or default configuration classes. + Groovy support is enabled only if Groovy is on the classpath. + +* `WebDelegatingSmartContextLoader`: One of two default loaders, it delegates internally + to an `AnnotationConfigWebContextLoader`, a `GenericXmlWebContextLoader`, or a`GenericGroovyXmlWebContextLoader`, depending either on the configuration declared for + the test class or on the presence of default locations or default configuration + classes. A web `ContextLoader` is used only if `@WebAppConfiguration` is present on the + test class. Groovy support is enabled only if Groovy is on the classpath. + +* `AnnotationConfigContextLoader`: Loads a standard `ApplicationContext` from component + classes. + +* `AnnotationConfigWebContextLoader`: Loads a `WebApplicationContext` from component + classes. + +* `GenericGroovyXmlContextLoader`: Loads a standard `ApplicationContext` from resource + locations that are either Groovy scripts or XML configuration files. 
+ +* `GenericGroovyXmlWebContextLoader`: Loads a `WebApplicationContext` from resource + locations that are either Groovy scripts or XML configuration files. + +* `GenericXmlContextLoader`: Loads a standard `ApplicationContext` from XML resource + locations. + +* `GenericXmlWebContextLoader`: Loads a `WebApplicationContext` from XML resource + locations. + +#### 3.5.2. Bootstrapping the TestContext Framework + +The default configuration for the internals of the Spring TestContext Framework is +sufficient for all common use cases. However, there are times when a development team or +third party framework would like to change the default `ContextLoader`, implement a +custom `TestContext` or `ContextCache`, augment the default sets of`ContextCustomizerFactory` and `TestExecutionListener` implementations, and so on. For +such low-level control over how the TestContext framework operates, Spring provides a +bootstrapping strategy. + +`TestContextBootstrapper` defines the SPI for bootstrapping the TestContext framework. A`TestContextBootstrapper` is used by the `TestContextManager` to load the`TestExecutionListener` implementations for the current test and to build the`TestContext` that it manages. You can configure a custom bootstrapping strategy for a +test class (or test class hierarchy) by using `@BootstrapWith`, either directly or as a +meta-annotation. If a bootstrapper is not explicitly configured by using`@BootstrapWith`, either the `DefaultTestContextBootstrapper` or the`WebTestContextBootstrapper` is used, depending on the presence of `@WebAppConfiguration`. + +Since the `TestContextBootstrapper` SPI is likely to change in the future (to accommodate +new requirements), we strongly encourage implementers not to implement this interface +directly but rather to extend `AbstractTestContextBootstrapper` or one of its concrete +subclasses instead. + +#### 3.5.3. 
`TestExecutionListener` Configuration + +Spring provides the following `TestExecutionListener` implementations that are registered +by default, exactly in the following order: + +* `ServletTestExecutionListener`: Configures Servlet API mocks for a `WebApplicationContext`. + +* `DirtiesContextBeforeModesTestExecutionListener`: Handles the `@DirtiesContext` annotation for “before” modes. + +* `ApplicationEventsTestExecutionListener`: Provides support for [`ApplicationEvents`](#testcontext-application-events). + +* `DependencyInjectionTestExecutionListener`: Provides dependency injection for the test + instance. + +* `DirtiesContextTestExecutionListener`: Handles the `@DirtiesContext` annotation for + “after” modes. + +* `TransactionalTestExecutionListener`: Provides transactional test execution with + default rollback semantics. + +* `SqlScriptsTestExecutionListener`: Runs SQL scripts configured by using the `@Sql` annotation. + +* `EventPublishingTestExecutionListener`: Publishes test execution events to the test’s `ApplicationContext` (see [Test Execution Events](#testcontext-test-execution-events)). + +##### Registering `TestExecutionListener` Implementations + +You can register `TestExecutionListener` implementations for a test class and its +subclasses by using the `@TestExecutionListeners` annotation. See [annotation support](#integration-testing-annotations) and the javadoc for [`@TestExecutionListeners`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/TestExecutionListeners.html) for details and examples. + +##### Automatic Discovery of Default `TestExecutionListener` Implementations + +Registering `TestExecutionListener` implementations by using `@TestExecutionListeners` is +suitable for custom listeners that are used in limited testing scenarios. However, it can +become cumbersome if a custom listener needs to be used across an entire test suite. 
This +issue is addressed through support for automatic discovery of default `TestExecutionListener` implementations through the `SpringFactoriesLoader` mechanism. + +Specifically, the `spring-test` module declares all core default `TestExecutionListener` implementations under the `org.springframework.test.context.TestExecutionListener` key in +its `META-INF/spring.factories` properties file. Third-party frameworks and developers +can contribute their own `TestExecutionListener` implementations to the list of default +listeners in the same manner through their own `META-INF/spring.factories` properties +file. + +##### Ordering `TestExecutionListener` Implementations + +When the TestContext framework discovers default `TestExecutionListener` implementations +through the [aforementioned](#testcontext-tel-config-automatic-discovery) `SpringFactoriesLoader` mechanism, the instantiated listeners are sorted by using +Spring’s `AnnotationAwareOrderComparator`, which honors Spring’s `Ordered` interface and `@Order` annotation for ordering. `AbstractTestExecutionListener` and all default `TestExecutionListener` implementations provided by Spring implement `Ordered` with +appropriate values. Third-party frameworks and developers should therefore make sure that +their default `TestExecutionListener` implementations are registered in the proper order +by implementing `Ordered` or declaring `@Order`. See the javadoc for the `getOrder()` methods of the core default `TestExecutionListener` implementations for details on what +values are assigned to each core listener. + +##### Merging `TestExecutionListener` Implementations + +If a custom `TestExecutionListener` is registered via `@TestExecutionListeners`, the +default listeners are not registered. In most common testing scenarios, this effectively +forces the developer to manually declare all default listeners in addition to any custom +listeners. 
The following listing demonstrates this style of configuration: + +Java + +``` +@ContextConfiguration +@TestExecutionListeners({ + MyCustomTestExecutionListener.class, + ServletTestExecutionListener.class, + DirtiesContextBeforeModesTestExecutionListener.class, + DependencyInjectionTestExecutionListener.class, + DirtiesContextTestExecutionListener.class, + TransactionalTestExecutionListener.class, + SqlScriptsTestExecutionListener.class +}) +class MyTest { + // class body... +} +``` + +Kotlin + +``` +@ContextConfiguration +@TestExecutionListeners( + MyCustomTestExecutionListener::class, + ServletTestExecutionListener::class, + DirtiesContextBeforeModesTestExecutionListener::class, + DependencyInjectionTestExecutionListener::class, + DirtiesContextTestExecutionListener::class, + TransactionalTestExecutionListener::class, + SqlScriptsTestExecutionListener::class +) +class MyTest { + // class body... +} +``` + +The challenge with this approach is that it requires that the developer know exactly +which listeners are registered by default. Moreover, the set of default listeners can +change from release to release — for example, `SqlScriptsTestExecutionListener` was +introduced in Spring Framework 4.1, and `DirtiesContextBeforeModesTestExecutionListener`was introduced in Spring Framework 4.2. Furthermore, third-party frameworks like Spring +Boot and Spring Security register their own default `TestExecutionListener`implementations by using the aforementioned [automatic discovery mechanism](#testcontext-tel-config-automatic-discovery). + +To avoid having to be aware of and re-declare all default listeners, you can set the`mergeMode` attribute of `@TestExecutionListeners` to `MergeMode.MERGE_WITH_DEFAULTS`.`MERGE_WITH_DEFAULTS` indicates that locally declared listeners should be merged with the +default listeners. 
The merging algorithm ensures that duplicates are removed from the +list and that the resulting set of merged listeners is sorted according to the semantics +of `AnnotationAwareOrderComparator`, as described in [Ordering `TestExecutionListener` Implementations](#testcontext-tel-config-ordering). +If a listener implements `Ordered` or is annotated with `@Order`, it can influence the +position in which it is merged with the defaults. Otherwise, locally declared listeners +are appended to the list of default listeners when merged. + +For example, if the `MyCustomTestExecutionListener` class in the previous example +configures its `order` value (for example, `500`) to be less than the order of the`ServletTestExecutionListener` (which happens to be `1000`), the`MyCustomTestExecutionListener` can then be automatically merged with the list of +defaults in front of the `ServletTestExecutionListener`, and the previous example could +be replaced with the following: + +Java + +``` +@ContextConfiguration +@TestExecutionListeners( + listeners = MyCustomTestExecutionListener.class, + mergeMode = MERGE_WITH_DEFAULTS +) +class MyTest { + // class body... +} +``` + +Kotlin + +``` +@ContextConfiguration +@TestExecutionListeners( + listeners = [MyCustomTestExecutionListener::class], + mergeMode = MERGE_WITH_DEFAULTS +) +class MyTest { + // class body... +} +``` + +#### 3.5.4. Application Events + +Since Spring Framework 5.3.3, the TestContext framework provides support for recording[application events](core.html#context-functionality-events) published in the`ApplicationContext` so that assertions can be performed against those events within +tests. All events published during the execution of a single test are made available via +the `ApplicationEvents` API which allows you to process the events as a`java.util.Stream`. + +To use `ApplicationEvents` in your tests, do the following. 
+ +* Ensure that your test class is annotated or meta-annotated with[`@RecordApplicationEvents`](#spring-testing-annotation-recordapplicationevents). + +* Ensure that the `ApplicationEventsTestExecutionListener` is registered. Note, however, + that `ApplicationEventsTestExecutionListener` is registered by default and only needs + to be manually registered if you have custom configuration via`@TestExecutionListeners` that does not include the default listeners. + +* Annotate a field of type `ApplicationEvents` with `@Autowired` and use that instance of`ApplicationEvents` in your test and lifecycle methods (such as `@BeforeEach` and`@AfterEach` methods in JUnit Jupiter). + + * When using the [SpringExtension for JUnit Jupiter](#testcontext-junit-jupiter-extension), you may declare a method + parameter of type `ApplicationEvents` in a test or lifecycle method as an alternative + to an `@Autowired` field in the test class. + +The following test class uses the `SpringExtension` for JUnit Jupiter and[AssertJ](https://assertj.github.io/doc/) to assert the types of application events +published while invoking a method in a Spring-managed component: + +Java + +``` +@SpringJUnitConfig(/* ... */) +@RecordApplicationEvents (1) +class OrderServiceTests { + + @Autowired + OrderService orderService; + + @Autowired + ApplicationEvents events; (2) + + @Test + void submitOrder() { + // Invoke method in OrderService that publishes an event + orderService.submitOrder(new Order(/* ... */)); + // Verify that an OrderSubmitted event was published + long numEvents = events.stream(OrderSubmitted.class).count(); (3) + assertThat(numEvents).isEqualTo(1); + } +} +``` + +|**1**| Annotate the test class with `@RecordApplicationEvents`. | +|-----|-----------------------------------------------------------------------------------------| +|**2**| Inject the `ApplicationEvents` instance for the current test. 
| +|**3**|Use the `ApplicationEvents` API to count how many `OrderSubmitted` events were published.| + +Kotlin + +``` +@SpringJUnitConfig(/* ... */) +@RecordApplicationEvents (1) +class OrderServiceTests { + + @Autowired + lateinit var orderService: OrderService + + @Autowired + lateinit var events: ApplicationEvents (2) + + @Test + fun submitOrder() { + // Invoke method in OrderService that publishes an event + orderService.submitOrder(Order(/* ... */)) + // Verify that an OrderSubmitted event was published + val numEvents = events.stream(OrderSubmitted::class).count() (3) + assertThat(numEvents).isEqualTo(1) + } +} +``` + +|**1**| Annotate the test class with `@RecordApplicationEvents`. | +|-----|-----------------------------------------------------------------------------------------| +|**2**| Inject the `ApplicationEvents` instance for the current test. | +|**3**|Use the `ApplicationEvents` API to count how many `OrderSubmitted` events were published.| + +See the[`ApplicationEvents`javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/event/ApplicationEvents.html) for further details regarding the `ApplicationEvents` API. + +#### 3.5.5. Test Execution Events + +The `EventPublishingTestExecutionListener` introduced in Spring Framework 5.2 offers an +alternative approach to implementing a custom `TestExecutionListener`. Components in the +test’s `ApplicationContext` can listen to the following events published by the`EventPublishingTestExecutionListener`, each of which corresponds to a method in the`TestExecutionListener` API. 
+ +* `BeforeTestClassEvent` + +* `PrepareTestInstanceEvent` + +* `BeforeTestMethodEvent` + +* `BeforeTestExecutionEvent` + +* `AfterTestExecutionEvent` + +* `AfterTestMethodEvent` + +* `AfterTestClassEvent` + +| |These events are only published if the `ApplicationContext` has already been loaded.| +|---|------------------------------------------------------------------------------------| + +These events may be consumed for various reasons, such as resetting mock beans or tracing +test execution. One advantage of consuming test execution events rather than implementing +a custom `TestExecutionListener` is that test execution events may be consumed by any +Spring bean registered in the test `ApplicationContext`, and such beans may benefit +directly from dependency injection and other features of the `ApplicationContext`. In +contrast, a `TestExecutionListener` is not a bean in the `ApplicationContext`. + +In order to listen to test execution events, a Spring bean may choose to implement the`org.springframework.context.ApplicationListener` interface. Alternatively, listener +methods can be annotated with `@EventListener` and configured to listen to one of the +particular event types listed above (see[Annotation-based Event Listeners](core.html#context-functionality-events-annotation)). +Due to the popularity of this approach, Spring provides the following dedicated`@EventListener` annotations to simplify registration of test execution event listeners. +These annotations reside in the `org.springframework.test.context.event.annotation`package. + +* `@BeforeTestClass` + +* `@PrepareTestInstance` + +* `@BeforeTestMethod` + +* `@BeforeTestExecution` + +* `@AfterTestExecution` + +* `@AfterTestMethod` + +* `@AfterTestClass` + +##### Exception Handling + +By default, if a test execution event listener throws an exception while consuming an +event, that exception will propagate to the underlying testing framework in use (such as +JUnit or TestNG). 
For example, if the consumption of a `BeforeTestMethodEvent` results in +an exception, the corresponding test method will fail as a result of the exception. In +contrast, if an asynchronous test execution event listener throws an exception, the +exception will not propagate to the underlying testing framework. For further details on +asynchronous exception handling, consult the class-level javadoc for `@EventListener`. + +##### Asynchronous Listeners + +If you want a particular test execution event listener to process events asynchronously, +you can use Spring’s [regular`@Async` support](integration.html#scheduling-annotation-support-async). For further details, consult the class-level javadoc for`@EventListener`. + +#### 3.5.6. Context Management + +Each `TestContext` provides context management and caching support for the test instance +for which it is responsible. Test instances do not automatically receive access to the +configured `ApplicationContext`. However, if a test class implements the`ApplicationContextAware` interface, a reference to the `ApplicationContext` is supplied +to the test instance. Note that `AbstractJUnit4SpringContextTests` and`AbstractTestNGSpringContextTests` implement `ApplicationContextAware` and, therefore, +provide access to the `ApplicationContext` automatically. 
+ +| |@Autowired ApplicationContext<br/><br/>As an alternative to implementing the `ApplicationContextAware` interface, you can inject<br/>the application context for your test class through the `@Autowired` annotation on either<br/>a field or setter method, as the following example shows:<br/><br/>Java<br/><br/>```<br/>@SpringJUnitConfig<br/>class MyTest {<br/><br/> @Autowired (1)<br/> ApplicationContext applicationContext;<br/><br/> // class body...<br/>}<br/>```<br/><br/>|**1**|Injecting the `ApplicationContext`.|<br/>|-----|-----------------------------------|<br/><br/>Kotlin<br/><br/>```<br/>@SpringJUnitConfig<br/>class MyTest {<br/><br/> @Autowired (1)<br/> lateinit var applicationContext: ApplicationContext<br/><br/> // class body...<br/>}<br/>```<br/><br/>|**1**|Injecting the `ApplicationContext`.|<br/>|-----|-----------------------------------|<br/><br/>Similarly, if your test is configured to load a `WebApplicationContext`, you can inject<br/>the web application context into your test, as follows:<br/><br/>Java<br/><br/>```<br/>@SpringJUnitWebConfig (1)<br/>class MyWebAppTest {<br/><br/> @Autowired (2)<br/> WebApplicationContext wac;<br/><br/> // class body...<br/>}<br/>```<br/><br/>|**1**|Configuring the `WebApplicationContext`.|<br/>|-----|----------------------------------------|<br/>|**2**| Injecting the `WebApplicationContext`. |<br/><br/>Kotlin<br/><br/>```<br/>@SpringJUnitWebConfig (1)<br/>class MyWebAppTest {<br/><br/> @Autowired (2)<br/> lateinit var wac: WebApplicationContext<br/> // class body...<br/>}<br/>```<br/><br/>|**1**|Configuring the `WebApplicationContext`.|<br/>|-----|----------------------------------------|<br/>|**2**| Injecting the `WebApplicationContext`. 
|<br/><br/>Dependency injection by using `@Autowired` is provided by the`DependencyInjectionTestExecutionListener`, which is configured by default<br/>(see [Dependency Injection of Test Fixtures](#testcontext-fixture-di)).| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**1**| Injecting the `ApplicationContext`. | +|**1**| Injecting the `ApplicationContext`. | +|**1**| Configuring the `WebApplicationContext`. | +|**2**| Injecting the `WebApplicationContext`. | +|**1**| Configuring the `WebApplicationContext`. | +|**2**| Injecting the `WebApplicationContext`. | + +Test classes that use the TestContext framework do not need to extend any particular +class or implement a specific interface to configure their application context. Instead, +configuration is achieved by declaring the `@ContextConfiguration` annotation at the +class level. If your test class does not explicitly declare application context resource +locations or component classes, the configured `ContextLoader` determines how to load a +context from a default location or default configuration classes. In addition to context +resource locations and component classes, an application context can also be configured +through application context initializers. + +The following sections explain how to use Spring’s `@ContextConfiguration` annotation to +configure a test `ApplicationContext` by using XML configuration files, Groovy scripts, +component classes (typically `@Configuration` classes), or context initializers. +Alternatively, you can implement and configure your own custom `SmartContextLoader` for +advanced use cases. 
+ +* [Context Configuration with XML resources](#testcontext-ctx-management-xml) + +* [Context Configuration with Groovy Scripts](#testcontext-ctx-management-groovy) + +* [Context Configuration with Component Classes](#testcontext-ctx-management-javaconfig) + +* [Mixing XML, Groovy Scripts, and Component Classes](#testcontext-ctx-management-mixed-config) + +* [Context Configuration with Context Initializers](#testcontext-ctx-management-initializers) + +* [Context Configuration Inheritance](#testcontext-ctx-management-inheritance) + +* [Context Configuration with Environment Profiles](#testcontext-ctx-management-env-profiles) + +* [Context Configuration with Test Property Sources](#testcontext-ctx-management-property-sources) + +* [Context Configuration with Dynamic Property Sources](#testcontext-ctx-management-dynamic-property-sources) + +* [Loading a `WebApplicationContext`](#testcontext-ctx-management-web) + +* [Context Caching](#testcontext-ctx-management-caching) + +* [Context Hierarchies](#testcontext-ctx-management-ctx-hierarchies) + +##### Context Configuration with XML resources + +To load an `ApplicationContext` for your tests by using XML configuration files, annotate +your test class with `@ContextConfiguration` and configure the `locations` attribute with +an array that contains the resource locations of XML configuration metadata. A plain or +relative path (for example, `context.xml`) is treated as a classpath resource that is +relative to the package in which the test class is defined. A path starting with a slash +is treated as an absolute classpath location (for example, `/org/example/config.xml`). A +path that represents a resource URL (i.e., a path prefixed with `classpath:`, `file:`,`http:`, etc.) is used *as is*. 
+ +Java + +``` +@ExtendWith(SpringExtension.class) +// ApplicationContext will be loaded from "/app-config.xml" and +// "/test-config.xml" in the root of the classpath +@ContextConfiguration(locations={"/app-config.xml", "/test-config.xml"}) (1) +class MyTest { + // class body... +} +``` + +|**1**|Setting the locations attribute to a list of XML files.| +|-----|-------------------------------------------------------| + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// ApplicationContext will be loaded from "/app-config.xml" and +// "/test-config.xml" in the root of the classpath +@ContextConfiguration("/app-config.xml", "/test-config.xml") (1) +class MyTest { + // class body... +} +``` + +|**1**|Setting the locations attribute to a list of XML files.| +|-----|-------------------------------------------------------| + +`@ContextConfiguration` supports an alias for the `locations` attribute through the +standard Java `value` attribute. Thus, if you do not need to declare additional +attributes in `@ContextConfiguration`, you can omit the declaration of the `locations` attribute name and declare the resource locations by using the shorthand format +demonstrated in the following example: + +Java + +``` +@ExtendWith(SpringExtension.class) +@ContextConfiguration({"/app-config.xml", "/test-config.xml"}) (1) +class MyTest { + // class body... +} +``` + +|**1**|Specifying XML files without using the `locations` attribute.| +|-----|-------------------------------------------------------------| + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +@ContextConfiguration("/app-config.xml", "/test-config.xml") (1) +class MyTest { + // class body... +} +``` + +|**1**|Specifying XML files without using the `locations` attribute.| +|-----|-------------------------------------------------------------| + +If you omit both the `locations` and the `value` attributes from the `@ContextConfiguration` annotation, the TestContext framework tries to detect a default +XML resource location. 
Specifically, `GenericXmlContextLoader` and`GenericXmlWebContextLoader` detect a default location based on the name of the test +class. If your class is named `com.example.MyTest`, `GenericXmlContextLoader` loads your +application context from `"classpath:com/example/MyTest-context.xml"`. The following +example shows how to do so: + +Java + +``` +@ExtendWith(SpringExtension.class) +// ApplicationContext will be loaded from +// "classpath:com/example/MyTest-context.xml" +@ContextConfiguration (1) +class MyTest { + // class body... +} +``` + +|**1**|Loading configuration from the default location.| +|-----|------------------------------------------------| + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// ApplicationContext will be loaded from +// "classpath:com/example/MyTest-context.xml" +@ContextConfiguration (1) +class MyTest { + // class body... +} +``` + +|**1**|Loading configuration from the default location.| +|-----|------------------------------------------------| + +##### Context Configuration with Groovy Scripts + +To load an `ApplicationContext` for your tests by using Groovy scripts that use the[Groovy Bean Definition DSL](core.html#groovy-bean-definition-dsl), you can annotate +your test class with `@ContextConfiguration` and configure the `locations` or `value`attribute with an array that contains the resource locations of Groovy scripts. Resource +lookup semantics for Groovy scripts are the same as those described for[XML configuration files](#testcontext-ctx-management-xml). 
+ +| |Enabling Groovy script support<br/><br/>Support for using Groovy scripts to load an `ApplicationContext` in the Spring<br/>TestContext Framework is enabled automatically if Groovy is on the classpath.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to specify Groovy configuration files: + +Java + +``` +@ExtendWith(SpringExtension.class) +// ApplicationContext will be loaded from "/AppConfig.groovy" and +// "/TestConfig.groovy" in the root of the classpath +@ContextConfiguration({"/AppConfig.groovy", "/TestConfig.groovy"}) (1) +class MyTest { + // class body... +} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// ApplicationContext will be loaded from "/AppConfig.groovy" and +// "/TestConfig.groovy" in the root of the classpath +@ContextConfiguration("/AppConfig.groovy", "/TestConfig.groovy") (1) +class MyTest { + // class body... +} +``` + +|**1**|Specifying the location of Groovy configuration files.| +|-----|------------------------------------------------------| + +If you omit both the `locations` and `value` attributes from the `@ContextConfiguration` annotation, the TestContext framework tries to detect a default Groovy script. +Specifically, `GenericGroovyXmlContextLoader` and `GenericGroovyXmlWebContextLoader` detect a default location based on the name of the test class. If your class is named `com.example.MyTest`, the Groovy context loader loads your application context from `"classpath:com/example/MyTestContext.groovy"`. The following example shows how to use +the default: + +Java + +``` +@ExtendWith(SpringExtension.class) +// ApplicationContext will be loaded from +// "classpath:com/example/MyTestContext.groovy" +@ContextConfiguration (1) +class MyTest { + // class body... 
+} +``` + +|**1**|Loading configuration from the default location.| +|-----|------------------------------------------------| + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// ApplicationContext will be loaded from +// "classpath:com/example/MyTestContext.groovy" +@ContextConfiguration (1) +class MyTest { + // class body... +} +``` + +|**1**|Loading configuration from the default location.| +|-----|------------------------------------------------| + +| |Declaring XML configuration and Groovy scripts simultaneously<br/><br/>You can declare both XML configuration files and Groovy scripts simultaneously by using<br/>the `locations` or `value` attribute of `@ContextConfiguration`. If the path to a<br/>configured resource location ends with `.xml`, it is loaded by using an`XmlBeanDefinitionReader`. Otherwise, it is loaded by using a`GroovyBeanDefinitionReader`.<br/><br/>The following listing shows how to combine both in an integration test:<br/><br/>Java<br/><br/>```<br/>@ExtendWith(SpringExtension.class)<br/>// ApplicationContext will be loaded from<br/>// "/app-config.xml" and "/TestConfig.groovy"<br/>@ContextConfiguration({ "/app-config.xml", "/TestConfig.groovy" })<br/>class MyTest {<br/> // class body...<br/>}<br/>```<br/><br/>Kotlin<br/><br/>```<br/>@ExtendWith(SpringExtension::class)<br/>// ApplicationContext will be loaded from<br/>// "/app-config.xml" and "/TestConfig.groovy"<br/>@ContextConfiguration("/app-config.xml", "/TestConfig.groovy")<br/>class MyTest {<br/> // class body...<br/>}<br/>```| 
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Context Configuration with Component Classes + +To load an `ApplicationContext` for your tests by using component classes (see[Java-based container configuration](core.html#beans-java)), you can annotate your test +class with `@ContextConfiguration` and configure the `classes` attribute with an array +that contains references to component classes. The following example shows how to do so: + +Java + +``` +@ExtendWith(SpringExtension.class) +// ApplicationContext will be loaded from AppConfig and TestConfig +@ContextConfiguration(classes = {AppConfig.class, TestConfig.class}) (1) +class MyTest { + // class body... 
+} +``` + +|**1**|Specifying component classes.| +|-----|-----------------------------| + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// ApplicationContext will be loaded from AppConfig and TestConfig +@ContextConfiguration(classes = [AppConfig::class, TestConfig::class]) (1) +class MyTest { + // class body... +} +``` + +|**1**|Specifying component classes.| +|-----|-----------------------------| + +| |Component Classes<br/><br/>The term “component class” can refer to any of the following:<br/><br/>* A class annotated with `@Configuration`.<br/><br/>* A component (that is, a class annotated with `@Component`, `@Service`, `@Repository`, or other stereotype annotations).<br/><br/>* A JSR-330 compliant class that is annotated with `javax.inject` annotations.<br/><br/>* Any class that contains `@Bean`-methods.<br/><br/>* Any other class that is intended to be registered as a Spring component (i.e., a Spring<br/> bean in the `ApplicationContext`), potentially taking advantage of automatic autowiring<br/> of a single constructor without the use of Spring annotations.<br/><br/>See the javadoc of[`@Configuration`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/Configuration.html) and[`@Bean`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/Bean.html) for further information<br/>regarding the configuration and semantics of component classes, paying special attention<br/>to the discussion of `@Bean` Lite Mode.| 
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you omit the `classes` attribute from the `@ContextConfiguration` annotation, the +TestContext framework tries to detect the presence of default configuration classes. +Specifically, `AnnotationConfigContextLoader` and `AnnotationConfigWebContextLoader`detect all `static` nested classes of the test class that meet the requirements for +configuration class implementations, as specified in the[`@Configuration`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/context/annotation/Configuration.html) javadoc. +Note that the name of the configuration class is arbitrary. In addition, a test class can +contain more than one `static` nested configuration class if desired. 
In the following +example, the `OrderServiceTest` class declares a `static` nested configuration class +named `Config` that is automatically used to load the `ApplicationContext` for the test +class: + +Java + +``` +@SpringJUnitConfig (1) +// ApplicationContext will be loaded from the +// static nested Config class +class OrderServiceTest { + + @Configuration + static class Config { + + // this bean will be injected into the OrderServiceTest class + @Bean + OrderService orderService() { + OrderService orderService = new OrderServiceImpl(); + // set properties, etc. + return orderService; + } + } + + @Autowired + OrderService orderService; + + @Test + void testOrderService() { + // test the orderService + } + +} +``` + +|**1**|Loading configuration information from the nested `Config` class.| +|-----|-----------------------------------------------------------------| + +Kotlin + +``` +@SpringJUnitConfig (1) +// ApplicationContext will be loaded from the nested Config class +class OrderServiceTest { + + @Autowired + lateinit var orderService: OrderService + + @Configuration + class Config { + + // this bean will be injected into the OrderServiceTest class + @Bean + fun orderService(): OrderService { + // set properties, etc. + return OrderServiceImpl() + } + } + + @Test + fun testOrderService() { + // test the orderService + } +} +``` + +|**1**|Loading configuration information from the nested `Config` class.| +|-----|-----------------------------------------------------------------| + +##### Mixing XML, Groovy Scripts, and Component Classes + +It may sometimes be desirable to mix XML configuration files, Groovy scripts, and +component classes (typically `@Configuration` classes) to configure an`ApplicationContext` for your tests. For example, if you use XML configuration in +production, you may decide that you want to use `@Configuration` classes to configure +specific Spring-managed components for your tests, or vice versa. 
+ +Furthermore, some third-party frameworks (such as Spring Boot) provide first-class +support for loading an `ApplicationContext` from different types of resources +simultaneously (for example, XML configuration files, Groovy scripts, and`@Configuration` classes). The Spring Framework, historically, has not supported this for +standard deployments. Consequently, most of the `SmartContextLoader` implementations that +the Spring Framework delivers in the `spring-test` module support only one resource type +for each test context. However, this does not mean that you cannot use both. One +exception to the general rule is that the `GenericGroovyXmlContextLoader` and`GenericGroovyXmlWebContextLoader` support both XML configuration files and Groovy +scripts simultaneously. Furthermore, third-party frameworks may choose to support the +declaration of both `locations` and `classes` through `@ContextConfiguration`, and, with +the standard testing support in the TestContext framework, you have the following options. + +If you want to use resource locations (for example, XML or Groovy) and `@Configuration`classes to configure your tests, you must pick one as the entry point, and that one must +include or import the other. For example, in XML or Groovy scripts, you can include`@Configuration` classes by using component scanning or defining them as normal Spring +beans, whereas, in a `@Configuration` class, you can use `@ImportResource` to import XML +configuration files or Groovy scripts. Note that this behavior is semantically equivalent +to how you configure your application in production: In production configuration, you +define either a set of XML or Groovy resource locations or a set of `@Configuration`classes from which your production `ApplicationContext` is loaded, but you still have the +freedom to include or import the other type of configuration. 
+ +##### Context Configuration with Context Initializers + +To configure an `ApplicationContext` for your tests by using context initializers, +annotate your test class with `@ContextConfiguration` and configure the `initializers`attribute with an array that contains references to classes that implement`ApplicationContextInitializer`. The declared context initializers are then used to +initialize the `ConfigurableApplicationContext` that is loaded for your tests. Note that +the concrete `ConfigurableApplicationContext` type supported by each declared initializer +must be compatible with the type of `ApplicationContext` created by the`SmartContextLoader` in use (typically a `GenericApplicationContext`). Furthermore, the +order in which the initializers are invoked depends on whether they implement Spring’s`Ordered` interface or are annotated with Spring’s `@Order` annotation or the standard`@Priority` annotation. The following example shows how to use initializers: + +Java + +``` +@ExtendWith(SpringExtension.class) +// ApplicationContext will be loaded from TestConfig +// and initialized by TestAppCtxInitializer +@ContextConfiguration( + classes = TestConfig.class, + initializers = TestAppCtxInitializer.class) (1) +class MyTest { + // class body... +} +``` + +|**1**|Specifying configuration by using a configuration class and an initializer.| +|-----|---------------------------------------------------------------------------| + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// ApplicationContext will be loaded from TestConfig +// and initialized by TestAppCtxInitializer +@ContextConfiguration( + classes = [TestConfig::class], + initializers = [TestAppCtxInitializer::class]) (1) +class MyTest { + // class body... 
+} +``` + +|**1**|Specifying configuration by using a configuration class and an initializer.| +|-----|---------------------------------------------------------------------------| + +You can also omit the declaration of XML configuration files, Groovy scripts, or +component classes in `@ContextConfiguration` entirely and instead declare only`ApplicationContextInitializer` classes, which are then responsible for registering beans +in the context — for example, by programmatically loading bean definitions from XML +files or configuration classes. The following example shows how to do so: + +Java + +``` +@ExtendWith(SpringExtension.class) +// ApplicationContext will be initialized by EntireAppInitializer +// which presumably registers beans in the context +@ContextConfiguration(initializers = EntireAppInitializer.class) (1) +class MyTest { + // class body... +} +``` + +|**1**|Specifying configuration by using only an initializer.| +|-----|------------------------------------------------------| + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// ApplicationContext will be initialized by EntireAppInitializer +// which presumably registers beans in the context +@ContextConfiguration(initializers = [EntireAppInitializer::class]) (1) +class MyTest { + // class body... +} +``` + +|**1**|Specifying configuration by using only an initializer.| +|-----|------------------------------------------------------| + +##### Context Configuration Inheritance + +`@ContextConfiguration` supports boolean `inheritLocations` and `inheritInitializers`attributes that denote whether resource locations or component classes and context +initializers declared by superclasses should be inherited. The default value for both +flags is `true`. This means that a test class inherits the resource locations or +component classes as well as the context initializers declared by any superclasses. 
+Specifically, the resource locations or component classes for a test class are appended +to the list of resource locations or annotated classes declared by superclasses. +Similarly, the initializers for a given test class are added to the set of initializers +defined by test superclasses. Thus, subclasses have the option of extending the resource +locations, component classes, or context initializers. + +If the `inheritLocations` or `inheritInitializers` attribute in `@ContextConfiguration`is set to `false`, the resource locations or component classes and the context +initializers, respectively, for the test class shadow and effectively replace the +configuration defined by superclasses. + +| |As of Spring Framework 5.3, test configuration may also be inherited from enclosing<br/>classes. See [`@Nested` test class configuration](#testcontext-junit-jupiter-nested-test-configuration) for details.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In the next example, which uses XML resource locations, the `ApplicationContext` for`ExtendedTest` is loaded from `base-config.xml` and `extended-config.xml`, in that order. +Beans defined in `extended-config.xml` can, therefore, override (that is, replace) those +defined in `base-config.xml`. The following example shows how one class can extend +another and use both its own configuration file and the superclass’s configuration file: + +Java + +``` +@ExtendWith(SpringExtension.class) +// ApplicationContext will be loaded from "/base-config.xml" +// in the root of the classpath +@ContextConfiguration("/base-config.xml") (1) +class BaseTest { + // class body... 
+} + +// ApplicationContext will be loaded from "/base-config.xml" and +// "/extended-config.xml" in the root of the classpath +@ContextConfiguration("/extended-config.xml") (2) +class ExtendedTest extends BaseTest { + // class body... +} +``` + +|**1**|Configuration file defined in the superclass.| +|-----|---------------------------------------------| +|**2**| Configuration file defined in the subclass. | + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// ApplicationContext will be loaded from "/base-config.xml" +// in the root of the classpath +@ContextConfiguration("/base-config.xml") (1) +open class BaseTest { + // class body... +} + +// ApplicationContext will be loaded from "/base-config.xml" and +// "/extended-config.xml" in the root of the classpath +@ContextConfiguration("/extended-config.xml") (2) +class ExtendedTest : BaseTest() { + // class body... +} +``` + +|**1**|Configuration file defined in the superclass.| +|-----|---------------------------------------------| +|**2**| Configuration file defined in the subclass. | + +Similarly, in the next example, which uses component classes, the `ApplicationContext`for `ExtendedTest` is loaded from the `BaseConfig` and `ExtendedConfig` classes, in that +order. Beans defined in `ExtendedConfig` can, therefore, override (that is, replace) +those defined in `BaseConfig`. The following example shows how one class can extend +another and use both its own configuration class and the superclass’s configuration class: + +Java + +``` +// ApplicationContext will be loaded from BaseConfig +@SpringJUnitConfig(BaseConfig.class) (1) +class BaseTest { + // class body... +} + +// ApplicationContext will be loaded from BaseConfig and ExtendedConfig +@SpringJUnitConfig(ExtendedConfig.class) (2) +class ExtendedTest extends BaseTest { + // class body... 
+} +``` + +|**1**|Configuration class defined in the superclass.| +|-----|----------------------------------------------| +|**2**| Configuration class defined in the subclass. | + +Kotlin + +``` +// ApplicationContext will be loaded from BaseConfig +@SpringJUnitConfig(BaseConfig::class) (1) +open class BaseTest { + // class body... +} + +// ApplicationContext will be loaded from BaseConfig and ExtendedConfig +@SpringJUnitConfig(ExtendedConfig::class) (2) +class ExtendedTest : BaseTest() { + // class body... +} +``` + +|**1**|Configuration class defined in the superclass.| +|-----|----------------------------------------------| +|**2**| Configuration class defined in the subclass. | + +In the next example, which uses context initializers, the `ApplicationContext` for`ExtendedTest` is initialized by using `BaseInitializer` and `ExtendedInitializer`. Note, +however, that the order in which the initializers are invoked depends on whether they +implement Spring’s `Ordered` interface or are annotated with Spring’s `@Order` annotation +or the standard `@Priority` annotation. The following example shows how one class can +extend another and use both its own initializer and the superclass’s initializer: + +Java + +``` +// ApplicationContext will be initialized by BaseInitializer +@SpringJUnitConfig(initializers = BaseInitializer.class) (1) +class BaseTest { + // class body... +} + +// ApplicationContext will be initialized by BaseInitializer +// and ExtendedInitializer +@SpringJUnitConfig(initializers = ExtendedInitializer.class) (2) +class ExtendedTest extends BaseTest { + // class body... +} +``` + +|**1**|Initializer defined in the superclass.| +|-----|--------------------------------------| +|**2**| Initializer defined in the subclass. | + +Kotlin + +``` +// ApplicationContext will be initialized by BaseInitializer +@SpringJUnitConfig(initializers = [BaseInitializer::class]) (1) +open class BaseTest { + // class body... 
+} + +// ApplicationContext will be initialized by BaseInitializer +// and ExtendedInitializer +@SpringJUnitConfig(initializers = [ExtendedInitializer::class]) (2) +class ExtendedTest : BaseTest() { + // class body... +} +``` + +|**1**|Initializer defined in the superclass.| +|-----|--------------------------------------| +|**2**| Initializer defined in the subclass. | + +##### Context Configuration with Environment Profiles + +The Spring Framework has first-class support for the notion of environments and profiles +(AKA "bean definition profiles"), and integration tests can be configured to activate +particular bean definition profiles for various testing scenarios. This is achieved by +annotating a test class with the `@ActiveProfiles` annotation and supplying a list of +profiles that should be activated when loading the `ApplicationContext` for the test. + +| |You can use `@ActiveProfiles` with any implementation of the `SmartContextLoader`SPI, but `@ActiveProfiles` is not supported with implementations of the older`ContextLoader` SPI.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Consider two examples with XML configuration and `@Configuration` classes: + +``` +<!-- app-config.xml --> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:jdbc="http://www.springframework.org/schema/jdbc" + xmlns:jee="http://www.springframework.org/schema/jee" + xsi:schemaLocation="..."> + + <bean id="transferService" + class="com.bank.service.internal.DefaultTransferService"> + <constructor-arg ref="accountRepository"/> + <constructor-arg ref="feePolicy"/> + </bean> + + <bean id="accountRepository" + class="com.bank.repository.internal.JdbcAccountRepository"> + <constructor-arg ref="dataSource"/> + </bean> + + <bean id="feePolicy" + 
class="com.bank.service.internal.ZeroFeePolicy"/> + + <beans profile="dev"> + <jdbc:embedded-database id="dataSource"> + <jdbc:script + location="classpath:com/bank/config/sql/schema.sql"/> + <jdbc:script + location="classpath:com/bank/config/sql/test-data.sql"/> + </jdbc:embedded-database> + </beans> + + <beans profile="production"> + <jee:jndi-lookup id="dataSource" jndi-name="java:comp/env/jdbc/datasource"/> + </beans> + + <beans profile="default"> + <jdbc:embedded-database id="dataSource"> + <jdbc:script + location="classpath:com/bank/config/sql/schema.sql"/> + </jdbc:embedded-database> + </beans> + +</beans> +``` + +Java + +``` +@ExtendWith(SpringExtension.class) +// ApplicationContext will be loaded from "classpath:/app-config.xml" +@ContextConfiguration("/app-config.xml") +@ActiveProfiles("dev") +class TransferServiceTest { + + @Autowired + TransferService transferService; + + @Test + void testTransferService() { + // test the transferService + } +} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// ApplicationContext will be loaded from "classpath:/app-config.xml" +@ContextConfiguration("/app-config.xml") +@ActiveProfiles("dev") +class TransferServiceTest { + + @Autowired + lateinit var transferService: TransferService + + @Test + fun testTransferService() { + // test the transferService + } +} +``` + +When `TransferServiceTest` is run, its `ApplicationContext` is loaded from the`app-config.xml` configuration file in the root of the classpath. If you inspect`app-config.xml`, you can see that the `accountRepository` bean has a dependency on a`dataSource` bean. However, `dataSource` is not defined as a top-level bean. Instead,`dataSource` is defined three times: in the `production` profile, in the `dev` profile, +and in the `default` profile. + +By annotating `TransferServiceTest` with `@ActiveProfiles("dev")`, we instruct the Spring +TestContext Framework to load the `ApplicationContext` with the active profiles set to`{"dev"}`. 
As a result, an embedded database is created and populated with test data, and +the `accountRepository` bean is wired with a reference to the development `DataSource`. +That is likely what we want in an integration test. + +It is sometimes useful to assign beans to a `default` profile. Beans within the default +profile are included only when no other profile is specifically activated. You can use +this to define “fallback” beans to be used in the application’s default state. For +example, you may explicitly provide a data source for `dev` and `production` profiles, +but define an in-memory data source as a default when neither of these is active. + +The following code listings demonstrate how to implement the same configuration and +integration test with `@Configuration` classes instead of XML: + +Java + +``` +@Configuration +@Profile("dev") +public class StandaloneDataConfig { + + @Bean + public DataSource dataSource() { + return new EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .addScript("classpath:com/bank/config/sql/test-data.sql") + .build(); + } +} +``` + +Kotlin + +``` +@Configuration +@Profile("dev") +class StandaloneDataConfig { + + @Bean + fun dataSource(): DataSource { + return EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .addScript("classpath:com/bank/config/sql/test-data.sql") + .build() + } +} +``` + +Java + +``` +@Configuration +@Profile("production") +public class JndiDataConfig { + + @Bean(destroyMethod="") + public DataSource dataSource() throws Exception { + Context ctx = new InitialContext(); + return (DataSource) ctx.lookup("java:comp/env/jdbc/datasource"); + } +} +``` + +Kotlin + +``` +@Configuration +@Profile("production") +class JndiDataConfig { + + @Bean(destroyMethod = "") + fun dataSource(): DataSource { + val ctx = InitialContext() + return ctx.lookup("java:comp/env/jdbc/datasource") 
as DataSource + } +} +``` + +Java + +``` +@Configuration +@Profile("default") +public class DefaultDataConfig { + + @Bean + public DataSource dataSource() { + return new EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .build(); + } +} +``` + +Kotlin + +``` +@Configuration +@Profile("default") +class DefaultDataConfig { + + @Bean + fun dataSource(): DataSource { + return EmbeddedDatabaseBuilder() + .setType(EmbeddedDatabaseType.HSQL) + .addScript("classpath:com/bank/config/sql/schema.sql") + .build() + } +} +``` + +Java + +``` +@Configuration +public class TransferServiceConfig { + + @Autowired DataSource dataSource; + + @Bean + public TransferService transferService() { + return new DefaultTransferService(accountRepository(), feePolicy()); + } + + @Bean + public AccountRepository accountRepository() { + return new JdbcAccountRepository(dataSource); + } + + @Bean + public FeePolicy feePolicy() { + return new ZeroFeePolicy(); + } +} +``` + +Kotlin + +``` +@Configuration +class TransferServiceConfig { + + @Autowired + lateinit var dataSource: DataSource + + @Bean + fun transferService(): TransferService { + return DefaultTransferService(accountRepository(), feePolicy()) + } + + @Bean + fun accountRepository(): AccountRepository { + return JdbcAccountRepository(dataSource) + } + + @Bean + fun feePolicy(): FeePolicy { + return ZeroFeePolicy() + } +} +``` + +Java + +``` +@SpringJUnitConfig({ + TransferServiceConfig.class, + StandaloneDataConfig.class, + JndiDataConfig.class, + DefaultDataConfig.class}) +@ActiveProfiles("dev") +class TransferServiceTest { + + @Autowired + TransferService transferService; + + @Test + void testTransferService() { + // test the transferService + } +} +``` + +Kotlin + +``` +@SpringJUnitConfig( + TransferServiceConfig::class, + StandaloneDataConfig::class, + JndiDataConfig::class, + DefaultDataConfig::class) +@ActiveProfiles("dev") +class TransferServiceTest { + + 
@Autowired + lateinit var transferService: TransferService + + @Test + fun testTransferService() { + // test the transferService + } +} +``` + +In this variation, we have split the XML configuration into four independent`@Configuration` classes: + +* `TransferServiceConfig`: Acquires a `dataSource` through dependency injection by using`@Autowired`. + +* `StandaloneDataConfig`: Defines a `dataSource` for an embedded database suitable for + developer tests. + +* `JndiDataConfig`: Defines a `dataSource` that is retrieved from JNDI in a production + environment. + +* `DefaultDataConfig`: Defines a `dataSource` for a default embedded database, in case no + profile is active. + +As with the XML-based configuration example, we still annotate `TransferServiceTest` with`@ActiveProfiles("dev")`, but this time we specify all four configuration classes by +using the `@ContextConfiguration` annotation. The body of the test class itself remains +completely unchanged. + +It is often the case that a single set of profiles is used across multiple test classes +within a given project. Thus, to avoid duplicate declarations of the `@ActiveProfiles`annotation, you can declare `@ActiveProfiles` once on a base class, and subclasses +automatically inherit the `@ActiveProfiles` configuration from the base class. In the +following example, the declaration of `@ActiveProfiles` (as well as other annotations) +has been moved to an abstract superclass, `AbstractIntegrationTest`: + +| |As of Spring Framework 5.3, test configuration may also be inherited from enclosing<br/>classes. 
See [`@Nested` test class configuration](#testcontext-junit-jupiter-nested-test-configuration) for details.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Java + +``` +@SpringJUnitConfig({ + TransferServiceConfig.class, + StandaloneDataConfig.class, + JndiDataConfig.class, + DefaultDataConfig.class}) +@ActiveProfiles("dev") +abstract class AbstractIntegrationTest { +} +``` + +Kotlin + +``` +@SpringJUnitConfig( + TransferServiceConfig::class, + StandaloneDataConfig::class, + JndiDataConfig::class, + DefaultDataConfig::class) +@ActiveProfiles("dev") +abstract class AbstractIntegrationTest { +} +``` + +Java + +``` +// "dev" profile inherited from superclass +class TransferServiceTest extends AbstractIntegrationTest { + + @Autowired + TransferService transferService; + + @Test + void testTransferService() { + // test the transferService + } +} +``` + +Kotlin + +``` +// "dev" profile inherited from superclass +class TransferServiceTest : AbstractIntegrationTest() { + + @Autowired + lateinit var transferService: TransferService + + @Test + fun testTransferService() { + // test the transferService + } +} +``` + +`@ActiveProfiles` also supports an `inheritProfiles` attribute that can be used to +disable the inheritance of active profiles, as the following example shows: + +Java + +``` +// "dev" profile overridden with "production" +@ActiveProfiles(profiles = "production", inheritProfiles = false) +class ProductionTransferServiceTest extends AbstractIntegrationTest { + // test body +} +``` + +Kotlin + +``` +// "dev" profile overridden with "production" +@ActiveProfiles("production", inheritProfiles = false) +class ProductionTransferServiceTest : AbstractIntegrationTest() { + // test body +} +``` + +Furthermore, it is sometimes necessary to resolve active profiles for tests +programmatically instead of 
declaratively — for example, based on: + +* The current operating system. + +* Whether tests are being run on a continuous integration build server. + +* The presence of certain environment variables. + +* The presence of custom class-level annotations. + +* Other concerns. + +To resolve active bean definition profiles programmatically, you can implement +a custom `ActiveProfilesResolver` and register it by using the `resolver`attribute of `@ActiveProfiles`. For further information, see the corresponding[javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/ActiveProfilesResolver.html). +The following example demonstrates how to implement and register a custom`OperatingSystemActiveProfilesResolver`: + +Java + +``` +// "dev" profile overridden programmatically via a custom resolver +@ActiveProfiles( + resolver = OperatingSystemActiveProfilesResolver.class, + inheritProfiles = false) +class TransferServiceTest extends AbstractIntegrationTest { + // test body +} +``` + +Kotlin + +``` +// "dev" profile overridden programmatically via a custom resolver +@ActiveProfiles( + resolver = OperatingSystemActiveProfilesResolver::class, + inheritProfiles = false) +class TransferServiceTest : AbstractIntegrationTest() { + // test body +} +``` + +Java + +``` +public class OperatingSystemActiveProfilesResolver implements ActiveProfilesResolver { + + @Override + public String[] resolve(Class<?> testClass) { + String profile = ...; + // determine the value of profile based on the operating system + return new String[] {profile}; + } +} +``` + +Kotlin + +``` +class OperatingSystemActiveProfilesResolver : ActiveProfilesResolver { + + override fun resolve(testClass: Class<*>): Array<String> { + val profile: String = ... 
+ // determine the value of profile based on the operating system + return arrayOf(profile) + } +} +``` + +##### Context Configuration with Test Property Sources + +The Spring Framework has first-class support for the notion of an environment with a +hierarchy of property sources, and you can configure integration tests with test-specific +property sources. In contrast to the `@PropertySource` annotation used on`@Configuration` classes, you can declare the `@TestPropertySource` annotation on a test +class to declare resource locations for test properties files or inlined properties. +These test property sources are added to the set of `PropertySources` in the`Environment` for the `ApplicationContext` loaded for the annotated integration test. + +| |You can use `@TestPropertySource` with any implementation of the `SmartContextLoader`SPI, but `@TestPropertySource` is not supported with implementations of the older`ContextLoader` SPI.<br/><br/>Implementations of `SmartContextLoader` gain access to merged test property source values<br/>through the `getPropertySourceLocations()` and `getPropertySourceProperties()` methods in`MergedContextConfiguration`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Declaring Test Property Sources + +You can configure test properties files by using the `locations` or `value` attribute of`@TestPropertySource`. + +Both traditional and XML-based properties file formats are supported — for example,`"classpath:/com/example/test.properties"` or `"file:///path/to/file.xml"`. + +Each path is interpreted as a Spring `Resource`. 
A plain path (for example,`"test.properties"`) is treated as a classpath resource that is relative to the package +in which the test class is defined. A path starting with a slash is treated as an +absolute classpath resource (for example: `"/org/example/test.xml"`). A path that +references a URL (for example, a path prefixed with `classpath:`, `file:`, or `http:`) is +loaded by using the specified resource protocol. Resource location wildcards (such as`***/**.properties`) are not permitted: Each location must evaluate to exactly one`.properties` or `.xml` resource. + +The following example uses a test properties file: + +Java + +``` +@ContextConfiguration +@TestPropertySource("/test.properties") (1) +class MyIntegrationTests { + // class body... +} +``` + +|**1**|Specifying a properties file with an absolute path.| +|-----|---------------------------------------------------| + +Kotlin + +``` +@ContextConfiguration +@TestPropertySource("/test.properties") (1) +class MyIntegrationTests { + // class body... +} +``` + +|**1**|Specifying a properties file with an absolute path.| +|-----|---------------------------------------------------| + +You can configure inlined properties in the form of key-value pairs by using the`properties` attribute of `@TestPropertySource`, as shown in the next example. All +key-value pairs are added to the enclosing `Environment` as a single test`PropertySource` with the highest precedence. + +The supported syntax for key-value pairs is the same as the syntax defined for entries in +a Java properties file: + +* `key=value` + +* `key:value` + +* `key value` + +The following example sets two inlined properties: + +Java + +``` +@ContextConfiguration +@TestPropertySource(properties = {"timezone = GMT", "port: 4242"}) (1) +class MyIntegrationTests { + // class body... 
+} +``` + +|**1**|Setting two properties by using two variations of the key-value syntax.| +|-----|-----------------------------------------------------------------------| + +Kotlin + +``` +@ContextConfiguration +@TestPropertySource(properties = ["timezone = GMT", "port: 4242"]) (1) +class MyIntegrationTests { + // class body... +} +``` + +|**1**|Setting two properties by using two variations of the key-value syntax.| +|-----|-----------------------------------------------------------------------| + +| |As of Spring Framework 5.2, `@TestPropertySource` can be used as *repeatable annotation*.<br/>That means that you can have multiple declarations of `@TestPropertySource` on a single<br/>test class, with the `locations` and `properties` from later `@TestPropertySource`annotations overriding those from previous `@TestPropertySource` annotations.<br/><br/>In addition, you may declare multiple composed annotations on a test class that are each<br/>meta-annotated with `@TestPropertySource`, and all of those `@TestPropertySource`declarations will contribute to your test property sources.<br/><br/>Directly present `@TestPropertySource` annotations always take precedence over<br/>meta-present `@TestPropertySource` annotations. 
In other words, `locations` and `properties` from a directly present `@TestPropertySource` annotation will override the `locations` and `properties` from a `@TestPropertySource` annotation used as a
Thus, test properties can +be used to selectively override properties loaded from system and application property +sources. Furthermore, inlined properties have higher precedence than properties loaded +from resource locations. Note, however, that properties registered via[`@DynamicPropertySource`](#testcontext-ctx-management-dynamic-property-sources) have +higher precedence than those loaded via `@TestPropertySource`. + +In the next example, the `timezone` and `port` properties and any properties defined in`"/test.properties"` override any properties of the same name that are defined in system +and application property sources. Furthermore, if the `"/test.properties"` file defines +entries for the `timezone` and `port` properties those are overridden by the inlined +properties declared by using the `properties` attribute. The following example shows how +to specify properties both in a file and inline: + +Java + +``` +@ContextConfiguration +@TestPropertySource( + locations = "/test.properties", + properties = {"timezone = GMT", "port: 4242"} +) +class MyIntegrationTests { + // class body... +} +``` + +Kotlin + +``` +@ContextConfiguration +@TestPropertySource("/test.properties", + properties = ["timezone = GMT", "port: 4242"] +) +class MyIntegrationTests { + // class body... +} +``` + +###### Inheriting and Overriding Test Property Sources + +`@TestPropertySource` supports boolean `inheritLocations` and `inheritProperties`attributes that denote whether resource locations for properties files and inlined +properties declared by superclasses should be inherited. The default value for both flags +is `true`. This means that a test class inherits the locations and inlined properties +declared by any superclasses. Specifically, the locations and inlined properties for a +test class are appended to the locations and inlined properties declared by superclasses. +Thus, subclasses have the option of extending the locations and inlined properties. 
Note +that properties that appear later shadow (that is, override) properties of the same name +that appear earlier. In addition, the aforementioned precedence rules apply for inherited +test property sources as well. + +If the `inheritLocations` or `inheritProperties` attribute in `@TestPropertySource` is +set to `false`, the locations or inlined properties, respectively, for the test class +shadow and effectively replace the configuration defined by superclasses. + +| |As of Spring Framework 5.3, test configuration may also be inherited from enclosing<br/>classes. See [`@Nested` test class configuration](#testcontext-junit-jupiter-nested-test-configuration) for details.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In the next example, the `ApplicationContext` for `BaseTest` is loaded by using only the`base.properties` file as a test property source. In contrast, the `ApplicationContext`for `ExtendedTest` is loaded by using the `base.properties` and `extended.properties`files as test property source locations. The following example shows how to define +properties in both a subclass and its superclass by using `properties` files: + +Java + +``` +@TestPropertySource("base.properties") +@ContextConfiguration +class BaseTest { + // ... +} + +@TestPropertySource("extended.properties") +@ContextConfiguration +class ExtendedTest extends BaseTest { + // ... +} +``` + +Kotlin + +``` +@TestPropertySource("base.properties") +@ContextConfiguration +open class BaseTest { + // ... +} + +@TestPropertySource("extended.properties") +@ContextConfiguration +class ExtendedTest : BaseTest() { + // ... +} +``` + +In the next example, the `ApplicationContext` for `BaseTest` is loaded by using only the +inlined `key1` property. 
In contrast, the `ApplicationContext` for `ExtendedTest` is +loaded by using the inlined `key1` and `key2` properties. The following example shows how +to define properties in both a subclass and its superclass by using inline properties: + +Java + +``` +@TestPropertySource(properties = "key1 = value1") +@ContextConfiguration +class BaseTest { + // ... +} + +@TestPropertySource(properties = "key2 = value2") +@ContextConfiguration +class ExtendedTest extends BaseTest { + // ... +} +``` + +Kotlin + +``` +@TestPropertySource(properties = ["key1 = value1"]) +@ContextConfiguration +open class BaseTest { + // ... +} + +@TestPropertySource(properties = ["key2 = value2"]) +@ContextConfiguration +class ExtendedTest : BaseTest() { + // ... +} +``` + +##### Context Configuration with Dynamic Property Sources + +As of Spring Framework 5.2.5, the TestContext framework provides support for *dynamic*properties via the `@DynamicPropertySource` annotation. This annotation can be used in +integration tests that need to add properties with dynamic values to the set of`PropertySources` in the `Environment` for the `ApplicationContext` loaded for the +integration test. + +| |The `@DynamicPropertySource` annotation and its supporting infrastructure were<br/>originally designed to allow properties from[Testcontainers](https://www.testcontainers.org/) based tests to be exposed easily to<br/>Spring integration tests. 
However, this feature may also be used with any form of<br/>external resource whose lifecycle is maintained outside the test’s `ApplicationContext`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In contrast to the [`@TestPropertySource`](#testcontext-ctx-management-property-sources)annotation that is applied at the class level, `@DynamicPropertySource` must be applied +to a `static` method that accepts a single `DynamicPropertyRegistry` argument which is +used to add *name-value* pairs to the `Environment`. Values are dynamic and provided via +a `Supplier` which is only invoked when the property is resolved. Typically, method +references are used to supply values, as can be seen in the following example which uses +the Testcontainers project to manage a Redis container outside of the Spring`ApplicationContext`. The IP address and port of the managed Redis container are made +available to components within the test’s `ApplicationContext` via the `redis.host` and`redis.port` properties. These properties can be accessed via Spring’s `Environment`abstraction or injected directly into Spring-managed components – for example, via`@Value("${redis.host}")` and `@Value("${redis.port}")`, respectively. 
+ +| |If you use `@DynamicPropertySource` in a base class and discover that tests in subclasses<br/>fail because the dynamic properties change between subclasses, you may need to annotate<br/>your base class with [`@DirtiesContext`](#spring-testing-annotation-dirtiescontext) to<br/>ensure that each subclass gets its own `ApplicationContext` with the correct dynamic<br/>properties.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Java + +``` +@SpringJUnitConfig(/* ... */) +@Testcontainers +class ExampleIntegrationTests { + + @Container + static RedisContainer redis = new RedisContainer(); + + @DynamicPropertySource + static void redisProperties(DynamicPropertyRegistry registry) { + registry.add("redis.host", redis::getContainerIpAddress); + registry.add("redis.port", redis::getMappedPort); + } + + // tests ... + +} +``` + +Kotlin + +``` +@SpringJUnitConfig(/* ... */) +@Testcontainers +class ExampleIntegrationTests { + + companion object { + + @Container + @JvmStatic + val redis: RedisContainer = RedisContainer() + + @DynamicPropertySource + @JvmStatic + fun redisProperties(registry: DynamicPropertyRegistry) { + registry.add("redis.host", redis::getContainerIpAddress) + registry.add("redis.port", redis::getMappedPort) + } + } + + // tests ... + +} +``` + +###### Precedence + +Dynamic properties have higher precedence than those loaded from `@TestPropertySource`, +the operating system’s environment, Java system properties, or property sources added by +the application declaratively by using `@PropertySource` or programmatically. 
Thus, +dynamic properties can be used to selectively override properties loaded via`@TestPropertySource`, system property sources, and application property sources. + +##### Loading a `WebApplicationContext` + +To instruct the TestContext framework to load a `WebApplicationContext` instead of a +standard `ApplicationContext`, you can annotate the respective test class with`@WebAppConfiguration`. + +The presence of `@WebAppConfiguration` on your test class instructs the TestContext +framework (TCF) that a `WebApplicationContext` (WAC) should be loaded for your +integration tests. In the background, the TCF makes sure that a `MockServletContext` is +created and supplied to your test’s WAC. By default, the base resource path for your`MockServletContext` is set to `src/main/webapp`. This is interpreted as a path relative +to the root of your JVM (normally the path to your project). If you are familiar with the +directory structure of a web application in a Maven project, you know that`src/main/webapp` is the default location for the root of your WAR. If you need to +override this default, you can provide an alternate path to the `@WebAppConfiguration`annotation (for example, `@WebAppConfiguration("src/test/webapp")`). If you wish to +reference a base resource path from the classpath instead of the file system, you can use +Spring’s `classpath:` prefix. + +Note that Spring’s testing support for `WebApplicationContext` implementations is on par +with its support for standard `ApplicationContext` implementations. When testing with a`WebApplicationContext`, you are free to declare XML configuration files, Groovy scripts, +or `@Configuration` classes by using `@ContextConfiguration`. You are also free to use +any other test annotations, such as `@ActiveProfiles`, `@TestExecutionListeners`, `@Sql`,`@Rollback`, and others. + +The remaining examples in this section show some of the various configuration options for +loading a `WebApplicationContext`. 
The following example shows the TestContext +framework’s support for convention over configuration: + +Java + +``` +@ExtendWith(SpringExtension.class) + +// defaults to "file:src/main/webapp" +@WebAppConfiguration + +// detects "WacTests-context.xml" in the same package +// or static nested @Configuration classes +@ContextConfiguration +class WacTests { + //... +} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) + +// defaults to "file:src/main/webapp" +@WebAppConfiguration + +// detects "WacTests-context.xml" in the same package +// or static nested @Configuration classes +@ContextConfiguration +class WacTests { + //... +} +``` + +If you annotate a test class with `@WebAppConfiguration` without specifying a resource +base path, the resource path effectively defaults to `file:src/main/webapp`. Similarly, +if you declare `@ContextConfiguration` without specifying resource `locations`, component`classes`, or context `initializers`, Spring tries to detect the presence of your +configuration by using conventions (that is, `WacTests-context.xml` in the same package +as the `WacTests` class or static nested `@Configuration` classes). + +The following example shows how to explicitly declare a resource base path with`@WebAppConfiguration` and an XML resource location with `@ContextConfiguration`: + +Java + +``` +@ExtendWith(SpringExtension.class) + +// file system resource +@WebAppConfiguration("webapp") + +// classpath resource +@ContextConfiguration("/spring/test-servlet-config.xml") +class WacTests { + //... +} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) + +// file system resource +@WebAppConfiguration("webapp") + +// classpath resource +@ContextConfiguration("/spring/test-servlet-config.xml") +class WacTests { + //... +} +``` + +The important thing to note here is the different semantics for paths with these two +annotations. 
By default, `@WebAppConfiguration` resource paths are file system based, +whereas `@ContextConfiguration` resource locations are classpath based. + +The following example shows that we can override the default resource semantics for both +annotations by specifying a Spring resource prefix: + +Java + +``` +@ExtendWith(SpringExtension.class) + +// classpath resource +@WebAppConfiguration("classpath:test-web-resources") + +// file system resource +@ContextConfiguration("file:src/main/webapp/WEB-INF/servlet-config.xml") +class WacTests { + //... +} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) + +// classpath resource +@WebAppConfiguration("classpath:test-web-resources") + +// file system resource +@ContextConfiguration("file:src/main/webapp/WEB-INF/servlet-config.xml") +class WacTests { + //... +} +``` + +Contrast the comments in this example with the previous example. + +[]()Working with Web Mocks + +To provide comprehensive web testing support, the TestContext framework has a`ServletTestExecutionListener` that is enabled by default. When testing against a`WebApplicationContext`, this [`TestExecutionListener`](#testcontext-key-abstractions)sets up default thread-local state by using Spring Web’s `RequestContextHolder` before +each test method and creates a `MockHttpServletRequest`, a `MockHttpServletResponse`, and +a `ServletWebRequest` based on the base resource path configured with`@WebAppConfiguration`. `ServletTestExecutionListener` also ensures that the`MockHttpServletResponse` and `ServletWebRequest` can be injected into the test instance, +and, once the test is complete, it cleans up thread-local state. + +Once you have a `WebApplicationContext` loaded for your test, you might find that you +need to interact with the web mocks — for example, to set up your test fixture or to +perform assertions after invoking your web component. The following example shows which +mocks can be autowired into your test instance. 
Note that the `WebApplicationContext` and`MockServletContext` are both cached across the test suite, whereas the other mocks are +managed per test method by the `ServletTestExecutionListener`. + +Java + +``` +@SpringJUnitWebConfig +class WacTests { + + @Autowired + WebApplicationContext wac; // cached + + @Autowired + MockServletContext servletContext; // cached + + @Autowired + MockHttpSession session; + + @Autowired + MockHttpServletRequest request; + + @Autowired + MockHttpServletResponse response; + + @Autowired + ServletWebRequest webRequest; + + //... +} +``` + +Kotlin + +``` +@SpringJUnitWebConfig +class WacTests { + + @Autowired + lateinit var wac: WebApplicationContext // cached + + @Autowired + lateinit var servletContext: MockServletContext // cached + + @Autowired + lateinit var session: MockHttpSession + + @Autowired + lateinit var request: MockHttpServletRequest + + @Autowired + lateinit var response: MockHttpServletResponse + + @Autowired + lateinit var webRequest: ServletWebRequest + + //... +} +``` + +##### Context Caching + +Once the TestContext framework loads an `ApplicationContext` (or `WebApplicationContext`) +for a test, that context is cached and reused for all subsequent tests that declare the +same unique context configuration within the same test suite. To understand how caching +works, it is important to understand what is meant by “unique” and “test suite.” + +An `ApplicationContext` can be uniquely identified by the combination of configuration +parameters that is used to load it. Consequently, the unique combination of configuration +parameters is used to generate a key under which the context is cached. 
The TestContext +framework uses the following configuration parameters to build the context cache key: + +* `locations` (from `@ContextConfiguration`) + +* `classes` (from `@ContextConfiguration`) + +* `contextInitializerClasses` (from `@ContextConfiguration`) + +* `contextCustomizers` (from `ContextCustomizerFactory`) – this includes`@DynamicPropertySource` methods as well as various features from Spring Boot’s + testing support such as `@MockBean` and `@SpyBean`. + +* `contextLoader` (from `@ContextConfiguration`) + +* `parent` (from `@ContextHierarchy`) + +* `activeProfiles` (from `@ActiveProfiles`) + +* `propertySourceLocations` (from `@TestPropertySource`) + +* `propertySourceProperties` (from `@TestPropertySource`) + +* `resourceBasePath` (from `@WebAppConfiguration`) + +For example, if `TestClassA` specifies `{"app-config.xml", "test-config.xml"}` for the`locations` (or `value`) attribute of `@ContextConfiguration`, the TestContext framework +loads the corresponding `ApplicationContext` and stores it in a `static` context cache +under a key that is based solely on those locations. So, if `TestClassB` also defines`{"app-config.xml", "test-config.xml"}` for its locations (either explicitly or +implicitly through inheritance) but does not define `@WebAppConfiguration`, a different`ContextLoader`, different active profiles, different context initializers, different +test property sources, or a different parent context, then the same `ApplicationContext`is shared by both test classes. This means that the setup cost for loading an application +context is incurred only once (per test suite), and subsequent test execution is much +faster. + +| |Test suites and forked processes<br/><br/>The Spring TestContext framework stores application contexts in a static cache. This<br/>means that the context is literally stored in a `static` variable. 
In other words, if<br/>tests run in separate processes, the static cache is cleared between each test<br/>execution, which effectively disables the caching mechanism.<br/><br/>To benefit from the caching mechanism, all tests must run within the same process or test<br/>suite. This can be achieved by executing all tests as a group within an IDE. Similarly,<br/>when executing tests with a build framework such as Ant, Maven, or Gradle, it is<br/>important to make sure that the build framework does not fork between tests. For example,<br/>if the[`forkMode`](https://maven.apache.org/plugins/maven-surefire-plugin/test-mojo.html#forkMode)for the Maven Surefire plug-in is set to `always` or `pertest`, the TestContext framework<br/>cannot cache application contexts between test classes, and the build process runs<br/>significantly more slowly as a result.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The size of the context cache is bounded with a default maximum size of 
32. Whenever the +maximum size is reached, a least recently used (LRU) eviction policy is used to evict and +close stale contexts. You can configure the maximum size from the command line or a build +script by setting a JVM system property named `spring.test.context.cache.maxSize`. As an +alternative, you can set the same property via the[`SpringProperties`](appendix.html#appendix-spring-properties) mechanism. + +Since having a large number of application contexts loaded within a given test suite can +cause the suite to take an unnecessarily long time to run, it is often beneficial to +know exactly how many contexts have been loaded and cached. To view the statistics for +the underlying context cache, you can set the log level for the`org.springframework.test.context.cache` logging category to `DEBUG`. + +In the unlikely case that a test corrupts the application context and requires reloading +(for example, by modifying a bean definition or the state of an application object), you +can annotate your test class or test method with `@DirtiesContext` (see the discussion of`@DirtiesContext` in [Spring Testing +Annotations](#spring-testing-annotation-dirtiescontext)). This instructs Spring to remove the context from the cache and rebuild +the application context before running the next test that requires the same application +context. Note that support for the `@DirtiesContext` annotation is provided by the`DirtiesContextBeforeModesTestExecutionListener` and the`DirtiesContextTestExecutionListener`, which are enabled by default. + +| |ApplicationContext lifecycle and console logging<br/><br/>When you need to debug a test executed with the Spring TestContext Framework, it can be<br/>useful to analyze the console output (that is, output to the `SYSOUT` and `SYSERR`streams). 
Some build tools and IDEs are able to associate console output with a given<br/>test; however, some console output cannot be easily associated with a given test.<br/><br/>With regard to console logging triggered by the Spring Framework itself or by components<br/>registered in the `ApplicationContext`, it is important to understand the lifecycle of an`ApplicationContext` that has been loaded by the Spring TestContext Framework within a<br/>test suite.<br/><br/>The `ApplicationContext` for a test is typically loaded when an instance of the test<br/>class is being prepared — for example, to perform dependency injection into `@Autowired`fields of the test instance. This means that any console logging triggered during the<br/>initialization of the `ApplicationContext` typically cannot be associated with an<br/>individual test method. However, if the context is closed immediately before the<br/>execution of a test method according to [`@DirtiesContext`](#spring-testing-annotation-dirtiescontext)semantics, a new instance of the context will be loaded just prior to execution of the<br/>test method. In the latter scenario, an IDE or build tool may potentially associate<br/>console logging with the individual test method.<br/><br/>The `ApplicationContext` for a test can be closed via one of the following scenarios.<br/><br/>* The context is closed according to `@DirtiesContext` semantics.<br/><br/>* The context is closed because it has been automatically evicted from the cache<br/> according to the LRU eviction policy.<br/><br/>* The context is closed via a JVM shutdown hook when the JVM for the test suite<br/> terminates.<br/><br/>If the context is closed according to `@DirtiesContext` semantics after a particular test<br/>method, an IDE or build tool may potentially associate console logging with the<br/>individual test method. 
If the context is closed according to `@DirtiesContext` semantics<br/>after a test class, any console logging triggered during the shutdown of the`ApplicationContext` cannot be associated with an individual test method. Similarly, any<br/>console logging triggered during the shutdown phase via a JVM shutdown hook cannot be<br/>associated with an individual test method.<br/><br/>When a Spring `ApplicationContext` is closed via a JVM shutdown hook, callbacks executed<br/>during the shutdown phase are executed on a thread named `SpringContextShutdownHook`. So,<br/>if you wish to disable console logging triggered when the `ApplicationContext` is closed<br/>via a JVM shutdown hook, you may be able to register a custom filter with your logging<br/>framework that allows you to ignore any logging initiated by that thread.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Context Hierarchies + +When writing integration tests that rely on a loaded Spring `ApplicationContext`, it is +often sufficient to test against a single context. 
However, there are times when it is +beneficial or even necessary to test against a hierarchy of `ApplicationContext` instances. For example, if you are developing a Spring MVC web application, you typically +have a root `WebApplicationContext` loaded by Spring’s `ContextLoaderListener` and a +child `WebApplicationContext` loaded by Spring’s `DispatcherServlet`. This results in a +parent-child context hierarchy where shared components and infrastructure configuration +are declared in the root context and consumed in the child context by web-specific +components. Another use case can be found in Spring Batch applications, where you often +have a parent context that provides configuration for shared batch infrastructure and a +child context for the configuration of a specific batch job. + +You can write integration tests that use context hierarchies by declaring context +configuration with the `@ContextHierarchy` annotation, either on an individual test class +or within a test class hierarchy. If a context hierarchy is declared on multiple classes +within a test class hierarchy, you can also merge or override the context configuration +for a specific, named level in the context hierarchy. When merging configuration for a +given level in the hierarchy, the configuration resource type (that is, XML configuration +files or component classes) must be consistent. Otherwise, it is perfectly acceptable to +have different levels in a context hierarchy configured using different resource types. + +The remaining JUnit Jupiter based examples in this section show common configuration +scenarios for integration tests that require the use of context hierarchies. 
+ +Single test class with context hierarchy + +`ControllerIntegrationTests` represents a typical integration testing scenario for a +Spring MVC web application by declaring a context hierarchy that consists of two levels, +one for the root `WebApplicationContext` (loaded by using the `TestAppConfig` `@Configuration` class) and one for the dispatcher servlet `WebApplicationContext` (loaded by using the `WebConfig` `@Configuration` class). The `WebApplicationContext` that is autowired into the test instance is the one for the child context (that is, the +lowest context in the hierarchy). The following listing shows this configuration scenario: + +Java + +``` +@ExtendWith(SpringExtension.class) +@WebAppConfiguration +@ContextHierarchy({ + @ContextConfiguration(classes = TestAppConfig.class), + @ContextConfiguration(classes = WebConfig.class) +}) +class ControllerIntegrationTests { + + @Autowired + WebApplicationContext wac; + + // ... +} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +@WebAppConfiguration +@ContextHierarchy( + ContextConfiguration(classes = [TestAppConfig::class]), + ContextConfiguration(classes = [WebConfig::class])) +class ControllerIntegrationTests { + + @Autowired + lateinit var wac: WebApplicationContext + + // ... +} +``` + +Class hierarchy with implicit parent context + +The test classes in this example define a context hierarchy within a test class +hierarchy. `AbstractWebTests` declares the configuration for a root `WebApplicationContext` in a Spring-powered web application. Note, however, that `AbstractWebTests` does not declare `@ContextHierarchy`. Consequently, subclasses of `AbstractWebTests` can optionally participate in a context hierarchy or follow the +standard semantics for `@ContextConfiguration`. `SoapWebServiceTests` and `RestWebServiceTests` both extend `AbstractWebTests` and define a context hierarchy by +using `@ContextHierarchy`. 
The result is that three application contexts are loaded (one +for each declaration of `@ContextConfiguration`), and the application context loaded +based on the configuration in `AbstractWebTests` is set as the parent context for each of +the contexts loaded for the concrete subclasses. The following listing shows this +configuration scenario: + +Java + +``` +@ExtendWith(SpringExtension.class) +@WebAppConfiguration +@ContextConfiguration("file:src/main/webapp/WEB-INF/applicationContext.xml") +public abstract class AbstractWebTests {} + +@ContextHierarchy(@ContextConfiguration("/spring/soap-ws-config.xml")) +public class SoapWebServiceTests extends AbstractWebTests {} + +@ContextHierarchy(@ContextConfiguration("/spring/rest-ws-config.xml")) +public class RestWebServiceTests extends AbstractWebTests {} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +@WebAppConfiguration +@ContextConfiguration("file:src/main/webapp/WEB-INF/applicationContext.xml") +abstract class AbstractWebTests + +@ContextHierarchy(ContextConfiguration("/spring/soap-ws-config.xml")) +class SoapWebServiceTests : AbstractWebTests() + +@ContextHierarchy(ContextConfiguration("/spring/rest-ws-config.xml")) +class RestWebServiceTests : AbstractWebTests() +``` + +Class hierarchy with merged context hierarchy configuration + +The classes in this example show the use of named hierarchy levels in order to merge the +configuration for specific levels in a context hierarchy. `BaseTests` defines two levels +in the hierarchy, `parent` and `child`. `ExtendedTests` extends `BaseTests` and instructs +the Spring TestContext Framework to merge the context configuration for the `child`hierarchy level, by ensuring that the names declared in the `name` attribute in`@ContextConfiguration` are both `child`. The result is that three application contexts +are loaded: one for `/app-config.xml`, one for `/user-config.xml`, and one for`{"/user-config.xml", "/order-config.xml"}`. 
As with the previous example, the +application context loaded from `/app-config.xml` is set as the parent context for the +contexts loaded from `/user-config.xml` and `{"/user-config.xml", "/order-config.xml"}`. +The following listing shows this configuration scenario: + +Java + +``` +@ExtendWith(SpringExtension.class) +@ContextHierarchy({ + @ContextConfiguration(name = "parent", locations = "/app-config.xml"), + @ContextConfiguration(name = "child", locations = "/user-config.xml") +}) +class BaseTests {} + +@ContextHierarchy( + @ContextConfiguration(name = "child", locations = "/order-config.xml") +) +class ExtendedTests extends BaseTests {} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +@ContextHierarchy( + ContextConfiguration(name = "parent", locations = ["/app-config.xml"]), + ContextConfiguration(name = "child", locations = ["/user-config.xml"])) +open class BaseTests {} + +@ContextHierarchy( + ContextConfiguration(name = "child", locations = ["/order-config.xml"]) +) +class ExtendedTests : BaseTests() {} +``` + +Class hierarchy with overridden context hierarchy configuration + +In contrast to the previous example, this example demonstrates how to override the +configuration for a given named level in a context hierarchy by setting the`inheritLocations` flag in `@ContextConfiguration` to `false`. Consequently, the +application context for `ExtendedTests` is loaded only from `/test-user-config.xml` and +has its parent set to the context loaded from `/app-config.xml`. 
The following listing +shows this configuration scenario: + +Java + +``` +@ExtendWith(SpringExtension.class) +@ContextHierarchy({ + @ContextConfiguration(name = "parent", locations = "/app-config.xml"), + @ContextConfiguration(name = "child", locations = "/user-config.xml") +}) +class BaseTests {} + +@ContextHierarchy( + @ContextConfiguration( + name = "child", + locations = "/test-user-config.xml", + inheritLocations = false +)) +class ExtendedTests extends BaseTests {} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +@ContextHierarchy( + ContextConfiguration(name = "parent", locations = ["/app-config.xml"]), + ContextConfiguration(name = "child", locations = ["/user-config.xml"])) +open class BaseTests {} + +@ContextHierarchy( + ContextConfiguration( + name = "child", + locations = ["/test-user-config.xml"], + inheritLocations = false + )) +class ExtendedTests : BaseTests() {} +``` + +| |Dirtying a context within a context hierarchy<br/><br/>If you use `@DirtiesContext` in a test whose context is configured as part of a<br/>context hierarchy, you can use the `hierarchyMode` flag to control how the context cache<br/>is cleared. 
For further details, see the discussion of `@DirtiesContext` in[Spring Testing Annotations](#spring-testing-annotation-dirtiescontext) and the[`@DirtiesContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/annotation/DirtiesContext.html) javadoc.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.5.7. Dependency Injection of Test Fixtures + +When you use the `DependencyInjectionTestExecutionListener` (which is configured by +default), the dependencies of your test instances are injected from beans in the +application context that you configured with `@ContextConfiguration` or related +annotations. You may use setter injection, field injection, or both, depending on +which annotations you choose and whether you place them on setter methods or fields. +If you are using JUnit Jupiter you may also optionally use constructor injection +(see [Dependency Injection with `SpringExtension`](#testcontext-junit-jupiter-di)). For consistency with Spring’s annotation-based +injection support, you may also use Spring’s `@Autowired` annotation or the `@Inject`annotation from JSR-330 for field and setter injection. + +| |For testing frameworks other than JUnit Jupiter, the TestContext framework does not<br/>participate in instantiation of the test class. 
Thus, the use of `@Autowired` or`@Inject` for constructors has no effect for test classes.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Although field injection is discouraged in production code, field injection is<br/>actually quite natural in test code. The rationale for the difference is that you will<br/>never instantiate your test class directly. Consequently, there is no need to be able to<br/>invoke a `public` constructor or setter method on your test class.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Because `@Autowired` is used to perform [autowiring by +type](core.html#beans-factory-autowire), if you have multiple bean definitions of the same type, you cannot rely on this +approach for those particular beans. In that case, you can use `@Autowired` in +conjunction with `@Qualifier`. You can also choose to use `@Inject` in conjunction with`@Named`. Alternatively, if your test class has access to its `ApplicationContext`, you +can perform an explicit lookup by using (for example) a call to`applicationContext.getBean("titleRepository", TitleRepository.class)`. + +If you do not want dependency injection applied to your test instances, do not annotate +fields or setter methods with `@Autowired` or `@Inject`. Alternatively, you can disable +dependency injection altogether by explicitly configuring your class with`@TestExecutionListeners` and omitting `DependencyInjectionTestExecutionListener.class`from the list of listeners. 
+ +Consider the scenario of testing a `HibernateTitleRepository` class, as outlined in the[Goals](#integration-testing-goals) section. The next two code listings demonstrate the +use of `@Autowired` on fields and setter methods. The application context configuration +is presented after all sample code listings. + +| |The dependency injection behavior in the following code listings is not specific to JUnit<br/>Jupiter. The same DI techniques can be used in conjunction with any supported testing<br/>framework.<br/><br/>The following examples make calls to static assertion methods, such as `assertNotNull()`,<br/>but without prepending the call with `Assertions`. In such cases, assume that the method<br/>was properly imported through an `import static` declaration that is not shown in the<br/>example.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The first code listing shows a JUnit Jupiter based implementation of the test class that +uses `@Autowired` for field injection: + +Java + +``` +@ExtendWith(SpringExtension.class) +// specifies the Spring configuration to load for this test fixture +@ContextConfiguration("repository-config.xml") +class HibernateTitleRepositoryTests { + + // this instance will be dependency injected by type + @Autowired + HibernateTitleRepository titleRepository; + + @Test + void findById() { + Title title = titleRepository.findById(new Long(10)); + assertNotNull(title); + } +} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// specifies the Spring configuration to load for this test 
fixture +@ContextConfiguration("repository-config.xml") +class HibernateTitleRepositoryTests { + + // this instance will be dependency injected by type + @Autowired + lateinit var titleRepository: HibernateTitleRepository + + @Test + fun findById() { + val title = titleRepository.findById(10) + assertNotNull(title) + } +} +``` + +Alternatively, you can configure the class to use `@Autowired` for setter injection, as +follows: + +Java + +``` +@ExtendWith(SpringExtension.class) +// specifies the Spring configuration to load for this test fixture +@ContextConfiguration("repository-config.xml") +class HibernateTitleRepositoryTests { + + // this instance will be dependency injected by type + HibernateTitleRepository titleRepository; + + @Autowired + void setTitleRepository(HibernateTitleRepository titleRepository) { + this.titleRepository = titleRepository; + } + + @Test + void findById() { + Title title = titleRepository.findById(new Long(10)); + assertNotNull(title); + } +} +``` + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +// specifies the Spring configuration to load for this test fixture +@ContextConfiguration("repository-config.xml") +class HibernateTitleRepositoryTests { + + // this instance will be dependency injected by type + lateinit var titleRepository: HibernateTitleRepository + + @Autowired + fun setTitleRepository(titleRepository: HibernateTitleRepository) { + this.titleRepository = titleRepository + } + + @Test + fun findById() { + val title = titleRepository.findById(10) + assertNotNull(title) + } +} +``` + +The preceding code listings use the same XML context file referenced by the`@ContextConfiguration` annotation (that is, `repository-config.xml`). 
The following +shows this configuration: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://www.springframework.org/schema/beans + https://www.springframework.org/schema/beans/spring-beans.xsd"> + + <!-- this bean will be injected into the HibernateTitleRepositoryTests class --> + <bean id="titleRepository" class="com.foo.repository.hibernate.HibernateTitleRepository"> + <property name="sessionFactory" ref="sessionFactory"/> + </bean> + + <bean id="sessionFactory" class="org.springframework.orm.hibernate5.LocalSessionFactoryBean"> + <!-- configuration elided for brevity --> + </bean> + +</beans> +``` + +| |If you are extending from a Spring-provided test base class that happens to use`@Autowired` on one of its setter methods, you might have multiple beans of the affected<br/>type defined in your application context (for example, multiple `DataSource` beans). In<br/>such a case, you can override the setter method and use the `@Qualifier` annotation to<br/>indicate a specific target bean, as follows (but make sure to delegate to the overridden<br/>method in the superclass as well):<br/><br/>Java<br/><br/>```<br/>// ...<br/><br/> @Autowired<br/> @Override<br/> public void setDataSource(@Qualifier("myDataSource") DataSource dataSource) {<br/> super.setDataSource(dataSource);<br/> }<br/><br/>// ...<br/>```<br/><br/>Kotlin<br/><br/>```<br/>// ...<br/><br/> @Autowired<br/> override fun setDataSource(@Qualifier("myDataSource") dataSource: DataSource) {<br/> super.setDataSource(dataSource)<br/> }<br/><br/>// ...<br/>```<br/><br/>The specified qualifier value indicates the specific `DataSource` bean to inject,<br/>narrowing the set of type matches to a specific bean. Its value is matched against`<qualifier>` declarations within the corresponding `<bean>` definitions. 
The bean name<br/>is used as a fallback qualifier value, so you can effectively also point to a specific<br/>bean by name there (as shown earlier, assuming that `myDataSource` is the bean `id`).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.5.8. 
Testing Request- and Session-scoped Beans + +Spring has supported [Request- and session-scoped +beans](core.html#beans-factory-scopes-other) since the early years, and you can test your request-scoped and session-scoped +beans by following these steps: + +* Ensure that a `WebApplicationContext` is loaded for your test by annotating your test + class with `@WebAppConfiguration`. + +* Inject the mock request or session into your test instance and prepare your test + fixture as appropriate. + +* Invoke your web component that you retrieved from the configured`WebApplicationContext` (with dependency injection). + +* Perform assertions against the mocks. + +The next code snippet shows the XML configuration for a login use case. Note that the`userService` bean has a dependency on a request-scoped `loginAction` bean. Also, the`LoginAction` is instantiated by using [SpEL expressions](core.html#expressions) that +retrieve the username and password from the current HTTP request. In our test, we want to +configure these request parameters through the mock managed by the TestContext framework. +The following listing shows the configuration for this use case: + +Request-scoped bean configuration + +``` +<beans> + + <bean id="userService" class="com.example.SimpleUserService" + c:loginAction-ref="loginAction"/> + + <bean id="loginAction" class="com.example.LoginAction" + c:username="#{request.getParameter('user')}" + c:password="#{request.getParameter('pswd')}" + scope="request"> + <aop:scoped-proxy/> + </bean> + +</beans> +``` + +In `RequestScopedBeanTests`, we inject both the `UserService` (that is, the subject under +test) and the `MockHttpServletRequest` into our test instance. Within our`requestScope()` test method, we set up our test fixture by setting request parameters in +the provided `MockHttpServletRequest`. 
When the `loginUser()` method is invoked on our`userService`, we are assured that the user service has access to the request-scoped`loginAction` for the current `MockHttpServletRequest` (that is, the one in which we just +set parameters). We can then perform assertions against the results based on the known +inputs for the username and password. The following listing shows how to do so: + +Java + +``` +@SpringJUnitWebConfig +class RequestScopedBeanTests { + + @Autowired UserService userService; + @Autowired MockHttpServletRequest request; + + @Test + void requestScope() { + request.setParameter("user", "enigma"); + request.setParameter("pswd", "$pr!ng"); + + LoginResults results = userService.loginUser(); + // assert results + } +} +``` + +Kotlin + +``` +@SpringJUnitWebConfig +class RequestScopedBeanTests { + + @Autowired lateinit var userService: UserService + @Autowired lateinit var request: MockHttpServletRequest + + @Test + fun requestScope() { + request.setParameter("user", "enigma") + request.setParameter("pswd", "\$pr!ng") + + val results = userService.loginUser() + // assert results + } +} +``` + +The following code snippet is similar to the one we saw earlier for a request-scoped +bean. However, this time, the `userService` bean has a dependency on a session-scoped`userPreferences` bean. Note that the `UserPreferences` bean is instantiated by using a +SpEL expression that retrieves the theme from the current HTTP session. In our test, we +need to configure a theme in the mock session managed by the TestContext framework. 
The +following example shows how to do so: + +Session-scoped bean configuration + +``` +<beans> + + <bean id="userService" class="com.example.SimpleUserService" + c:userPreferences-ref="userPreferences" /> + + <bean id="userPreferences" class="com.example.UserPreferences" + c:theme="#{session.getAttribute('theme')}" + scope="session"> + <aop:scoped-proxy/> + </bean> + +</beans> +``` + +In `SessionScopedBeanTests`, we inject the `UserService` and the `MockHttpSession` into +our test instance. Within our `sessionScope()` test method, we set up our test fixture by +setting the expected `theme` attribute in the provided `MockHttpSession`. When the`processUserPreferences()` method is invoked on our `userService`, we are assured that +the user service has access to the session-scoped `userPreferences` for the current`MockHttpSession`, and we can perform assertions against the results based on the +configured theme. The following example shows how to do so: + +Java + +``` +@SpringJUnitWebConfig +class SessionScopedBeanTests { + + @Autowired UserService userService; + @Autowired MockHttpSession session; + + @Test + void sessionScope() throws Exception { + session.setAttribute("theme", "blue"); + + Results results = userService.processUserPreferences(); + // assert results + } +} +``` + +Kotlin + +``` +@SpringJUnitWebConfig +class SessionScopedBeanTests { + + @Autowired lateinit var userService: UserService + @Autowired lateinit var session: MockHttpSession + + @Test + fun sessionScope() { + session.setAttribute("theme", "blue") + + val results = userService.processUserPreferences() + // assert results + } +} +``` + +#### 3.5.9. Transaction Management + +In the TestContext framework, transactions are managed by the`TransactionalTestExecutionListener`, which is configured by default, even if you do not +explicitly declare `@TestExecutionListeners` on your test class. 
To enable support for +transactions, however, you must configure a `PlatformTransactionManager` bean in the`ApplicationContext` that is loaded with `@ContextConfiguration` semantics (further +details are provided later). In addition, you must declare Spring’s `@Transactional`annotation either at the class or the method level for your tests. + +##### Test-managed Transactions + +Test-managed transactions are transactions that are managed declaratively by using the`TransactionalTestExecutionListener` or programmatically by using `TestTransaction`(described later). You should not confuse such transactions with Spring-managed +transactions (those managed directly by Spring within the `ApplicationContext` loaded for +tests) or application-managed transactions (those managed programmatically within +application code that is invoked by tests). Spring-managed and application-managed +transactions typically participate in test-managed transactions. However, you should use +caution if Spring-managed or application-managed transactions are configured with any +propagation type other than `REQUIRED` or `SUPPORTS` (see the discussion on[transaction propagation](data-access.html#tx-propagation) for details). + +| |Preemptive timeouts and test-managed transactions<br/><br/>Caution must be taken when using any form of preemptive timeouts from a testing framework<br/>in conjunction with Spring’s test-managed transactions.<br/><br/>Specifically, Spring’s testing support binds transaction state to the current thread (via<br/>a `java.lang.ThreadLocal` variable) *before* the current test method is invoked. If a<br/>testing framework invokes the current test method in a new thread in order to support a<br/>preemptive timeout, any actions performed within the current test method will *not* be<br/>invoked within the test-managed transaction. Consequently, the result of any such actions<br/>will not be rolled back with the test-managed transaction. 
On the contrary, such actions<br/>will be committed to the persistent store — for example, a relational database — even<br/>though the test-managed transaction is properly rolled back by Spring.<br/><br/>Situations in which this can occur include but are not limited to the following.<br/><br/>* JUnit 4’s `@Test(timeout = …​)` support and `TimeOut` rule<br/><br/>* JUnit Jupiter’s `assertTimeoutPreemptively(…​)` methods in the`org.junit.jupiter.api.Assertions` class<br/><br/>* TestNG’s `@Test(timeOut = …​)` support| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Enabling and Disabling Transactions + +Annotating a test method with `@Transactional` causes the test to be run within a +transaction that is, by default, automatically rolled back after completion of the 
test. +If a test class is annotated with `@Transactional`, each test method within that class +hierarchy runs within a transaction. Test methods that are not annotated with`@Transactional` (at the class or method level) are not run within a transaction. Note +that `@Transactional` is not supported on test lifecycle methods — for example, methods +annotated with JUnit Jupiter’s `@BeforeAll`, `@BeforeEach`, etc. Furthermore, tests that +are annotated with `@Transactional` but have the `propagation` attribute set to`NOT_SUPPORTED` or `NEVER` are not run within a transaction. + +| Attribute | Supported for test-managed transactions | +|--------------------------------------------|----------------------------------------------------------------------| +| `value` and `transactionManager` | yes | +| `propagation` |only `Propagation.NOT_SUPPORTED` and `Propagation.NEVER` are supported| +| `isolation` | no | +| `timeout` | no | +| `readOnly` | no | +| `rollbackFor` and `rollbackForClassName` | no: use `TestTransaction.flagForRollback()` instead | +|`noRollbackFor` and `noRollbackForClassName`| no: use `TestTransaction.flagForCommit()` instead | + +| |Method-level lifecycle methods — for example, methods annotated with JUnit Jupiter’s`@BeforeEach` or `@AfterEach` — are run within a test-managed transaction. 
On the other<br/>hand, suite-level and class-level lifecycle methods — for example, methods annotated with<br/>JUnit Jupiter’s `@BeforeAll` or `@AfterAll` and methods annotated with TestNG’s`@BeforeSuite`, `@AfterSuite`, `@BeforeClass`, or `@AfterClass` — are *not* run within a<br/>test-managed transaction.<br/><br/>If you need to run code in a suite-level or class-level lifecycle method within a<br/>transaction, you may wish to inject a corresponding `PlatformTransactionManager` into<br/>your test class and then use that with a `TransactionTemplate` for programmatic<br/>transaction management.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Note that [`AbstractTransactionalJUnit4SpringContextTests`](#testcontext-support-classes-junit4) and[`AbstractTransactionalTestNGSpringContextTests`](#testcontext-support-classes-testng)are preconfigured for transactional support at the class level. 
+ +The following example demonstrates a common scenario for writing an integration test for +a Hibernate-based `UserRepository`: + +Java + +``` +@SpringJUnitConfig(TestConfig.class) +@Transactional +class HibernateUserRepositoryTests { + + @Autowired + HibernateUserRepository repository; + + @Autowired + SessionFactory sessionFactory; + + JdbcTemplate jdbcTemplate; + + @Autowired + void setDataSource(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + @Test + void createUser() { + // track initial state in test database: + final int count = countRowsInTable("user"); + + User user = new User(...); + repository.save(user); + + // Manual flush is required to avoid false positive in test + sessionFactory.getCurrentSession().flush(); + assertNumUsers(count + 1); + } + + private int countRowsInTable(String tableName) { + return JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName); + } + + private void assertNumUsers(int expected) { + assertEquals("Number of rows in the [user] table.", expected, countRowsInTable("user")); + } +} +``` + +Kotlin + +``` +@SpringJUnitConfig(TestConfig::class) +@Transactional +class HibernateUserRepositoryTests { + + @Autowired + lateinit var repository: HibernateUserRepository + + @Autowired + lateinit var sessionFactory: SessionFactory + + lateinit var jdbcTemplate: JdbcTemplate + + @Autowired + fun setDataSource(dataSource: DataSource) { + this.jdbcTemplate = JdbcTemplate(dataSource) + } + + @Test + fun createUser() { + // track initial state in test database: + val count = countRowsInTable("user") + + val user = User() + repository.save(user) + + // Manual flush is required to avoid false positive in test + sessionFactory.getCurrentSession().flush() + assertNumUsers(count + 1) + } + + private fun countRowsInTable(tableName: String): Int { + return JdbcTestUtils.countRowsInTable(jdbcTemplate, tableName) + } + + private fun assertNumUsers(expected: Int) { + assertEquals("Number of rows in the [user] 
table.", expected, countRowsInTable("user")) + } +} +``` + +As explained in [Transaction Rollback and Commit Behavior](#testcontext-tx-rollback-and-commit-behavior), there is no need to +clean up the database after the `createUser()` method runs, since any changes made to the +database are automatically rolled back by the `TransactionalTestExecutionListener`. + +##### Transaction Rollback and Commit Behavior + +By default, test transactions will be automatically rolled back after completion of the +test; however, transactional commit and rollback behavior can be configured declaratively +via the `@Commit` and `@Rollback` annotations. See the corresponding entries in the[annotation support](#integration-testing-annotations) section for further details. + +##### Programmatic Transaction Management + +You can interact with test-managed transactions programmatically by using the static +methods in `TestTransaction`. For example, you can use `TestTransaction` within test +methods, before methods, and after methods to start or end the current test-managed +transaction or to configure the current test-managed transaction for rollback or commit. +Support for `TestTransaction` is automatically available whenever the`TransactionalTestExecutionListener` is enabled. + +The following example demonstrates some of the features of `TestTransaction`. See the +javadoc for [`TestTransaction`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/transaction/TestTransaction.html)for further details. + +Java + +``` +@ContextConfiguration(classes = TestConfig.class) +public class ProgrammaticTransactionManagementTests extends + AbstractTransactionalJUnit4SpringContextTests { + + @Test + public void transactionalTest() { + // assert initial state in test database: + assertNumUsers(2); + + deleteFromTables("user"); + + // changes to the database will be committed! 
+ TestTransaction.flagForCommit(); + TestTransaction.end(); + assertFalse(TestTransaction.isActive()); + assertNumUsers(0); + + TestTransaction.start(); + // perform other actions against the database that will + // be automatically rolled back after the test completes... + } + + protected void assertNumUsers(int expected) { + assertEquals("Number of rows in the [user] table.", expected, countRowsInTable("user")); + } +} +``` + +Kotlin + +``` +@ContextConfiguration(classes = [TestConfig::class]) +class ProgrammaticTransactionManagementTests : AbstractTransactionalJUnit4SpringContextTests() { + + @Test + fun transactionalTest() { + // assert initial state in test database: + assertNumUsers(2) + + deleteFromTables("user") + + // changes to the database will be committed! + TestTransaction.flagForCommit() + TestTransaction.end() + assertFalse(TestTransaction.isActive()) + assertNumUsers(0) + + TestTransaction.start() + // perform other actions against the database that will + // be automatically rolled back after the test completes... + } + + protected fun assertNumUsers(expected: Int) { + assertEquals("Number of rows in the [user] table.", expected, countRowsInTable("user")) + } +} +``` + +##### Running Code Outside of a Transaction + +Occasionally, you may need to run certain code before or after a transactional test +method but outside the transactional context — for example, to verify the initial +database state prior to running your test or to verify expected transactional commit +behavior after your test runs (if the test was configured to commit the transaction).`TransactionalTestExecutionListener` supports the `@BeforeTransaction` and`@AfterTransaction` annotations for exactly such scenarios. 
You can annotate any `void` method in a test class
+ +##### Demonstration of All Transaction-related Annotations + +The following JUnit Jupiter based example displays a fictitious integration testing +scenario that highlights all transaction-related annotations. The example is not intended +to demonstrate best practices but rather to demonstrate how these annotations can be +used. See the [annotation support](#integration-testing-annotations) section for further +information and configuration examples. [Transaction management for `@Sql`](#testcontext-executing-sql-declaratively-tx) contains an additional example that uses `@Sql` for +declarative SQL script execution with default transaction rollback semantics. The +following example shows the relevant annotations: + +Java + +``` +@SpringJUnitConfig +@Transactional(transactionManager = "txMgr") +@Commit +class FictitiousTransactionalTest { + + @BeforeTransaction + void verifyInitialDatabaseState() { + // logic to verify the initial state before a transaction is started + } + + @BeforeEach + void setUpTestDataWithinTransaction() { + // set up test data within the transaction + } + + @Test + // overrides the class-level @Commit setting + @Rollback + void modifyDatabaseWithinTransaction() { + // logic which uses the test data and modifies database state + } + + @AfterEach + void tearDownWithinTransaction() { + // run "tear down" logic within the transaction + } + + @AfterTransaction + void verifyFinalDatabaseState() { + // logic to verify the final state after transaction has rolled back + } + +} +``` + +Kotlin + +``` +@SpringJUnitConfig +@Transactional(transactionManager = "txMgr") +@Commit +class FictitiousTransactionalTest { + + @BeforeTransaction + fun verifyInitialDatabaseState() { + // logic to verify the initial state before a transaction is started + } + + @BeforeEach + fun setUpTestDataWithinTransaction() { + // set up test data within the transaction + } + + @Test + // overrides the class-level @Commit setting + @Rollback + fun 
modifyDatabaseWithinTransaction() { + // logic which uses the test data and modifies database state + } + + @AfterEach + fun tearDownWithinTransaction() { + // run "tear down" logic within the transaction + } + + @AfterTransaction + fun verifyFinalDatabaseState() { + // logic to verify the final state after transaction has rolled back + } + +} +``` + +| |Avoid false positives when testing ORM code<br/><br/>When you test application code that manipulates the state of a Hibernate session or JPA<br/>persistence context, make sure to flush the underlying unit of work within test methods<br/>that run that code. Failing to flush the underlying unit of work can produce false<br/>positives: Your test passes, but the same code throws an exception in a live, production<br/>environment. Note that this applies to any ORM framework that maintains an in-memory unit<br/>of work. In the following Hibernate-based example test case, one method demonstrates a<br/>false positive, and the other method correctly exposes the results of flushing the<br/>session:<br/><br/>Java<br/><br/>```<br/>// ...<br/><br/>@Autowired<br/>SessionFactory sessionFactory;<br/><br/>@Transactional<br/>@Test // no expected exception!<br/>public void falsePositive() {<br/> updateEntityInHibernateSession();<br/> // False positive: an exception will be thrown once the Hibernate<br/> // Session is finally flushed (i.e., in production code)<br/>}<br/><br/>@Transactional<br/>@Test(expected = ...)<br/>public void updateWithSessionFlush() {<br/> updateEntityInHibernateSession();<br/> // Manual flush is required to avoid false positive in test<br/> sessionFactory.getCurrentSession().flush();<br/>}<br/><br/>// ...<br/>```<br/><br/>Kotlin<br/><br/>```<br/>// ...<br/><br/>@Autowired<br/>lateinit var sessionFactory: SessionFactory<br/><br/>@Transactional<br/>@Test // no expected exception!<br/>fun falsePositive() {<br/> updateEntityInHibernateSession()<br/> // False positive: an exception will be thrown once the 
Hibernate<br/> // Session is finally flushed (i.e., in production code)<br/>}<br/><br/>@Transactional<br/>@Test(expected = ...)<br/>fun updateWithSessionFlush() {<br/> updateEntityInHibernateSession()<br/> // Manual flush is required to avoid false positive in test<br/> sessionFactory.getCurrentSession().flush()<br/>}<br/><br/>// ...<br/>```<br/><br/>The following example shows matching methods for JPA:<br/><br/>Java<br/><br/>```<br/>// ...<br/><br/>@PersistenceContext<br/>EntityManager entityManager;<br/><br/>@Transactional<br/>@Test // no expected exception!<br/>public void falsePositive() {<br/> updateEntityInJpaPersistenceContext();<br/> // False positive: an exception will be thrown once the JPA<br/> // EntityManager is finally flushed (i.e., in production code)<br/>}<br/><br/>@Transactional<br/>@Test(expected = ...)<br/>public void updateWithEntityManagerFlush() {<br/> updateEntityInJpaPersistenceContext();<br/> // Manual flush is required to avoid false positive in test<br/> entityManager.flush();<br/>}<br/><br/>// ...<br/>```<br/><br/>Kotlin<br/><br/>```<br/>// ...<br/><br/>@PersistenceContext<br/>lateinit var entityManager:EntityManager<br/><br/>@Transactional<br/>@Test // no expected exception!<br/>fun falsePositive() {<br/> updateEntityInJpaPersistenceContext()<br/> // False positive: an exception will be thrown once the JPA<br/> // EntityManager is finally flushed (i.e., in production code)<br/>}<br/><br/>@Transactional<br/>@Test(expected = ...)<br/>void updateWithEntityManagerFlush() {<br/> updateEntityInJpaPersistenceContext()<br/> // Manual flush is required to avoid false positive in test<br/> entityManager.flush()<br/>}<br/><br/>// ...<br/>```| 
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.5.10. Executing SQL Scripts + +When writing integration tests against a relational database, it is often beneficial to +run SQL scripts to modify the database schema or insert test data into tables. The`spring-jdbc` module provides support for *initializing* an embedded or existing database +by executing SQL scripts when the Spring `ApplicationContext` is loaded. See[Embedded database support](data-access.html#jdbc-embedded-database-support) and[Testing data access logic with an +embedded database](data-access.html#jdbc-embedded-database-dao-testing) for details. 
+ +Although it is very useful to initialize a database for testing *once* when the`ApplicationContext` is loaded, sometimes it is essential to be able to modify the +database *during* integration tests. The following sections explain how to run SQL +scripts programmatically and declaratively during integration tests. + +##### Executing SQL scripts programmatically + +Spring provides the following options for executing SQL scripts programmatically within +integration test methods. + +* `org.springframework.jdbc.datasource.init.ScriptUtils` + +* `org.springframework.jdbc.datasource.init.ResourceDatabasePopulator` + +* `org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests` + +* `org.springframework.test.context.testng.AbstractTransactionalTestNGSpringContextTests` + +`ScriptUtils` provides a collection of static utility methods for working with SQL +scripts and is mainly intended for internal use within the framework. However, if you +require full control over how SQL scripts are parsed and run, `ScriptUtils` may suit +your needs better than some of the other alternatives described later. See the[javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jdbc/datasource/init/ScriptUtils.html) for individual +methods in `ScriptUtils` for further details. + +`ResourceDatabasePopulator` provides an object-based API for programmatically populating, +initializing, or cleaning up a database by using SQL scripts defined in external +resources. `ResourceDatabasePopulator` provides options for configuring the character +encoding, statement separator, comment delimiters, and error handling flags used when +parsing and running the scripts. Each of the configuration options has a reasonable +default value. See the[javadoc](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/jdbc/datasource/init/ResourceDatabasePopulator.html) for +details on default values. 
To run the scripts configured in a`ResourceDatabasePopulator`, you can invoke either the `populate(Connection)` method to +run the populator against a `java.sql.Connection` or the `execute(DataSource)` method +to run the populator against a `javax.sql.DataSource`. The following example +specifies SQL scripts for a test schema and test data, sets the statement separator to`@@`, and run the scripts against a `DataSource`: + +Java + +``` +@Test +void databaseTest() { + ResourceDatabasePopulator populator = new ResourceDatabasePopulator(); + populator.addScripts( + new ClassPathResource("test-schema.sql"), + new ClassPathResource("test-data.sql")); + populator.setSeparator("@@"); + populator.execute(this.dataSource); + // run code that uses the test schema and data +} +``` + +Kotlin + +``` +@Test +fun databaseTest() { + val populator = ResourceDatabasePopulator() + populator.addScripts( + ClassPathResource("test-schema.sql"), + ClassPathResource("test-data.sql")) + populator.setSeparator("@@") + populator.execute(dataSource) + // run code that uses the test schema and data +} +``` + +Note that `ResourceDatabasePopulator` internally delegates to `ScriptUtils` for parsing +and running SQL scripts. Similarly, the `executeSqlScript(..)` methods in[`AbstractTransactionalJUnit4SpringContextTests`](#testcontext-support-classes-junit4)and [`AbstractTransactionalTestNGSpringContextTests`](#testcontext-support-classes-testng)internally use a `ResourceDatabasePopulator` to run SQL scripts. See the Javadoc for the +various `executeSqlScript(..)` methods for further details. + +##### Executing SQL scripts declaratively with @Sql + +In addition to the aforementioned mechanisms for running SQL scripts programmatically, +you can declaratively configure SQL scripts in the Spring TestContext Framework. 
+Specifically, you can declare the `@Sql` annotation on a test class or test method to +configure individual SQL statements or the resource paths to SQL scripts that should be +run against a given database before or after an integration test method. Support for`@Sql` is provided by the `SqlScriptsTestExecutionListener`, which is enabled by default. + +| |Method-level `@Sql` declarations override class-level declarations by default. As<br/>of Spring Framework 5.2, however, this behavior may be configured per test class or per<br/>test method via `@SqlMergeMode`. See[Merging and Overriding Configuration with `@SqlMergeMode`](#testcontext-executing-sql-declaratively-script-merging) for further details.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Path Resource Semantics + +Each path is interpreted as a Spring `Resource`. A plain path (for example,`"schema.sql"`) is treated as a classpath resource that is relative to the package in +which the test class is defined. A path starting with a slash is treated as an absolute +classpath resource (for example, `"/org/example/schema.sql"`). A path that references a +URL (for example, a path prefixed with `classpath:`, `file:`, `http:`) is loaded by using +the specified resource protocol. 
+ +The following example shows how to use `@Sql` at the class level and at the method level +within a JUnit Jupiter based integration test class: + +Java + +``` +@SpringJUnitConfig +@Sql("/test-schema.sql") +class DatabaseTests { + + @Test + void emptySchemaTest() { + // run code that uses the test schema without any test data + } + + @Test + @Sql({"/test-schema.sql", "/test-user-data.sql"}) + void userTest() { + // run code that uses the test schema and test data + } +} +``` + +Kotlin + +``` +@SpringJUnitConfig +@Sql("/test-schema.sql") +class DatabaseTests { + + @Test + fun emptySchemaTest() { + // run code that uses the test schema without any test data + } + + @Test + @Sql("/test-schema.sql", "/test-user-data.sql") + fun userTest() { + // run code that uses the test schema and test data + } +} +``` + +###### Default Script Detection + +If no SQL scripts or statements are specified, an attempt is made to detect a `default`script, depending on where `@Sql` is declared. If a default cannot be detected, an`IllegalStateException` is thrown. + +* Class-level declaration: If the annotated test class is `com.example.MyTest`, the + corresponding default script is `classpath:com/example/MyTest.sql`. + +* Method-level declaration: If the annotated test method is named `testMethod()` and is + defined in the class `com.example.MyTest`, the corresponding default script is`classpath:com/example/MyTest.testMethod.sql`. + +###### Declaring Multiple `@Sql` Sets + +If you need to configure multiple sets of SQL scripts for a given test class or test +method but with different syntax configuration, different error handling rules, or +different execution phases per set, you can declare multiple instances of `@Sql`. With +Java 8, you can use `@Sql` as a repeatable annotation. Otherwise, you can use the`@SqlGroup` annotation as an explicit container for declaring multiple instances of`@Sql`. 
+ +The following example shows how to use `@Sql` as a repeatable annotation with Java 8: + +Java + +``` +@Test +@Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`")) +@Sql("/test-user-data.sql") +void userTest() { + // run code that uses the test schema and test data +} +``` + +Kotlin + +``` +// Repeatable annotations with non-SOURCE retention are not yet supported by Kotlin +``` + +In the scenario presented in the preceding example, the `test-schema.sql` script uses a +different syntax for single-line comments. + +The following example is identical to the preceding example, except that the `@Sql`declarations are grouped together within `@SqlGroup`. With Java 8 and above, the use of`@SqlGroup` is optional, but you may need to use `@SqlGroup` for compatibility with +other JVM languages such as Kotlin. + +Java + +``` +@Test +@SqlGroup({ + @Sql(scripts = "/test-schema.sql", config = @SqlConfig(commentPrefix = "`")), + @Sql("/test-user-data.sql") +)} +void userTest() { + // run code that uses the test schema and test data +} +``` + +Kotlin + +``` +@Test +@SqlGroup( + Sql("/test-schema.sql", config = SqlConfig(commentPrefix = "`")), + Sql("/test-user-data.sql")) +fun userTest() { + // Run code that uses the test schema and test data +} +``` + +###### Script Execution Phases + +By default, SQL scripts are run before the corresponding test method. 
However, if +you need to run a particular set of scripts after the test method (for example, to clean +up database state), you can use the `executionPhase` attribute in `@Sql`, as the +following example shows: + +Java + +``` +@Test +@Sql( + scripts = "create-test-data.sql", + config = @SqlConfig(transactionMode = ISOLATED) +) +@Sql( + scripts = "delete-test-data.sql", + config = @SqlConfig(transactionMode = ISOLATED), + executionPhase = AFTER_TEST_METHOD +) +void userTest() { + // run code that needs the test data to be committed + // to the database outside of the test's transaction +} +``` + +Kotlin + +``` +@Test +@SqlGroup( + Sql("create-test-data.sql", + config = SqlConfig(transactionMode = ISOLATED)), + Sql("delete-test-data.sql", + config = SqlConfig(transactionMode = ISOLATED), + executionPhase = AFTER_TEST_METHOD)) +fun userTest() { + // run code that needs the test data to be committed + // to the database outside of the test's transaction +} +``` + +Note that `ISOLATED` and `AFTER_TEST_METHOD` are statically imported from`Sql.TransactionMode` and `Sql.ExecutionPhase`, respectively. + +###### Script Configuration with `@SqlConfig` + +You can configure script parsing and error handling by using the `@SqlConfig` annotation. +When declared as a class-level annotation on an integration test class, `@SqlConfig`serves as global configuration for all SQL scripts within the test class hierarchy. When +declared directly by using the `config` attribute of the `@Sql` annotation, `@SqlConfig`serves as local configuration for the SQL scripts declared within the enclosing `@Sql`annotation. Every attribute in `@SqlConfig` has an implicit default value, which is +documented in the javadoc of the corresponding attribute. Due to the rules defined for +annotation attributes in the Java Language Specification, it is, unfortunately, not +possible to assign a value of `null` to an annotation attribute. 
Thus, in order to +support overrides of inherited global configuration, `@SqlConfig` attributes have an +explicit default value of either `""` (for Strings), `{}` (for arrays), or `DEFAULT` (for +enumerations). This approach lets local declarations of `@SqlConfig` selectively override +individual attributes from global declarations of `@SqlConfig` by providing a value other +than `""`, `{}`, or `DEFAULT`. Global `@SqlConfig` attributes are inherited whenever +local `@SqlConfig` attributes do not supply an explicit value other than `""`, `{}`, or`DEFAULT`. Explicit local configuration, therefore, overrides global configuration. + +The configuration options provided by `@Sql` and `@SqlConfig` are equivalent to those +supported by `ScriptUtils` and `ResourceDatabasePopulator` but are a superset of those +provided by the `<jdbc:initialize-database/>` XML namespace element. See the javadoc of +individual attributes in [`@Sql`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/jdbc/Sql.html) and[`@SqlConfig`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/jdbc/SqlConfig.html) for details. + +**Transaction management for `@Sql`** + +By default, the `SqlScriptsTestExecutionListener` infers the desired transaction +semantics for scripts configured by using `@Sql`. Specifically, SQL scripts are run +without a transaction, within an existing Spring-managed transaction (for example, a +transaction managed by the `TransactionalTestExecutionListener` for a test annotated with`@Transactional`), or within an isolated transaction, depending on the configured value +of the `transactionMode` attribute in `@SqlConfig` and the presence of a`PlatformTransactionManager` in the test’s `ApplicationContext`. As a bare minimum, +however, a `javax.sql.DataSource` must be present in the test’s `ApplicationContext`. 
+ +If the algorithms used by `SqlScriptsTestExecutionListener` to detect a `DataSource` and`PlatformTransactionManager` and infer the transaction semantics do not suit your needs, +you can specify explicit names by setting the `dataSource` and `transactionManager`attributes of `@SqlConfig`. Furthermore, you can control the transaction propagation +behavior by setting the `transactionMode` attribute of `@SqlConfig` (for example, whether +scripts should be run in an isolated transaction). Although a thorough discussion of all +supported options for transaction management with `@Sql` is beyond the scope of this +reference manual, the javadoc for[`@SqlConfig`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/jdbc/SqlConfig.html) and[`SqlScriptsTestExecutionListener`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/jdbc/SqlScriptsTestExecutionListener.html)provide detailed information, and the following example shows a typical testing scenario +that uses JUnit Jupiter and transactional tests with `@Sql`: + +Java + +``` +@SpringJUnitConfig(TestDatabaseConfig.class) +@Transactional +class TransactionalSqlScriptsTests { + + final JdbcTemplate jdbcTemplate; + + @Autowired + TransactionalSqlScriptsTests(DataSource dataSource) { + this.jdbcTemplate = new JdbcTemplate(dataSource); + } + + @Test + @Sql("/test-data.sql") + void usersTest() { + // verify state in test database: + assertNumUsers(2); + // run code that uses the test data... 
+ } + + int countRowsInTable(String tableName) { + return JdbcTestUtils.countRowsInTable(this.jdbcTemplate, tableName); + } + + void assertNumUsers(int expected) { + assertEquals(expected, countRowsInTable("user"), + "Number of rows in the [user] table."); + } +} +``` + +Kotlin + +``` +@SpringJUnitConfig(TestDatabaseConfig::class) +@Transactional +class TransactionalSqlScriptsTests @Autowired constructor(dataSource: DataSource) { + + val jdbcTemplate: JdbcTemplate = JdbcTemplate(dataSource) + + @Test + @Sql("/test-data.sql") + fun usersTest() { + // verify state in test database: + assertNumUsers(2) + // run code that uses the test data... + } + + fun countRowsInTable(tableName: String): Int { + return JdbcTestUtils.countRowsInTable(jdbcTemplate, tableName) + } + + fun assertNumUsers(expected: Int) { + assertEquals(expected, countRowsInTable("user"), + "Number of rows in the [user] table.") + } +} +``` + +Note that there is no need to clean up the database after the `usersTest()` method is +run, since any changes made to the database (either within the test method or within the`/test-data.sql` script) are automatically rolled back by the`TransactionalTestExecutionListener` (see [transaction management](#testcontext-tx) for +details). + +###### Merging and Overriding Configuration with `@SqlMergeMode` + +As of Spring Framework 5.2, it is possible to merge method-level `@Sql` declarations with +class-level declarations. For example, this allows you to provide the configuration for a +database schema or some common test data once per test class and then provide additional, +use case specific test data per test method. To enable `@Sql` merging, annotate either +your test class or test method with `@SqlMergeMode(MERGE)`. To disable merging for a +specific test method (or specific test subclass), you can switch back to the default mode +via `@SqlMergeMode(OVERRIDE)`. 
Consult the [`@SqlMergeMode` annotation documentation section](#spring-testing-annotation-sqlmergemode) for examples and further details. + +#### 3.5.11. Parallel Test Execution + +Spring Framework 5.0 introduced basic support for executing tests in parallel within a +single JVM when using the Spring TestContext Framework. In general, this means that most +test classes or test methods can be run in parallel without any changes to test code +or configuration. + +| |For details on how to set up parallel test execution, see the documentation for your<br/>testing framework, build tool, or IDE.| +|---|-------------------------------------------------------------------------------------------------------------------------------| + +Keep in mind that the introduction of concurrency into your test suite can result in +unexpected side effects, strange runtime behavior, and tests that fail intermittently or +seemingly randomly. The Spring Team therefore provides the following general guidelines +for when not to run tests in parallel. + +Do not run tests in parallel if the tests: + +* Use Spring Framework’s `@DirtiesContext` support. + +* Use Spring Boot’s `@MockBean` or `@SpyBean` support. + +* Use JUnit 4’s `@FixMethodOrder` support or any testing framework feature + that is designed to ensure that test methods run in a particular order. Note, + however, that this does not apply if entire test classes are run in parallel. + +* Change the state of shared services or systems such as a database, message broker, + filesystem, and others. This applies to both embedded and external systems. + +| |If parallel test execution fails with an exception stating that the `ApplicationContext`for the current test is no longer active, this typically means that the`ApplicationContext` was removed from the `ContextCache` in a different thread.<br/><br/>This may be due to the use of `@DirtiesContext` or due to automatic eviction from the`ContextCache`. 
If `@DirtiesContext` is the culprit, you either need to find a way to<br/>avoid using `@DirtiesContext` or exclude such tests from parallel execution. If the<br/>maximum size of the `ContextCache` has been exceeded, you can increase the maximum size<br/>of the cache. See the discussion on [context caching](#testcontext-ctx-management-caching)for details.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Parallel test execution in the Spring TestContext Framework is only possible if<br/>the underlying `TestContext` implementation provides a copy constructor, as explained in<br/>the javadoc for [`TestContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/context/TestContext.html). The`DefaultTestContext` used in Spring provides such a constructor. 
However, if you use a<br/>third-party library that provides a custom `TestContext` implementation, you need to<br/>verify that it is suitable for parallel test execution.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 3.5.12. TestContext Framework Support Classes + +This section describes the various classes that support the Spring TestContext Framework. + +##### Spring JUnit 4 Runner + +The Spring TestContext Framework offers full integration with JUnit 4 through a custom +runner (supported on JUnit 4.12 or higher). By annotating test classes with`@RunWith(SpringJUnit4ClassRunner.class)` or the shorter `@RunWith(SpringRunner.class)`variant, developers can implement standard JUnit 4-based unit and integration tests and +simultaneously reap the benefits of the TestContext framework, such as support for +loading application contexts, dependency injection of test instances, transactional test +method execution, and so on. If you want to use the Spring TestContext Framework with an +alternative runner (such as JUnit 4’s `Parameterized` runner) or third-party runners +(such as the `MockitoJUnitRunner`), you can, optionally, use[Spring’s support for JUnit rules](#testcontext-junit4-rules) instead. 
+ +The following code listing shows the minimal requirements for configuring a test class to +run with the custom Spring `Runner`: + +Java + +``` +@RunWith(SpringRunner.class) +@TestExecutionListeners({}) +public class SimpleTest { + + @Test + public void testMethod() { + // test logic... + } +} +``` + +Kotlin + +``` +@RunWith(SpringRunner::class) +@TestExecutionListeners +class SimpleTest { + + @Test + fun testMethod() { + // test logic... + } +} +``` + +In the preceding example, `@TestExecutionListeners` is configured with an empty list, to +disable the default listeners, which otherwise would require an `ApplicationContext` to +be configured through `@ContextConfiguration`. + +##### Spring JUnit 4 Rules + +The `org.springframework.test.context.junit4.rules` package provides the following JUnit +4 rules (supported on JUnit 4.12 or higher): + +* `SpringClassRule` + +* `SpringMethodRule` + +`SpringClassRule` is a JUnit `TestRule` that supports class-level features of the Spring +TestContext Framework, whereas `SpringMethodRule` is a JUnit `MethodRule` that supports +instance-level and method-level features of the Spring TestContext Framework. + +In contrast to the `SpringRunner`, Spring’s rule-based JUnit support has the advantage of +being independent of any `org.junit.runner.Runner` implementation and can, therefore, be +combined with existing alternative runners (such as JUnit 4’s `Parameterized`) or +third-party runners (such as the `MockitoJUnitRunner`). + +To support the full functionality of the TestContext framework, you must combine a`SpringClassRule` with a `SpringMethodRule`. The following example shows the proper way +to declare these rules in an integration test: + +Java + +``` +// Optionally specify a non-Spring Runner via @RunWith(...) 
+@ContextConfiguration +public class IntegrationTest { + + @ClassRule + public static final SpringClassRule springClassRule = new SpringClassRule(); + + @Rule + public final SpringMethodRule springMethodRule = new SpringMethodRule(); + + @Test + public void testMethod() { + // test logic... + } +} +``` + +Kotlin + +``` +// Optionally specify a non-Spring Runner via @RunWith(...) +@ContextConfiguration +class IntegrationTest { + + @Rule + val springMethodRule = SpringMethodRule() + + @Test + fun testMethod() { + // test logic... + } + + companion object { + @ClassRule + val springClassRule = SpringClassRule() + } +} +``` + +##### JUnit 4 Support Classes + +The `org.springframework.test.context.junit4` package provides the following support +classes for JUnit 4-based test cases (supported on JUnit 4.12 or higher): + +* `AbstractJUnit4SpringContextTests` + +* `AbstractTransactionalJUnit4SpringContextTests` + +`AbstractJUnit4SpringContextTests` is an abstract base test class that integrates the +Spring TestContext Framework with explicit `ApplicationContext` testing support in a +JUnit 4 environment. When you extend `AbstractJUnit4SpringContextTests`, you can access a`protected` `applicationContext` instance variable that you can use to perform explicit +bean lookups or to test the state of the context as a whole. + +`AbstractTransactionalJUnit4SpringContextTests` is an abstract transactional extension of`AbstractJUnit4SpringContextTests` that adds some convenience functionality for JDBC +access. This class expects a `javax.sql.DataSource` bean and a`PlatformTransactionManager` bean to be defined in the `ApplicationContext`. When you +extend `AbstractTransactionalJUnit4SpringContextTests`, you can access a `protected``jdbcTemplate` instance variable that you can use to run SQL statements to query the +database. 
You can use such queries to confirm database state both before and after +running database-related application code, and Spring ensures that such queries run in +the scope of the same transaction as the application code. When used in conjunction with +an ORM tool, be sure to avoid [false positives](#testcontext-tx-false-positives). +As mentioned in [JDBC Testing Support](#integration-testing-support-jdbc),`AbstractTransactionalJUnit4SpringContextTests` also provides convenience methods that +delegate to methods in `JdbcTestUtils` by using the aforementioned `jdbcTemplate`. +Furthermore, `AbstractTransactionalJUnit4SpringContextTests` provides an`executeSqlScript(..)` method for running SQL scripts against the configured `DataSource`. + +| |These classes are a convenience for extension. If you do not want your test classes<br/>to be tied to a Spring-specific class hierarchy, you can configure your own custom test<br/>classes by using `@RunWith(SpringRunner.class)` or [Spring’s<br/>JUnit rules](#testcontext-junit4-rules).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### SpringExtension for JUnit Jupiter + +The Spring TestContext Framework offers full integration with the JUnit Jupiter testing +framework, introduced in JUnit 5. By annotating test classes with`@ExtendWith(SpringExtension.class)`, you can implement standard JUnit Jupiter-based unit +and integration tests and simultaneously reap the benefits of the TestContext framework, +such as support for loading application contexts, dependency injection of test instances, +transactional test method execution, and so on. 
+ +Furthermore, thanks to the rich extension API in JUnit Jupiter, Spring provides the +following features above and beyond the feature set that Spring supports for JUnit 4 and +TestNG: + +* Dependency injection for test constructors, test methods, and test lifecycle callback + methods. See [Dependency Injection with `SpringExtension`](#testcontext-junit-jupiter-di) for further details. + +* Powerful support for [conditional + test execution](https://junit.org/junit5/docs/current/user-guide/#extensions-conditions) based on SpEL expressions, environment variables, system properties, + and so on. See the documentation for `@EnabledIf` and `@DisabledIf` in[Spring JUnit Jupiter Testing Annotations](#integration-testing-annotations-junit-jupiter) for further details and examples. + +* Custom composed annotations that combine annotations from Spring and JUnit Jupiter. See + the `@TransactionalDevTestConfig` and `@TransactionalIntegrationTest` examples in[Meta-Annotation Support for Testing](#integration-testing-annotations-meta) for further details. + +The following code listing shows how to configure a test class to use the`SpringExtension` in conjunction with `@ContextConfiguration`: + +Java + +``` +// Instructs JUnit Jupiter to extend the test with Spring support. +@ExtendWith(SpringExtension.class) +// Instructs Spring to load an ApplicationContext from TestConfig.class +@ContextConfiguration(classes = TestConfig.class) +class SimpleTests { + + @Test + void testMethod() { + // test logic... + } +} +``` + +Kotlin + +``` +// Instructs JUnit Jupiter to extend the test with Spring support. +@ExtendWith(SpringExtension::class) +// Instructs Spring to load an ApplicationContext from TestConfig::class +@ContextConfiguration(classes = [TestConfig::class]) +class SimpleTests { + + @Test + fun testMethod() { + // test logic... 
+ } +} +``` + +Since you can also use annotations in JUnit 5 as meta-annotations, Spring provides the`@SpringJUnitConfig` and `@SpringJUnitWebConfig` composed annotations to simplify the +configuration of the test `ApplicationContext` and JUnit Jupiter. + +The following example uses `@SpringJUnitConfig` to reduce the amount of configuration +used in the previous example: + +Java + +``` +// Instructs Spring to register the SpringExtension with JUnit +// Jupiter and load an ApplicationContext from TestConfig.class +@SpringJUnitConfig(TestConfig.class) +class SimpleTests { + + @Test + void testMethod() { + // test logic... + } +} +``` + +Kotlin + +``` +// Instructs Spring to register the SpringExtension with JUnit +// Jupiter and load an ApplicationContext from TestConfig.class +@SpringJUnitConfig(TestConfig::class) +class SimpleTests { + + @Test + fun testMethod() { + // test logic... + } +} +``` + +Similarly, the following example uses `@SpringJUnitWebConfig` to create a`WebApplicationContext` for use with JUnit Jupiter: + +Java + +``` +// Instructs Spring to register the SpringExtension with JUnit +// Jupiter and load a WebApplicationContext from TestWebConfig.class +@SpringJUnitWebConfig(TestWebConfig.class) +class SimpleWebTests { + + @Test + void testMethod() { + // test logic... + } +} +``` + +Kotlin + +``` +// Instructs Spring to register the SpringExtension with JUnit +// Jupiter and load a WebApplicationContext from TestWebConfig::class +@SpringJUnitWebConfig(TestWebConfig::class) +class SimpleWebTests { + + @Test + fun testMethod() { + // test logic... + } +} +``` + +See the documentation for `@SpringJUnitConfig` and `@SpringJUnitWebConfig` in[Spring JUnit Jupiter Testing Annotations](#integration-testing-annotations-junit-jupiter) for further details. 
+ +##### Dependency Injection with `SpringExtension` + +`SpringExtension` implements the[`ParameterResolver`](https://junit.org/junit5/docs/current/user-guide/#extensions-parameter-resolution)extension API from JUnit Jupiter, which lets Spring provide dependency injection for test +constructors, test methods, and test lifecycle callback methods. + +Specifically, `SpringExtension` can inject dependencies from the test’s`ApplicationContext` into test constructors and methods that are annotated with`@BeforeAll`, `@AfterAll`, `@BeforeEach`, `@AfterEach`, `@Test`, `@RepeatedTest`,`@ParameterizedTest`, and others. + +###### Constructor Injection + +If a specific parameter in a constructor for a JUnit Jupiter test class is of type`ApplicationContext` (or a sub-type thereof) or is annotated or meta-annotated with`@Autowired`, `@Qualifier`, or `@Value`, Spring injects the value for that specific +parameter with the corresponding bean or value from the test’s `ApplicationContext`. + +Spring can also be configured to autowire all arguments for a test class constructor if +the constructor is considered to be *autowirable*. A constructor is considered to be +autowirable if one of the following conditions is met (in order of precedence). + +* The constructor is annotated with `@Autowired`. + +* `@TestConstructor` is present or meta-present on the test class with the `autowireMode`attribute set to `ALL`. + +* The default *test constructor autowire mode* has been changed to `ALL`. + +See [`@TestConstructor`](#integration-testing-annotations-testconstructor) for details on the use of`@TestConstructor` and how to change the global *test constructor autowire mode*. 
+ +| |If the constructor for a test class is considered to be *autowirable*, Spring<br/>assumes the responsibility for resolving arguments for all parameters in the constructor.<br/>Consequently, no other `ParameterResolver` registered with JUnit Jupiter can resolve<br/>parameters for such a constructor.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Constructor injection for test classes must not be used in conjunction with JUnit<br/>Jupiter’s `@TestInstance(PER_CLASS)` support if `@DirtiesContext` is used to close the<br/>test’s `ApplicationContext` before or after test methods.<br/><br/>The reason is that `@TestInstance(PER_CLASS)` instructs JUnit Jupiter to cache the test<br/>instance between test method invocations. Consequently, the test instance will retain<br/>references to beans that were originally injected from an `ApplicationContext` that has<br/>been subsequently closed. 
Since the constructor for the test class will only be invoked<br/>once in such scenarios, dependency injection will not occur again, and subsequent tests<br/>will interact with beans from the closed `ApplicationContext` which may result in errors.<br/><br/>To use `@DirtiesContext` with "before test method" or "after test method" modes in<br/>conjunction with `@TestInstance(PER_CLASS)`, one must configure dependencies from Spring<br/>to be supplied via field or setter injection so that they can be re-injected between test<br/>method invocations.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In the following example, Spring injects the `OrderService` bean from the`ApplicationContext` loaded from `TestConfig.class` into the`OrderServiceIntegrationTests` constructor. 
+
+Java
+
+```
+@SpringJUnitConfig(TestConfig.class)
+class OrderServiceIntegrationTests {
+
+    private final OrderService orderService;
+
+    @Autowired
+    OrderServiceIntegrationTests(OrderService orderService) {
+        this.orderService = orderService;
+    }
+
+    // tests that use the injected OrderService
+}
+```
+
+Kotlin
+
+```
+@SpringJUnitConfig(TestConfig::class)
+class OrderServiceIntegrationTests @Autowired constructor(private val orderService: OrderService){
+    // tests that use the injected OrderService
+}
+```
+
+Note that this feature lets test dependencies be `final` and therefore immutable.
+
+If the `spring.test.constructor.autowire.mode` property is set to `all` (see[`@TestConstructor`](#integration-testing-annotations-testconstructor)), we can omit the declaration of`@Autowired` on the constructor in the previous example, resulting in the following.
+
+Java
+
+```
+@SpringJUnitConfig(TestConfig.class)
+class OrderServiceIntegrationTests {
+
+    private final OrderService orderService;
+
+    OrderServiceIntegrationTests(OrderService orderService) {
+        this.orderService = orderService;
+    }
+
+    // tests that use the injected OrderService
+}
+```
+
+Kotlin
+
+```
+@SpringJUnitConfig(TestConfig::class)
+class OrderServiceIntegrationTests(val orderService:OrderService) {
+    // tests that use the injected OrderService
+}
+```
+
+###### Method Injection
+
+If a parameter in a JUnit Jupiter test method or test lifecycle callback method is of
+type `ApplicationContext` (or a sub-type thereof) or is annotated or meta-annotated with`@Autowired`, `@Qualifier`, or `@Value`, Spring injects the value for that specific
+parameter with the corresponding bean from the test’s `ApplicationContext`. 
+ +In the following example, Spring injects the `OrderService` from the `ApplicationContext`loaded from `TestConfig.class` into the `deleteOrder()` test method: + +Java + +``` +@SpringJUnitConfig(TestConfig.class) +class OrderServiceIntegrationTests { + + @Test + void deleteOrder(@Autowired OrderService orderService) { + // use orderService from the test's ApplicationContext + } +} +``` + +Kotlin + +``` +@SpringJUnitConfig(TestConfig::class) +class OrderServiceIntegrationTests { + + @Test + fun deleteOrder(@Autowired orderService: OrderService) { + // use orderService from the test's ApplicationContext + } +} +``` + +Due to the robustness of the `ParameterResolver` support in JUnit Jupiter, you can also +have multiple dependencies injected into a single method, not only from Spring but also +from JUnit Jupiter itself or other third-party extensions. + +The following example shows how to have both Spring and JUnit Jupiter inject dependencies +into the `placeOrderRepeatedly()` test method simultaneously. + +Java + +``` +@SpringJUnitConfig(TestConfig.class) +class OrderServiceIntegrationTests { + + @RepeatedTest(10) + void placeOrderRepeatedly(RepetitionInfo repetitionInfo, + @Autowired OrderService orderService) { + + // use orderService from the test's ApplicationContext + // and repetitionInfo from JUnit Jupiter + } +} +``` + +Kotlin + +``` +@SpringJUnitConfig(TestConfig::class) +class OrderServiceIntegrationTests { + + @RepeatedTest(10) + fun placeOrderRepeatedly(repetitionInfo:RepetitionInfo, @Autowired orderService:OrderService) { + + // use orderService from the test's ApplicationContext + // and repetitionInfo from JUnit Jupiter + } +} +``` + +Note that the use of `@RepeatedTest` from JUnit Jupiter lets the test method gain access +to the `RepetitionInfo`. 
+ +##### `@Nested` test class configuration + +The *Spring TestContext Framework* has supported the use of test-related annotations on`@Nested` test classes in JUnit Jupiter since Spring Framework 5.0; however, until Spring +Framework 5.3 class-level test configuration annotations were not *inherited* from +enclosing classes like they are from superclasses. + +Spring Framework 5.3 introduces first-class support for inheriting test class +configuration from enclosing classes, and such configuration will be inherited by +default. To change from the default `INHERIT` mode to `OVERRIDE` mode, you may annotate +an individual `@Nested` test class with`@NestedTestConfiguration(EnclosingConfiguration.OVERRIDE)`. An explicit`@NestedTestConfiguration` declaration will apply to the annotated test class as well as +any of its subclasses and nested classes. Thus, you may annotate a top-level test class +with `@NestedTestConfiguration`, and that will apply to all of its nested test classes +recursively. + +In order to allow development teams to change the default to `OVERRIDE` – for example, +for compatibility with Spring Framework 5.0 through 5.2 – the default mode can be changed +globally via a JVM system property or a `spring.properties` file in the root of the +classpath. See the ["Changing +the default enclosing configuration inheritance mode"](#integration-testing-annotations-nestedtestconfiguration) note for details. + +Although the following "Hello World" example is very simplistic, it shows how to declare +common configuration on a top-level class that is inherited by its `@Nested` test +classes. In this particular example, only the `TestConfig` configuration class is +inherited. Each nested test class provides its own set of active profiles, resulting in a +distinct `ApplicationContext` for each nested test class (see[Context Caching](#testcontext-ctx-management-caching) for details). 
Consult the list of[supported annotations](#integration-testing-annotations-nestedtestconfiguration) to see +which annotations can be inherited in `@Nested` test classes. + +Java + +``` +@SpringJUnitConfig(TestConfig.class) +class GreetingServiceTests { + + @Nested + @ActiveProfiles("lang_en") + class EnglishGreetings { + + @Test + void hello(@Autowired GreetingService service) { + assertThat(service.greetWorld()).isEqualTo("Hello World"); + } + } + + @Nested + @ActiveProfiles("lang_de") + class GermanGreetings { + + @Test + void hello(@Autowired GreetingService service) { + assertThat(service.greetWorld()).isEqualTo("Hallo Welt"); + } + } +} +``` + +Kotlin + +``` +@SpringJUnitConfig(TestConfig::class) +class GreetingServiceTests { + + @Nested + @ActiveProfiles("lang_en") + inner class EnglishGreetings { + + @Test + fun hello(@Autowired service:GreetingService) { + assertThat(service.greetWorld()).isEqualTo("Hello World") + } + } + + @Nested + @ActiveProfiles("lang_de") + inner class GermanGreetings { + + @Test + fun hello(@Autowired service:GreetingService) { + assertThat(service.greetWorld()).isEqualTo("Hallo Welt") + } + } +} +``` + +##### TestNG Support Classes + +The `org.springframework.test.context.testng` package provides the following support +classes for TestNG based test cases: + +* `AbstractTestNGSpringContextTests` + +* `AbstractTransactionalTestNGSpringContextTests` + +`AbstractTestNGSpringContextTests` is an abstract base test class that integrates the +Spring TestContext Framework with explicit `ApplicationContext` testing support in a +TestNG environment. When you extend `AbstractTestNGSpringContextTests`, you can access a`protected` `applicationContext` instance variable that you can use to perform explicit +bean lookups or to test the state of the context as a whole. 
+ +`AbstractTransactionalTestNGSpringContextTests` is an abstract transactional extension of`AbstractTestNGSpringContextTests` that adds some convenience functionality for JDBC +access. This class expects a `javax.sql.DataSource` bean and a`PlatformTransactionManager` bean to be defined in the `ApplicationContext`. When you +extend `AbstractTransactionalTestNGSpringContextTests`, you can access a `protected``jdbcTemplate` instance variable that you can use to run SQL statements to query the +database. You can use such queries to confirm database state both before and after +running database-related application code, and Spring ensures that such queries run in +the scope of the same transaction as the application code. When used in conjunction with +an ORM tool, be sure to avoid [false positives](#testcontext-tx-false-positives). +As mentioned in [JDBC Testing Support](#integration-testing-support-jdbc),`AbstractTransactionalTestNGSpringContextTests` also provides convenience methods that +delegate to methods in `JdbcTestUtils` by using the aforementioned `jdbcTemplate`. +Furthermore, `AbstractTransactionalTestNGSpringContextTests` provides an`executeSqlScript(..)` method for running SQL scripts against the configured `DataSource`. + +| |These classes are a convenience for extension. If you do not want your test classes<br/>to be tied to a Spring-specific class hierarchy, you can configure your own custom test<br/>classes by using `@ContextConfiguration`, `@TestExecutionListeners`, and so on and by<br/>manually instrumenting your test class with a `TestContextManager`. 
See the source code<br/>of `AbstractTestNGSpringContextTests` for an example of how to instrument your test class.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.6. WebTestClient + +`WebTestClient` is an HTTP client designed for testing server applications. It wraps +Spring’s [WebClient](web-reactive.html#webflux-client) and uses it to perform requests +but exposes a testing facade for verifying responses. `WebTestClient` can be used to +perform end-to-end HTTP tests. It can also be used to test Spring MVC and Spring WebFlux +applications without a running server via mock server request and response objects. + +| |Kotlin users: See [this section](languages.html#kotlin-webtestclient-issue)related to use of the `WebTestClient`.| +|---|-----------------------------------------------------------------------------------------------------------------| + +#### 3.6.1. Setup + +To set up a `WebTestClient` you need to choose a server setup to bind to. This can be one +of several mock server setup choices or a connection to a live server. + +##### Bind to Controller + +This setup allows you to test specific controller(s) via mock request and response objects, +without a running server. 
+ +For WebFlux applications, use the following which loads infrastructure equivalent to the[WebFlux Java config](web-reactive.html#webflux-config), registers the given +controller(s), and creates a [WebHandler chain](web-reactive.html#webflux-web-handler-api)to handle requests: + +Java + +``` +WebTestClient client = + WebTestClient.bindToController(new TestController()).build(); +``` + +Kotlin + +``` +val client = WebTestClient.bindToController(TestController()).build() +``` + +For Spring MVC, use the following which delegates to the[StandaloneMockMvcBuilder](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/web/servlet/setup/StandaloneMockMvcBuilder.html)to load infrastructure equivalent to the [WebMvc Java config](web.html#mvc-config), +registers the given controller(s), and creates an instance of[MockMvc](#spring-mvc-test-framework) to handle requests: + +Java + +``` +WebTestClient client = + MockMvcWebTestClient.bindToController(new TestController()).build(); +``` + +Kotlin + +``` +val client = MockMvcWebTestClient.bindToController(TestController()).build() +``` + +##### Bind to `ApplicationContext` + +This setup allows you to load Spring configuration with Spring MVC or Spring WebFlux +infrastructure and controller declarations and use it to handle requests via mock request +and response objects, without a running server. 
+ +For WebFlux, use the following where the Spring `ApplicationContext` is passed to[WebHttpHandlerBuilder](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/server/adapter/WebHttpHandlerBuilder.html#applicationContext-org.springframework.context.ApplicationContext-)to create the [WebHandler chain](web-reactive.html#webflux-web-handler-api) to handle +requests: + +Java + +``` +@SpringJUnitConfig(WebConfig.class) (1) +class MyTests { + + WebTestClient client; + + @BeforeEach + void setUp(ApplicationContext context) { (2) + client = WebTestClient.bindToApplicationContext(context).build(); (3) + } +} +``` + +|**1**|Specify the configuration to load| +|-----|---------------------------------| +|**2**| Inject the configuration | +|**3**| Create the `WebTestClient` | + +Kotlin + +``` +@SpringJUnitConfig(WebConfig::class) (1) +class MyTests { + + lateinit var client: WebTestClient + + @BeforeEach + fun setUp(context: ApplicationContext) { (2) + client = WebTestClient.bindToApplicationContext(context).build() (3) + } +} +``` + +|**1**|Specify the configuration to load| +|-----|---------------------------------| +|**2**| Inject the configuration | +|**3**| Create the `WebTestClient` | + +For Spring MVC, use the following where the Spring `ApplicationContext` is passed to[MockMvcBuilders.webAppContextSetup](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/web/servlet/setup/MockMvcBuilders.html#webAppContextSetup-org.springframework.web.context.WebApplicationContext-)to create a [MockMvc](#spring-mvc-test-framework) instance to handle +requests: + +Java + +``` +@ExtendWith(SpringExtension.class) +@WebAppConfiguration("classpath:META-INF/web-resources") (1) +@ContextHierarchy({ + @ContextConfiguration(classes = RootConfig.class), + @ContextConfiguration(classes = WebConfig.class) +}) +class MyTests { + + @Autowired + WebApplicationContext wac; (2) + + WebTestClient client; + + @BeforeEach + void 
setUp() { + client = MockMvcWebTestClient.bindToApplicationContext(this.wac).build(); (3) + } +} +``` + +|**1**|Specify the configuration to load| +|-----|---------------------------------| +|**2**| Inject the configuration | +|**3**| Create the `WebTestClient` | + +Kotlin + +``` +@ExtendWith(SpringExtension::class) +@WebAppConfiguration("classpath:META-INF/web-resources") (1) +@ContextHierarchy( + ContextConfiguration(classes = [RootConfig::class]), + ContextConfiguration(classes = [WebConfig::class]) +) +class MyTests { + + @Autowired + lateinit var wac: WebApplicationContext (2) + + lateinit var client: WebTestClient + + @BeforeEach + fun setUp() { + client = MockMvcWebTestClient.bindToApplicationContext(wac).build() (3) + } +} +``` + +|**1**|Specify the configuration to load| +|-----|---------------------------------| +|**2**| Inject the configuration | +|**3**| Create the `WebTestClient` | + +##### Bind to Router Function + +This setup allows you to test [functional endpoints](web-reactive.html#webflux-fn) via +mock request and response objects, without a running server. + +For WebFlux, use the following which delegates to `RouterFunctions.toWebHandler` to +create a server setup to handle requests: + +Java + +``` +RouterFunction<?> route = ... +client = WebTestClient.bindToRouterFunction(route).build(); +``` + +Kotlin + +``` +val route: RouterFunction<*> = ... +val client = WebTestClient.bindToRouterFunction(route).build() +``` + +For Spring MVC there are currently no options to test[WebMvc functional endpoints](web.html#webmvc-fn). 
+ +##### Bind to Server + +This setup connects to a running server to perform full, end-to-end HTTP tests: + +Java + +``` +client = WebTestClient.bindToServer().baseUrl("http://localhost:8080").build(); +``` + +Kotlin + +``` +client = WebTestClient.bindToServer().baseUrl("http://localhost:8080").build() +``` + +##### Client Config + +In addition to the server setup options described earlier, you can also configure client +options, including base URL, default headers, client filters, and others. These options +are readily available following `bindToServer()`. For all other configuration options, +you need to use `configureClient()` to transition from server to client configuration, as +follows: + +Java + +``` +client = WebTestClient.bindToController(new TestController()) + .configureClient() + .baseUrl("/test") + .build(); +``` + +Kotlin + +``` +client = WebTestClient.bindToController(TestController()) + .configureClient() + .baseUrl("/test") + .build() +``` + +#### 3.6.2. Writing Tests + +`WebTestClient` provides an API identical to [WebClient](web-reactive.html#webflux-client)up to the point of performing a request by using `exchange()`. See the[WebClient](web-reactive.html#webflux-client-body) documentation for examples on how to +prepare a request with any content including form data, multipart data, and more. + +After the call to `exchange()`, `WebTestClient` diverges from the `WebClient` and +instead continues with a workflow to verify responses. 
+ +To assert the response status and headers, use the following: + +Java + +``` +client.get().uri("/persons/1") + .accept(MediaType.APPLICATION_JSON) + .exchange() + .expectStatus().isOk() + .expectHeader().contentType(MediaType.APPLICATION_JSON); +``` + +Kotlin + +``` +client.get().uri("/persons/1") + .accept(MediaType.APPLICATION_JSON) + .exchange() + .expectStatus().isOk() + .expectHeader().contentType(MediaType.APPLICATION_JSON) +``` + +If you would like for all expectations to be asserted even if one of them fails, you can +use `expectAll(..)` instead of multiple chained `expect*(..)` calls. This feature is +similar to the *soft assertions* support in AssertJ and the `assertAll()` support in +JUnit Jupiter. + +Java + +``` +client.get().uri("/persons/1") + .accept(MediaType.APPLICATION_JSON) + .exchange() + .expectAll( + spec -> spec.expectStatus().isOk(), + spec -> spec.expectHeader().contentType(MediaType.APPLICATION_JSON) + ); +``` + +You can then choose to decode the response body through one of the following: + +* `expectBody(Class<T>)`: Decode to single object. + +* `expectBodyList(Class<T>)`: Decode and collect objects to `List<T>`. + +* `expectBody()`: Decode to `byte[]` for [JSON Content](#webtestclient-json) or an empty body. 
+ +And perform assertions on the resulting higher level Object(s): + +Java + +``` +client.get().uri("/persons") + .exchange() + .expectStatus().isOk() + .expectBodyList(Person.class).hasSize(3).contains(person); +``` + +Kotlin + +``` +import org.springframework.test.web.reactive.server.expectBodyList + +client.get().uri("/persons") + .exchange() + .expectStatus().isOk() + .expectBodyList<Person>().hasSize(3).contains(person) +``` + +If the built-in assertions are insufficient, you can consume the object instead and +perform any other assertions: + +Java + +``` +import org.springframework.test.web.reactive.server.expectBody + +client.get().uri("/persons/1") + .exchange() + .expectStatus().isOk() + .expectBody(Person.class) + .consumeWith(result -> { + // custom assertions (e.g. AssertJ)... + }); +``` + +Kotlin + +``` +client.get().uri("/persons/1") + .exchange() + .expectStatus().isOk() + .expectBody<Person>() + .consumeWith { + // custom assertions (e.g. AssertJ)... + } +``` + +Or you can exit the workflow and obtain an `EntityExchangeResult`: + +Java + +``` +EntityExchangeResult<Person> result = client.get().uri("/persons/1") + .exchange() + .expectStatus().isOk() + .expectBody(Person.class) + .returnResult(); +``` + +Kotlin + +``` +import org.springframework.test.web.reactive.server.expectBody + +val result = client.get().uri("/persons/1") + .exchange() + .expectStatus().isOk + .expectBody<Person>() + .returnResult() +``` + +| |When you need to decode to a target type with generics, look for the overloaded methods<br/>that accept[`ParameterizedTypeReference`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/ParameterizedTypeReference.html)instead of `Class<T>`.| 
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### No Content + +If the response is not expected to have content, you can assert that as follows: + +Java + +``` +client.post().uri("/persons") + .body(personMono, Person.class) + .exchange() + .expectStatus().isCreated() + .expectBody().isEmpty(); +``` + +Kotlin + +``` +client.post().uri("/persons") + .bodyValue(person) + .exchange() + .expectStatus().isCreated() + .expectBody().isEmpty() +``` + +If you want to ignore the response content, the following releases the content without +any assertions: + +Java + +``` +client.get().uri("/persons/123") + .exchange() + .expectStatus().isNotFound() + .expectBody(Void.class); +``` + +Kotlin + +``` +client.get().uri("/persons/123") + .exchange() + .expectStatus().isNotFound + .expectBody<Unit>() +``` + +##### JSON Content + +You can use `expectBody()` without a target type to perform assertions on the raw +content rather than through higher level Object(s). 
+ +To verify the full JSON content with [JSONAssert](https://jsonassert.skyscreamer.org): + +Java + +``` +client.get().uri("/persons/1") + .exchange() + .expectStatus().isOk() + .expectBody() + .json("{\"name\":\"Jane\"}") +``` + +Kotlin + +``` +client.get().uri("/persons/1") + .exchange() + .expectStatus().isOk() + .expectBody() + .json("{\"name\":\"Jane\"}") +``` + +To verify JSON content with [JSONPath](https://github.com/jayway/JsonPath): + +Java + +``` +client.get().uri("/persons") + .exchange() + .expectStatus().isOk() + .expectBody() + .jsonPath("$[0].name").isEqualTo("Jane") + .jsonPath("$[1].name").isEqualTo("Jason"); +``` + +Kotlin + +``` +client.get().uri("/persons") + .exchange() + .expectStatus().isOk() + .expectBody() + .jsonPath("$[0].name").isEqualTo("Jane") + .jsonPath("$[1].name").isEqualTo("Jason") +``` + +##### Streaming Responses + +To test potentially infinite streams such as `"text/event-stream"` or`"application/x-ndjson"`, start by verifying the response status and headers, and then +obtain a `FluxExchangeResult`: + +Java + +``` +FluxExchangeResult<MyEvent> result = client.get().uri("/events") + .accept(TEXT_EVENT_STREAM) + .exchange() + .expectStatus().isOk() + .returnResult(MyEvent.class); +``` + +Kotlin + +``` +import org.springframework.test.web.reactive.server.returnResult + +val result = client.get().uri("/events") + .accept(TEXT_EVENT_STREAM) + .exchange() + .expectStatus().isOk() + .returnResult<MyEvent>() +``` + +Now you’re ready to consume the response stream with `StepVerifier` from `reactor-test`: + +Java + +``` +Flux<Event> eventFlux = result.getResponseBody(); + +StepVerifier.create(eventFlux) + .expectNext(person) + .expectNextCount(4) + .consumeNextWith(p -> ...) + .thenCancel() + .verify(); +``` + +Kotlin + +``` +val eventFlux = result.getResponseBody() + +StepVerifier.create(eventFlux) + .expectNext(person) + .expectNextCount(4) + .consumeNextWith { p -> ... 
} + .thenCancel() + .verify() +``` + +##### MockMvc Assertions + +`WebTestClient` is an HTTP client and as such it can only verify what is in the client +response including status, headers, and body. + +When testing a Spring MVC application with a MockMvc server setup, you have the extra +choice to perform further assertions on the server response. To do that start by +obtaining an `ExchangeResult` after asserting the body: + +Java + +``` +// For a response with a body +EntityExchangeResult<Person> result = client.get().uri("/persons/1") + .exchange() + .expectStatus().isOk() + .expectBody(Person.class) + .returnResult(); + +// For a response without a body +EntityExchangeResult<Void> result = client.get().uri("/path") + .exchange() + .expectBody().isEmpty(); +``` + +Kotlin + +``` +// For a response with a body +val result = client.get().uri("/persons/1") + .exchange() + .expectStatus().isOk() + .expectBody(Person::class.java) + .returnResult() + +// For a response without a body +val result = client.get().uri("/path") + .exchange() + .expectBody().isEmpty() +``` + +Then switch to MockMvc server response assertions: + +Java + +``` +MockMvcWebTestClient.resultActionsFor(result) + .andExpect(model().attribute("integer", 3)) + .andExpect(model().attribute("string", "a string value")); +``` + +Kotlin + +``` +MockMvcWebTestClient.resultActionsFor(result) + .andExpect(model().attribute("integer", 3)) + .andExpect(model().attribute("string", "a string value")) +``` + +### 3.7. MockMvc + +The Spring MVC Test framework, also known as MockMvc, provides support for testing Spring +MVC applications. It performs full Spring MVC request handling but via mock request and +response objects instead of a running server. + +MockMvc can be used on its own to perform requests and verify responses. It can also be +used through the [WebTestClient](#webtestclient) where MockMvc is plugged in as the server to handle +requests with. 
The advantage of `WebTestClient` is the option to work with higher level +objects instead of raw data as well as the ability to switch to full, end-to-end HTTP +tests against a live server and use the same test API. + +#### 3.7.1. Overview + +You can write plain unit tests for Spring MVC by instantiating a controller, injecting it +with dependencies, and calling its methods. However such tests do not verify request +mappings, data binding, message conversion, type conversion, validation; nor +do they involve any of the supporting `@InitBinder`, `@ModelAttribute`, or`@ExceptionHandler` methods. + +The Spring MVC Test framework, also known as `MockMvc`, aims to provide more complete +testing for Spring MVC controllers without a running server. It does that by invoking +the `DispatcherServlet` and passing[“mock” implementations of the Servlet API](#mock-objects-servlet) from the`spring-test` module which replicates the full Spring MVC request handling without +a running server. + +MockMvc is a server side test framework that lets you verify most of the functionality +of a Spring MVC application using lightweight and targeted tests. You can use it on +its own to perform requests and to verify responses, or you can also use it through +the [WebTestClient](#webtestclient) API with MockMvc plugged in as the server to handle requests +with. + +##### Static Imports + +When using MockMvc directly to perform requests, you’ll need static imports for: + +* `MockMvcBuilders.*` + +* `MockMvcRequestBuilders.*` + +* `MockMvcResultMatchers.*` + +* `MockMvcResultHandlers.*` + +An easy way to remember that is to search for `MockMvc*`. If using Eclipse be sure to also +add the above as “favorite static members” in the Eclipse preferences. + +When using MockMvc through the [WebTestClient](#webtestclient) you do not need static imports. +The `WebTestClient` provides a fluent API without static imports. + +##### Setup Choices + +MockMvc can be set up in one of two ways. 
One is to point directly to the controllers you +want to test and programmatically configure Spring MVC infrastructure. The second is to +point to Spring configuration with Spring MVC and controller infrastructure in it. + +To set up MockMvc for testing a specific controller, use the following: + +Java + +``` +class MyWebTests { + + MockMvc mockMvc; + + @BeforeEach + void setup() { + this.mockMvc = MockMvcBuilders.standaloneSetup(new AccountController()).build(); + } + + // ... + +} +``` + +Kotlin + +``` +class MyWebTests { + + lateinit var mockMvc : MockMvc + + @BeforeEach + fun setup() { + mockMvc = MockMvcBuilders.standaloneSetup(AccountController()).build() + } + + // ... + +} +``` + +Or you can also use this setup when testing through the[WebTestClient](#webtestclient-controller-config) which delegates to the same builder +as shown above. + +To set up MockMvc through Spring configuration, use the following: + +Java + +``` +@SpringJUnitWebConfig(locations = "my-servlet-context.xml") +class MyWebTests { + + MockMvc mockMvc; + + @BeforeEach + void setup(WebApplicationContext wac) { + this.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build(); + } + + // ... + +} +``` + +Kotlin + +``` +@SpringJUnitWebConfig(locations = ["my-servlet-context.xml"]) +class MyWebTests { + + lateinit var mockMvc: MockMvc + + @BeforeEach + fun setup(wac: WebApplicationContext) { + mockMvc = MockMvcBuilders.webAppContextSetup(wac).build() + } + + // ... + +} +``` + +Or you can also use this setup when testing through the[WebTestClient](#webtestclient-context-config) which delegates to the same builder +as shown above. + +Which setup option should you use? + +The `webAppContextSetup` loads your actual Spring MVC configuration, resulting in a more +complete integration test. Since the TestContext framework caches the loaded Spring +configuration, it helps keep tests running fast, even as you introduce more tests in your +test suite. 
Furthermore, you can inject mock services into controllers through Spring +configuration to remain focused on testing the web layer. The following example declares +a mock service with Mockito: + +``` +<bean id="accountService" class="org.mockito.Mockito" factory-method="mock"> + <constructor-arg value="org.example.AccountService"/> +</bean> +``` + +You can then inject the mock service into the test to set up and verify your +expectations, as the following example shows: + +Java + +``` +@SpringJUnitWebConfig(locations = "test-servlet-context.xml") +class AccountTests { + + @Autowired + AccountService accountService; + + MockMvc mockMvc; + + @BeforeEach + void setup(WebApplicationContext wac) { + this.mockMvc = MockMvcBuilders.webAppContextSetup(wac).build(); + } + + // ... + +} +``` + +Kotlin + +``` +@SpringJUnitWebConfig(locations = ["test-servlet-context.xml"]) +class AccountTests { + + @Autowired + lateinit var accountService: AccountService + + lateinit var mockMvc: MockMvc + + @BeforeEach + fun setup(wac: WebApplicationContext) { + mockMvc = MockMvcBuilders.webAppContextSetup(wac).build() + } + + // ... + +} +``` + +The `standaloneSetup`, on the other hand, is a little closer to a unit test. It tests one +controller at a time. You can manually inject the controller with mock dependencies, and +it does not involve loading Spring configuration. Such tests are more focused on style +and make it easier to see which controller is being tested, whether any specific Spring +MVC configuration is required to work, and so on. The `standaloneSetup` is also a very +convenient way to write ad-hoc tests to verify specific behavior or to debug an issue. + +As with most “integration versus unit testing” debates, there is no right or wrong +answer. However, using the `standaloneSetup` does imply the need for additional`webAppContextSetup` tests in order to verify your Spring MVC configuration. 
+Alternatively, you can write all your tests with `webAppContextSetup`, in order to always +test against your actual Spring MVC configuration. + +##### Setup Features + +No matter which MockMvc builder you use, all `MockMvcBuilder` implementations provide +some common and very useful features. For example, you can declare an `Accept` header for +all requests and expect a status of 200 as well as a `Content-Type` header in all +responses, as follows: + +Java + +``` +// static import of MockMvcBuilders.standaloneSetup + +MockMvc mockMvc = standaloneSetup(new MusicController()) + .defaultRequest(get("/").accept(MediaType.APPLICATION_JSON)) + .alwaysExpect(status().isOk()) + .alwaysExpect(content().contentType("application/json;charset=UTF-8")) + .build(); +``` + +Kotlin + +``` +// Not possible in Kotlin until https://youtrack.jetbrains.com/issue/KT-22208 is fixed +``` + +In addition, third-party frameworks (and applications) can pre-package setup +instructions, such as those in a `MockMvcConfigurer`. The Spring Framework has one such +built-in implementation that helps to save and re-use the HTTP session across requests. +You can use it as follows: + +Java + +``` +// static import of SharedHttpSessionConfigurer.sharedHttpSession + +MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new TestController()) + .apply(sharedHttpSession()) + .build(); + +// Use mockMvc to perform requests... +``` + +Kotlin + +``` +// Not possible in Kotlin until https://youtrack.jetbrains.com/issue/KT-22208 is fixed +``` + +See the javadoc for[`ConfigurableMockMvcBuilder`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/test/web/servlet/setup/ConfigurableMockMvcBuilder.html)for a list of all MockMvc builder features or use the IDE to explore the available options. + +##### Performing Requests + +This section shows how to use MockMvc on its own to perform requests and verify responses. 
+If using MockMvc through the `WebTestClient` please see the corresponding section on[Writing Tests](#webtestclient-tests) instead. + +To perform requests that use any HTTP method, as the following example shows: + +Java + +``` +// static import of MockMvcRequestBuilders.* + +mockMvc.perform(post("/hotels/{id}", 42).accept(MediaType.APPLICATION_JSON)); +``` + +Kotlin + +``` +import org.springframework.test.web.servlet.post + +mockMvc.post("/hotels/{id}", 42) { + accept = MediaType.APPLICATION_JSON +} +``` + +You can also perform file upload requests that internally use`MockMultipartHttpServletRequest` so that there is no actual parsing of a multipart +request. Rather, you have to set it up to be similar to the following example: + +Java + +``` +mockMvc.perform(multipart("/doc").file("a1", "ABC".getBytes("UTF-8"))); +``` + +Kotlin + +``` +import org.springframework.test.web.servlet.multipart + +mockMvc.multipart("/doc") { + file("a1", "ABC".toByteArray(charset("UTF8"))) +} +``` + +You can specify query parameters in URI template style, as the following example shows: + +Java + +``` +mockMvc.perform(get("/hotels?thing={thing}", "somewhere")); +``` + +Kotlin + +``` +mockMvc.get("/hotels?thing={thing}", "somewhere") +``` + +You can also add Servlet request parameters that represent either query or form +parameters, as the following example shows: + +Java + +``` +mockMvc.perform(get("/hotels").param("thing", "somewhere")); +``` + +Kotlin + +``` +import org.springframework.test.web.servlet.get + +mockMvc.get("/hotels") { + param("thing", "somewhere") +} +``` + +If application code relies on Servlet request parameters and does not check the query +string explicitly (as is most often the case), it does not matter which option you use. +Keep in mind, however, that query parameters provided with the URI template are decoded +while request parameters provided through the `param(…​)` method are expected to already +be decoded. 
+ +In most cases, it is preferable to leave the context path and the Servlet path out of the +request URI. If you must test with the full request URI, be sure to set the `contextPath`and `servletPath` accordingly so that request mappings work, as the following example +shows: + +Java + +``` +mockMvc.perform(get("/app/main/hotels/{id}").contextPath("/app").servletPath("/main")) +``` + +Kotlin + +``` +import org.springframework.test.web.servlet.get + +mockMvc.get("/app/main/hotels/{id}") { + contextPath = "/app" + servletPath = "/main" +} +``` + +In the preceding example, it would be cumbersome to set the `contextPath` and`servletPath` with every performed request. Instead, you can set up default request +properties, as the following example shows: + +Java + +``` +class MyWebTests { + + MockMvc mockMvc; + + @BeforeEach + void setup() { + mockMvc = standaloneSetup(new AccountController()) + .defaultRequest(get("/") + .contextPath("/app").servletPath("/main") + .accept(MediaType.APPLICATION_JSON)).build(); + } +} +``` + +Kotlin + +``` +// Not possible in Kotlin until https://youtrack.jetbrains.com/issue/KT-22208 is fixed +``` + +The preceding properties affect every request performed through the `MockMvc` instance. +If the same property is also specified on a given request, it overrides the default +value. That is why the HTTP method and URI in the default request do not matter, since +they must be specified on every request. + +##### Defining Expectations + +You can define expectations by appending one or more `andExpect(..)` calls after +performing a request, as the following example shows. As soon as one expectation fails, +no other expectations will be asserted. 
+ +Java + +``` +// static import of MockMvcRequestBuilders.* and MockMvcResultMatchers.* + +mockMvc.perform(get("/accounts/1")).andExpect(status().isOk()); +``` + +Kotlin + +``` +import org.springframework.test.web.servlet.get + +mockMvc.get("/accounts/1").andExpect { + status().isOk() +} +``` + +You can define multiple expectations by appending `andExpectAll(..)` after performing a +request, as the following example shows. In contrast to `andExpect(..)`,`andExpectAll(..)` guarantees that all supplied expectations will be asserted and that +all failures will be tracked and reported. + +Java + +``` +// static import of MockMvcRequestBuilders.* and MockMvcResultMatchers.* + +mockMvc.perform(get("/accounts/1")).andExpectAll( + status().isOk(), + content().contentType("application/json;charset=UTF-8")); +``` + +`MockMvcResultMatchers.*` provides a number of expectations, some of which are further +nested with more detailed expectations. + +Expectations fall in two general categories. The first category of assertions verifies +properties of the response (for example, the response status, headers, and content). +These are the most important results to assert. + +The second category of assertions goes beyond the response. These assertions let you +inspect Spring MVC specific aspects, such as which controller method processed the +request, whether an exception was raised and handled, what the content of the model is, +what view was selected, what flash attributes were added, and so on. They also let you +inspect Servlet specific aspects, such as request and session attributes. 
+ +The following test asserts that binding or validation failed: + +Java + +``` +mockMvc.perform(post("/persons")) + .andExpect(status().isOk()) + .andExpect(model().attributeHasErrors("person")); +``` + +Kotlin + +``` +import org.springframework.test.web.servlet.post + +mockMvc.post("/persons").andExpect { + status().isOk() + model { + attributeHasErrors("person") + } +} +``` + +Many times, when writing tests, it is useful to dump the results of the performed +request. You can do so as follows, where `print()` is a static import from`MockMvcResultHandlers`: + +Java + +``` +mockMvc.perform(post("/persons")) + .andDo(print()) + .andExpect(status().isOk()) + .andExpect(model().attributeHasErrors("person")); +``` + +Kotlin + +``` +import org.springframework.test.web.servlet.post + +mockMvc.post("/persons").andDo { + print() + }.andExpect { + status().isOk() + model { + attributeHasErrors("person") + } + } +``` + +As long as request processing does not cause an unhandled exception, the `print()` method +prints all the available result data to `System.out`. There is also a `log()` method and +two additional variants of the `print()` method, one that accepts an `OutputStream` and +one that accepts a `Writer`. For example, invoking `print(System.err)` prints the result +data to `System.err`, while invoking `print(myWriter)` prints the result data to a custom +writer. If you want to have the result data logged instead of printed, you can invoke the`log()` method, which logs the result data as a single `DEBUG` message under the`org.springframework.test.web.servlet.result` logging category. + +In some cases, you may want to get direct access to the result and verify something that +cannot be verified otherwise. This can be achieved by appending `.andReturn()` after all +other expectations, as the following example shows: + +Java + +``` +MvcResult mvcResult = mockMvc.perform(post("/persons")).andExpect(status().isOk()).andReturn(); +// ... 
+``` + +Kotlin + +``` +var mvcResult = mockMvc.post("/persons").andExpect { status().isOk() }.andReturn() +// ... +``` + +If all tests repeat the same expectations, you can set up common expectations once when +building the `MockMvc` instance, as the following example shows: + +Java + +``` +standaloneSetup(new SimpleController()) + .alwaysExpect(status().isOk()) + .alwaysExpect(content().contentType("application/json;charset=UTF-8")) + .build() +``` + +Kotlin + +``` +// Not possible in Kotlin until https://youtrack.jetbrains.com/issue/KT-22208 is fixed +``` + +Note that common expectations are always applied and cannot be overridden without +creating a separate `MockMvc` instance. + +When a JSON response content contains hypermedia links created with[Spring HATEOAS](https://github.com/spring-projects/spring-hateoas), you can verify the +resulting links by using JsonPath expressions, as the following example shows: + +Java + +``` +mockMvc.perform(get("/people").accept(MediaType.APPLICATION_JSON)) + .andExpect(jsonPath("$.links[?(@.rel == 'self')].href").value("http://localhost:8080/people")); +``` + +Kotlin + +``` +mockMvc.get("/people") { + accept(MediaType.APPLICATION_JSON) +}.andExpect { + jsonPath("$.links[?(@.rel == 'self')].href") { + value("http://localhost:8080/people") + } +} +``` + +When XML response content contains hypermedia links created with[Spring HATEOAS](https://github.com/spring-projects/spring-hateoas), you can verify the +resulting links by using XPath expressions: + +Java + +``` +Map<String, String> ns = Collections.singletonMap("ns", "http://www.w3.org/2005/Atom"); +mockMvc.perform(get("/handle").accept(MediaType.APPLICATION_XML)) + .andExpect(xpath("/person/ns:link[@rel='self']/@href", ns).string("http://localhost:8080/people")); +``` + +Kotlin + +``` +val ns = mapOf("ns" to "http://www.w3.org/2005/Atom") +mockMvc.get("/handle") { + accept(MediaType.APPLICATION_XML) +}.andExpect { + xpath("/person/ns:link[@rel='self']/@href", ns) { + 
string("http://localhost:8080/people") + } +} +``` + +##### Async Requests + +This section shows how to use MockMvc on its own to test asynchronous request handling. +If using MockMvc through the [WebTestClient](#webtestclient), there is nothing special to do to make +asynchronous requests work as the `WebTestClient` automatically does what is described +in this section. + +Servlet 3.0 asynchronous requests,[supported in Spring MVC](web.html#mvc-ann-async), work by exiting the Servlet container +thread and allowing the application to compute the response asynchronously, after which +an async dispatch is made to complete processing on a Servlet container thread. + +In Spring MVC Test, async requests can be tested by asserting the produced async value +first, then manually performing the async dispatch, and finally verifying the response. +Below is an example test for controller methods that return `DeferredResult`, `Callable`, +or reactive type such as Reactor `Mono`: + +Java + +``` +// static import of MockMvcRequestBuilders.* and MockMvcResultMatchers.* + +@Test +void test() throws Exception { + MvcResult mvcResult = this.mockMvc.perform(get("/path")) + .andExpect(status().isOk()) (1) + .andExpect(request().asyncStarted()) (2) + .andExpect(request().asyncResult("body")) (3) + .andReturn(); + + this.mockMvc.perform(asyncDispatch(mvcResult)) (4) + .andExpect(status().isOk()) (5) + .andExpect(content().string("body")); +} +``` + +|**1**| Check response status is still unchanged | +|-----|---------------------------------------------------------------------| +|**2**| Async processing must have started | +|**3**| Wait and assert the async result | +|**4**|Manually perform an ASYNC dispatch (as there is no running container)| +|**5**| Verify the final response | + +Kotlin + +``` +@Test +fun test() { + var mvcResult = mockMvc.get("/path").andExpect { + status().isOk() (1) + request { asyncStarted() } (2) + // TODO Remove unused generic parameter + request { 
asyncResult<Nothing>("body") } (3) + }.andReturn() + + mockMvc.perform(asyncDispatch(mvcResult)) (4) + .andExpect { + status().isOk() (5) + content().string("body") + } +} +``` + +|**1**| Check response status is still unchanged | +|-----|---------------------------------------------------------------------| +|**2**| Async processing must have started | +|**3**| Wait and assert the async result | +|**4**|Manually perform an ASYNC dispatch (as there is no running container)| +|**5**| Verify the final response | + +##### Streaming Responses + +The best way to test streaming responses such as Server-Sent Events is through the[WebTestClient](#webtestclient) which can be used as a test client to connect to a `MockMvc` instance +to perform tests on Spring MVC controllers without a running server. For example: + +Java + +``` +WebTestClient client = MockMvcWebTestClient.bindToController(new SseController()).build(); + +FluxExchangeResult<Person> exchangeResult = client.get() + .uri("/persons") + .exchange() + .expectStatus().isOk() + .expectHeader().contentType("text/event-stream") + .returnResult(Person.class); + +// Use StepVerifier from Project Reactor to test the streaming response + +StepVerifier.create(exchangeResult.getResponseBody()) + .expectNext(new Person("N0"), new Person("N1"), new Person("N2")) + .expectNextCount(4) + .consumeNextWith(person -> assertThat(person.getName()).endsWith("7")) + .thenCancel() + .verify(); +``` + +`WebTestClient` can also connect to a live server and perform full end-to-end integration +tests. This is also supported in Spring Boot where you can[test a running server](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-testing-spring-boot-applications-testing-with-running-server). 
+
+##### Filter Registrations
+
+When setting up a `MockMvc` instance, you can register one or more Servlet `Filter` instances, as the following example shows:
+
+Java
+
+```
+mockMvc = standaloneSetup(new PersonController()).addFilters(new CharacterEncodingFilter()).build();
+```
+
+Kotlin
+
+```
+// Not possible in Kotlin until https://youtrack.jetbrains.com/issue/KT-22208 is fixed
+```
+
+Registered filters are invoked through the `MockFilterChain` from `spring-test`, and the
+last filter delegates to the `DispatcherServlet`.
+
+##### MockMvc vs End-to-End Tests
+
+MockMvc is built on Servlet API mock implementations from the `spring-test` module and does not rely on a running container. Therefore, there are
+some differences when compared to full end-to-end integration tests with an actual
+client and a live server running.
+
+The easiest way to think about this is by starting with a blank `MockHttpServletRequest`.
+Whatever you add to it is what the request becomes. Things that may catch you by surprise
+are that there is no context path by default; no `jsessionid` cookie; no forwarding,
+error, or async dispatches; and, therefore, no actual JSP rendering. Instead,
+“forwarded” and “redirected” URLs are saved in the `MockHttpServletResponse` and can
+be asserted with expectations.
+
+This means that, if you use JSPs, you can verify the JSP page to which the request was
+forwarded, but no HTML is rendered. In other words, the JSP is not invoked. Note,
+however, that all other rendering technologies that do not rely on forwarding, such as
+Thymeleaf and FreeMarker, render HTML to the response body as expected. The same is true
+for rendering JSON, XML, and other formats through `@ResponseBody` methods.
+
+Alternatively, you may consider the full end-to-end integration testing support from
+Spring Boot with `@SpringBootTest`. 
See the[Spring Boot Reference Guide](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-testing). + +There are pros and cons for each approach. The options provided in Spring MVC Test are +different stops on the scale from classic unit testing to full integration testing. To be +certain, none of the options in Spring MVC Test fall under the category of classic unit +testing, but they are a little closer to it. For example, you can isolate the web layer +by injecting mocked services into controllers, in which case you are testing the web +layer only through the `DispatcherServlet` but with actual Spring configuration, as you +might test the data access layer in isolation from the layers above it. Also, you can use +the stand-alone setup, focusing on one controller at a time and manually providing the +configuration required to make it work. + +Another important distinction when using Spring MVC Test is that, conceptually, such +tests are the server-side, so you can check what handler was used, if an exception was +handled with a HandlerExceptionResolver, what the content of the model is, what binding +errors there were, and other details. That means that it is easier to write expectations, +since the server is not an opaque box, as it is when testing it through an actual HTTP +client. This is generally an advantage of classic unit testing: It is easier to write, +reason about, and debug but does not replace the need for full integration tests. At the +same time, it is important not to lose sight of the fact that the response is the most +important thing to check. In short, there is room here for multiple styles and strategies +of testing even within the same project. 
+ +##### Further Examples + +The framework’s own tests include[many sample tests](https://github.com/spring-projects/spring-framework/tree/main/spring-test/src/test/java/org/springframework/test/web/servlet/samples) intended to show how to use MockMvc on its own or through the[WebTestClient](https://github.com/spring-projects/spring-framework/tree/main/spring-test/src/test/java/org/springframework/test/web/servlet/samples/client). Browse these examples for further ideas. + +#### 3.7.2. HtmlUnit Integration + +Spring provides integration between [MockMvc](#spring-mvc-test-server) and[HtmlUnit](http://htmlunit.sourceforge.net/). This simplifies performing end-to-end testing +when using HTML-based views. This integration lets you: + +* Easily test HTML pages by using tools such as[HtmlUnit](http://htmlunit.sourceforge.net/),[WebDriver](https://www.seleniumhq.org), and[Geb](http://www.gebish.org/manual/current/#spock-junit-testng) without the need to + deploy to a Servlet container. + +* Test JavaScript within pages. + +* Optionally, test using mock services to speed up testing. + +* Share logic between in-container end-to-end tests and out-of-container integration tests. + +| |MockMvc works with templating technologies that do not rely on a Servlet Container<br/>(for example, Thymeleaf, FreeMarker, and others), but it does not work with JSPs, since<br/>they rely on the Servlet container.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Why HtmlUnit Integration? + +The most obvious question that comes to mind is “Why do I need this?” The answer is +best found by exploring a very basic sample application. Assume you have a Spring MVC web +application that supports CRUD operations on a `Message` object. The application also +supports paging through all messages. 
How would you go about testing it?
+
+With Spring MVC Test, we can easily test if we are able to create a `Message`, as follows:
+
+Java
+
+```
+MockHttpServletRequestBuilder createMessage = post("/messages/")
+        .param("summary", "Spring Rocks")
+        .param("text", "In case you didn't know, Spring Rocks!");
+
+mockMvc.perform(createMessage)
+        .andExpect(status().is3xxRedirection())
+        .andExpect(redirectedUrl("/messages/123"));
+```
+
+Kotlin
+
+```
+@Test
+fun test() {
+    mockMvc.post("/messages/") {
+        param("summary", "Spring Rocks")
+        param("text", "In case you didn't know, Spring Rocks!")
+    }.andExpect {
+        status().is3xxRedirection()
+        redirectedUrl("/messages/123")
+    }
+}
+```
+
+What if we want to test the form view that lets us create the message? For example,
+assume our form looks like the following snippet:
+
+```
+<form id="messageForm" action="/messages/" method="post">
+    <div class="pull-right"><a href="/messages/">Messages</a></div>
+
+    <label for="summary">Summary</label>
+    <input type="text" class="required" id="summary" name="summary" value="" />
+
+    <label for="text">Message</label>
+    <textarea id="text" name="text"></textarea>
+
+    <div class="form-actions">
+        <input type="submit" value="Create" />
+    </div>
+</form>
+```
+
+How do we ensure that our form produces the correct request to create a new message? A
+naive attempt might resemble the following:
+
+Java
+
+```
+mockMvc.perform(get("/messages/form"))
+        .andExpect(xpath("//input[@name='summary']").exists())
+        .andExpect(xpath("//textarea[@name='text']").exists());
+```
+
+Kotlin
+
+```
+mockMvc.get("/messages/form").andExpect {
+    xpath("//input[@name='summary']") { exists() }
+    xpath("//textarea[@name='text']") { exists() }
+}
+```
+
+This test has some obvious drawbacks. If we update our controller to use the parameter `message` instead of `text`, our form test continues to pass, even though the HTML form
+is out of sync with the controller. 
To resolve this we can combine our two tests, as +follows: + +Java + +``` +String summaryParamName = "summary"; +String textParamName = "text"; +mockMvc.perform(get("/messages/form")) + .andExpect(xpath("//input[@name='" + summaryParamName + "']").exists()) + .andExpect(xpath("//textarea[@name='" + textParamName + "']").exists()); + +MockHttpServletRequestBuilder createMessage = post("/messages/") + .param(summaryParamName, "Spring Rocks") + .param(textParamName, "In case you didn't know, Spring Rocks!"); + +mockMvc.perform(createMessage) + .andExpect(status().is3xxRedirection()) + .andExpect(redirectedUrl("/messages/123")); +``` + +Kotlin + +``` +val summaryParamName = "summary"; +val textParamName = "text"; +mockMvc.get("/messages/form").andExpect { + xpath("//input[@name='$summaryParamName']") { exists() } + xpath("//textarea[@name='$textParamName']") { exists() } +} +mockMvc.post("/messages/") { + param(summaryParamName, "Spring Rocks") + param(textParamName, "In case you didn't know, Spring Rocks!") +}.andExpect { + status().is3xxRedirection() + redirectedUrl("/messages/123") +} +``` + +This would reduce the risk of our test incorrectly passing, but there are still some +problems: + +* What if we have multiple forms on our page? Admittedly, we could update our XPath + expressions, but they get more complicated as we take more factors into account: Are + the fields the correct type? Are the fields enabled? And so on. + +* Another issue is that we are doing double the work we would expect. We must first + verify the view, and then we submit the view with the same parameters we just verified. + Ideally, this could be done all at once. + +* Finally, we still cannot account for some things. For example, what if the form has + JavaScript validation that we wish to test as well? + +The overall problem is that testing a web page does not involve a single interaction. 
+Instead, it is a combination of how the user interacts with a web page and how that web +page interacts with other resources. For example, the result of a form view is used as +the input to a user for creating a message. In addition, our form view can potentially +use additional resources that impact the behavior of the page, such as JavaScript +validation. + +###### Integration Testing to the Rescue? + +To resolve the issues mentioned earlier, we could perform end-to-end integration testing, +but this has some drawbacks. Consider testing the view that lets us page through the +messages. We might need the following tests: + +* Does our page display a notification to the user to indicate that no results are + available when the messages are empty? + +* Does our page properly display a single message? + +* Does our page properly support paging? + +To set up these tests, we need to ensure our database contains the proper messages. This +leads to a number of additional challenges: + +* Ensuring the proper messages are in the database can be tedious. (Consider foreign key + constraints.) + +* Testing can become slow, since each test would need to ensure that the database is in + the correct state. + +* Since our database needs to be in a specific state, we cannot run tests in parallel. + +* Performing assertions on such items as auto-generated ids, timestamps, and others can + be difficult. + +These challenges do not mean that we should abandon end-to-end integration testing +altogether. Instead, we can reduce the number of end-to-end integration tests by +refactoring our detailed tests to use mock services that run much faster, more reliably, +and without side effects. We can then implement a small number of true end-to-end +integration tests that validate simple workflows to ensure that everything works together +properly. 
+ +###### Enter HtmlUnit Integration + +So how can we achieve a balance between testing the interactions of our pages and still +retain good performance within our test suite? The answer is: “By integrating MockMvc +with HtmlUnit.” + +###### HtmlUnit Integration Options + +You have a number of options when you want to integrate MockMvc with HtmlUnit: + +* [MockMvc and HtmlUnit](#spring-mvc-test-server-htmlunit-mah): Use this option if you + want to use the raw HtmlUnit libraries. + +* [MockMvc and WebDriver](#spring-mvc-test-server-htmlunit-webdriver): Use this option to + ease development and reuse code between integration and end-to-end testing. + +* [MockMvc and Geb](#spring-mvc-test-server-htmlunit-geb): Use this option if you want to + use Groovy for testing, ease development, and reuse code between integration and + end-to-end testing. + +##### MockMvc and HtmlUnit + +This section describes how to integrate MockMvc and HtmlUnit. Use this option if you want +to use the raw HtmlUnit libraries. + +###### MockMvc and HtmlUnit Setup + +First, make sure that you have included a test dependency on`net.sourceforge.htmlunit:htmlunit`. In order to use HtmlUnit with Apache HttpComponents +4.5+, you need to use HtmlUnit 2.18 or higher. + +We can easily create an HtmlUnit `WebClient` that integrates with MockMvc by using the`MockMvcWebClientBuilder`, as follows: + +Java + +``` +WebClient webClient; + +@BeforeEach +void setup(WebApplicationContext context) { + webClient = MockMvcWebClientBuilder + .webAppContextSetup(context) + .build(); +} +``` + +Kotlin + +``` +lateinit var webClient: WebClient + +@BeforeEach +fun setup(context: WebApplicationContext) { + webClient = MockMvcWebClientBuilder + .webAppContextSetup(context) + .build() +} +``` + +| |This is a simple example of using `MockMvcWebClientBuilder`. 
For advanced usage,<br/>see [Advanced `MockMvcWebClientBuilder`](#spring-mvc-test-server-htmlunit-mah-advanced-builder).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +This ensures that any URL that references `localhost` as the server is directed to our`MockMvc` instance without the need for a real HTTP connection. Any other URL is +requested by using a network connection, as normal. This lets us easily test the use of +CDNs. + +###### MockMvc and HtmlUnit Usage + +Now we can use HtmlUnit as we normally would but without the need to deploy our +application to a Servlet container. For example, we can request the view to create a +message with the following: + +Java + +``` +HtmlPage createMsgFormPage = webClient.getPage("http://localhost/messages/form"); +``` + +Kotlin + +``` +val createMsgFormPage = webClient.getPage("http://localhost/messages/form") +``` + +| |The default context path is `""`. 
Alternatively, we can specify the context path,<br/>as described in [Advanced `MockMvcWebClientBuilder`](#spring-mvc-test-server-htmlunit-mah-advanced-builder).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Once we have a reference to the `HtmlPage`, we can then fill out the form and submit it +to create a message, as the following example shows: + +Java + +``` +HtmlForm form = createMsgFormPage.getHtmlElementById("messageForm"); +HtmlTextInput summaryInput = createMsgFormPage.getHtmlElementById("summary"); +summaryInput.setValueAttribute("Spring Rocks"); +HtmlTextArea textInput = createMsgFormPage.getHtmlElementById("text"); +textInput.setText("In case you didn't know, Spring Rocks!"); +HtmlSubmitInput submit = form.getOneHtmlElementByAttribute("input", "type", "submit"); +HtmlPage newMessagePage = submit.click(); +``` + +Kotlin + +``` +val form = createMsgFormPage.getHtmlElementById("messageForm") +val summaryInput = createMsgFormPage.getHtmlElementById("summary") +summaryInput.setValueAttribute("Spring Rocks") +val textInput = createMsgFormPage.getHtmlElementById("text") +textInput.setText("In case you didn't know, Spring Rocks!") +val submit = form.getOneHtmlElementByAttribute("input", "type", "submit") +val newMessagePage = submit.click() +``` + +Finally, we can verify that a new message was created successfully. 
The following +assertions use the [AssertJ](https://assertj.github.io/doc/) library: + +Java + +``` +assertThat(newMessagePage.getUrl().toString()).endsWith("/messages/123"); +String id = newMessagePage.getHtmlElementById("id").getTextContent(); +assertThat(id).isEqualTo("123"); +String summary = newMessagePage.getHtmlElementById("summary").getTextContent(); +assertThat(summary).isEqualTo("Spring Rocks"); +String text = newMessagePage.getHtmlElementById("text").getTextContent(); +assertThat(text).isEqualTo("In case you didn't know, Spring Rocks!"); +``` + +Kotlin + +``` +assertThat(newMessagePage.getUrl().toString()).endsWith("/messages/123") +val id = newMessagePage.getHtmlElementById("id").getTextContent() +assertThat(id).isEqualTo("123") +val summary = newMessagePage.getHtmlElementById("summary").getTextContent() +assertThat(summary).isEqualTo("Spring Rocks") +val text = newMessagePage.getHtmlElementById("text").getTextContent() +assertThat(text).isEqualTo("In case you didn't know, Spring Rocks!") +``` + +The preceding code improves on our[MockMvc test](#spring-mvc-test-server-htmlunit-mock-mvc-test) in a number of ways. +First, we no longer have to explicitly verify our form and then create a request that +looks like the form. Instead, we request the form, fill it out, and submit it, thereby +significantly reducing the overhead. + +Another important factor is that [HtmlUnit +uses the Mozilla Rhino engine](http://htmlunit.sourceforge.net/javascript.html) to evaluate JavaScript. This means that we can also test +the behavior of JavaScript within our pages. + +See the [HtmlUnit documentation](http://htmlunit.sourceforge.net/gettingStarted.html) for +additional information about using HtmlUnit. + +###### Advanced `MockMvcWebClientBuilder` + +In the examples so far, we have used `MockMvcWebClientBuilder` in the simplest way +possible, by building a `WebClient` based on the `WebApplicationContext` loaded for us by +the Spring TestContext Framework. 
This approach is repeated in the following example: + +Java + +``` +WebClient webClient; + +@BeforeEach +void setup(WebApplicationContext context) { + webClient = MockMvcWebClientBuilder + .webAppContextSetup(context) + .build(); +} +``` + +Kotlin + +``` +lateinit var webClient: WebClient + +@BeforeEach +fun setup(context: WebApplicationContext) { + webClient = MockMvcWebClientBuilder + .webAppContextSetup(context) + .build() +} +``` + +We can also specify additional configuration options, as the following example shows: + +Java + +``` +WebClient webClient; + +@BeforeEach +void setup() { + webClient = MockMvcWebClientBuilder + // demonstrates applying a MockMvcConfigurer (Spring Security) + .webAppContextSetup(context, springSecurity()) + // for illustration only - defaults to "" + .contextPath("") + // By default MockMvc is used for localhost only; + // the following will use MockMvc for example.com and example.org as well + .useMockMvcForHosts("example.com","example.org") + .build(); +} +``` + +Kotlin + +``` +lateinit var webClient: WebClient + +@BeforeEach +fun setup() { + webClient = MockMvcWebClientBuilder + // demonstrates applying a MockMvcConfigurer (Spring Security) + .webAppContextSetup(context, springSecurity()) + // for illustration only - defaults to "" + .contextPath("") + // By default MockMvc is used for localhost only; + // the following will use MockMvc for example.com and example.org as well + .useMockMvcForHosts("example.com","example.org") + .build() +} +``` + +As an alternative, we can perform the exact same setup by configuring the `MockMvc`instance separately and supplying it to the `MockMvcWebClientBuilder`, as follows: + +Java + +``` +MockMvc mockMvc = MockMvcBuilders + .webAppContextSetup(context) + .apply(springSecurity()) + .build(); + +webClient = MockMvcWebClientBuilder + .mockMvcSetup(mockMvc) + // for illustration only - defaults to "" + .contextPath("") + // By default MockMvc is used for localhost only; + // the following will use 
MockMvc for example.com and example.org as well + .useMockMvcForHosts("example.com","example.org") + .build(); +``` + +Kotlin + +``` +// Not possible in Kotlin until https://youtrack.jetbrains.com/issue/KT-22208 is fixed +``` + +This is more verbose, but, by building the `WebClient` with a `MockMvc` instance, we have +the full power of MockMvc at our fingertips. + +| |For additional information on creating a `MockMvc` instance, see[Setup Choices](#spring-mvc-test-server-setup-options).| +|---|-----------------------------------------------------------------------------------------------------------------------| + +##### MockMvc and WebDriver + +In the previous sections, we have seen how to use MockMvc in conjunction with the raw +HtmlUnit APIs. In this section, we use additional abstractions within the Selenium[WebDriver](https://docs.seleniumhq.org/projects/webdriver/) to make things even easier. + +###### Why WebDriver and MockMvc? + +We can already use HtmlUnit and MockMvc, so why would we want to use WebDriver? The +Selenium WebDriver provides a very elegant API that lets us easily organize our code. To +better show how it works, we explore an example in this section. + +| |Despite being a part of [Selenium](https://docs.seleniumhq.org/), WebDriver does not<br/>require a Selenium Server to run your tests.| +|---|-------------------------------------------------------------------------------------------------------------------------------------| + +Suppose we need to ensure that a message is created properly. The tests involve finding +the HTML form input elements, filling them out, and making various assertions. + +This approach results in numerous separate tests because we want to test error conditions +as well. For example, we want to ensure that we get an error if we fill out only part of +the form. If we fill out the entire form, the newly created message should be displayed +afterwards. 
+ +If one of the fields were named “summary”, we might have something that resembles the +following repeated in multiple places within our tests: + +Java + +``` +HtmlTextInput summaryInput = currentPage.getHtmlElementById("summary"); +summaryInput.setValueAttribute(summary); +``` + +Kotlin + +``` +val summaryInput = currentPage.getHtmlElementById("summary") +summaryInput.setValueAttribute(summary) +``` + +So what happens if we change the `id` to `smmry`? Doing so would force us to update all +of our tests to incorporate this change. This violates the DRY principle, so we should +ideally extract this code into its own method, as follows: + +Java + +``` +public HtmlPage createMessage(HtmlPage currentPage, String summary, String text) { + setSummary(currentPage, summary); + // ... +} + +public void setSummary(HtmlPage currentPage, String summary) { + HtmlTextInput summaryInput = currentPage.getHtmlElementById("summary"); + summaryInput.setValueAttribute(summary); +} +``` + +Kotlin + +``` +fun createMessage(currentPage: HtmlPage, summary:String, text:String) :HtmlPage{ + setSummary(currentPage, summary); + // ... +} + +fun setSummary(currentPage:HtmlPage , summary: String) { + val summaryInput = currentPage.getHtmlElementById("summary") + summaryInput.setValueAttribute(summary) +} +``` + +Doing so ensures that we do not have to update all of our tests if we change the UI. 
+
+We might even take this a step further and place this logic within an `Object` that
+represents the `HtmlPage` we are currently on, as the following example shows:
+
+Java
+
+```
+public class CreateMessagePage {
+
+    final HtmlPage currentPage;
+
+    final HtmlTextInput summaryInput;
+
+    final HtmlSubmitInput submit;
+
+    public CreateMessagePage(HtmlPage currentPage) {
+        this.currentPage = currentPage;
+        this.summaryInput = currentPage.getHtmlElementById("summary");
+        this.submit = currentPage.getHtmlElementById("submit");
+    }
+
+    public <T> T createMessage(String summary, String text) throws Exception {
+        setSummary(summary);
+
+        HtmlPage result = submit.click();
+        boolean error = CreateMessagePage.at(result);
+
+        return (T) (error ? new CreateMessagePage(result) : new ViewMessagePage(result));
+    }
+
+    public void setSummary(String summary) throws Exception {
+        summaryInput.setValueAttribute(summary);
+    }
+
+    public static boolean at(HtmlPage page) {
+        return "Create Message".equals(page.getTitleText());
+    }
+}
+```
+
+Kotlin
+
+```
+class CreateMessagePage(private val currentPage: HtmlPage) {
+
+    val summaryInput: HtmlTextInput = currentPage.getHtmlElementById("summary")
+
+    val submit: HtmlSubmitInput = currentPage.getHtmlElementById("submit")
+
+    fun <T> createMessage(summary: String, text: String): T {
+        setSummary(summary)
+
+        val result = submit.click()
+        val error = at(result)
+
+        return (if (error) CreateMessagePage(result) else ViewMessagePage(result)) as T
+    }
+
+    fun setSummary(summary: String) {
+        summaryInput.setValueAttribute(summary)
+    }
+
+    fun at(page: HtmlPage): Boolean {
+        return "Create Message" == page.getTitleText()
+    }
+}
+```
+
+Formerly, this pattern was known as the [Page Object Pattern](https://github.com/SeleniumHQ/selenium/wiki/PageObjects). While we
+can certainly do this with HtmlUnit, WebDriver provides some tools that we explore in the
+following sections to make this pattern much easier to implement. 
+ +###### MockMvc and WebDriver Setup + +To use Selenium WebDriver with the Spring MVC Test framework, make sure that your project +includes a test dependency on `org.seleniumhq.selenium:selenium-htmlunit-driver`. + +We can easily create a Selenium WebDriver that integrates with MockMvc by using the`MockMvcHtmlUnitDriverBuilder` as the following example shows: + +Java + +``` +WebDriver driver; + +@BeforeEach +void setup(WebApplicationContext context) { + driver = MockMvcHtmlUnitDriverBuilder + .webAppContextSetup(context) + .build(); +} +``` + +Kotlin + +``` +lateinit var driver: WebDriver + +@BeforeEach +fun setup(context: WebApplicationContext) { + driver = MockMvcHtmlUnitDriverBuilder + .webAppContextSetup(context) + .build() +} +``` + +| |This is a simple example of using `MockMvcHtmlUnitDriverBuilder`. For more advanced<br/>usage, see [Advanced `MockMvcHtmlUnitDriverBuilder`](#spring-mvc-test-server-htmlunit-webdriver-advanced-builder).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The preceding example ensures that any URL that references `localhost` as the server is +directed to our `MockMvc` instance without the need for a real HTTP connection. Any other +URL is requested by using a network connection, as normal. This lets us easily test the +use of CDNs. + +###### MockMvc and WebDriver Usage + +Now we can use WebDriver as we normally would but without the need to deploy our +application to a Servlet container. 
For example, we can request the view to create a +message with the following: + +Java + +``` +CreateMessagePage page = CreateMessagePage.to(driver); +``` + +Kotlin + +``` +val page = CreateMessagePage.to(driver) +``` + +We can then fill out the form and submit it to create a message, as follows: + +Java + +``` +ViewMessagePage viewMessagePage = + page.createMessage(ViewMessagePage.class, expectedSummary, expectedText); +``` + +Kotlin + +``` +val viewMessagePage = + page.createMessage(ViewMessagePage::class, expectedSummary, expectedText) +``` + +This improves on the design of our [HtmlUnit test](#spring-mvc-test-server-htmlunit-mah-usage)by leveraging the Page Object Pattern. As we mentioned in[Why WebDriver and MockMvc?](#spring-mvc-test-server-htmlunit-webdriver-why), we can use the Page Object Pattern +with HtmlUnit, but it is much easier with WebDriver. Consider the following`CreateMessagePage` implementation: + +Java + +``` +public class CreateMessagePage + extends AbstractPage { (1) + + (2) + private WebElement summary; + private WebElement text; + + (3) + @FindBy(css = "input[type=submit]") + private WebElement submit; + + public CreateMessagePage(WebDriver driver) { + super(driver); + } + + public <T> T createMessage(Class<T> resultPage, String summary, String details) { + this.summary.sendKeys(summary); + this.text.sendKeys(details); + this.submit.click(); + return PageFactory.initElements(driver, resultPage); + } + + public static CreateMessagePage to(WebDriver driver) { + driver.get("http://localhost:9990/mail/messages/form"); + return PageFactory.initElements(driver, CreateMessagePage.class); + } +} +``` + +|**1**| `CreateMessagePage` extends the `AbstractPage`. We do not go over the details of`AbstractPage`, but, in summary, it contains common functionality for all of our pages.<br/>For example, if our application has a navigational bar, global error messages, and other<br/>features, we can place this logic in a shared location. 
| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|We have a member variable for each of the parts of the HTML page in which we are<br/>interested. These are of type `WebElement`. WebDriver’s[`PageFactory`](https://github.com/SeleniumHQ/selenium/wiki/PageFactory) lets us remove a<br/>lot of code from the HtmlUnit version of `CreateMessagePage` by automatically resolving<br/>each `WebElement`. The[`PageFactory#initElements(WebDriver,Class<T>)`](https://seleniumhq.github.io/selenium/docs/api/java/org/openqa/selenium/support/PageFactory.html#initElements-org.openqa.selenium.WebDriver-java.lang.Class-)method automatically resolves each `WebElement` by using the field name and looking it up<br/>by the `id` or `name` of the element within the HTML page.| +|**3**| We can use the[`@FindBy` annotation](https://github.com/SeleniumHQ/selenium/wiki/PageFactory#making-the-example-work-using-annotations)to override the default lookup behavior. Our example shows how to use the `@FindBy`annotation to look up our submit button with a `css` selector (**input[type=submit]**). 
| + +Kotlin + +``` +class CreateMessagePage(private val driver: WebDriver) : AbstractPage(driver) { (1) + + (2) + private lateinit var summary: WebElement + private lateinit var text: WebElement + + (3) + @FindBy(css = "input[type=submit]") + private lateinit var submit: WebElement + + fun <T> createMessage(resultPage: Class<T>, summary: String, details: String): T { + this.summary.sendKeys(summary) + text.sendKeys(details) + submit.click() + return PageFactory.initElements(driver, resultPage) + } + companion object { + fun to(driver: WebDriver): CreateMessagePage { + driver.get("http://localhost:9990/mail/messages/form") + return PageFactory.initElements(driver, CreateMessagePage::class.java) + } + } +} +``` + +|**1**| `CreateMessagePage` extends the `AbstractPage`. We do not go over the details of`AbstractPage`, but, in summary, it contains common functionality for all of our pages.<br/>For example, if our application has a navigational bar, global error messages, and other<br/>features, we can place this logic in a shared location. | +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|We have a member variable for each of the parts of the HTML page in which we are<br/>interested. These are of type `WebElement`. 
WebDriver’s[`PageFactory`](https://github.com/SeleniumHQ/selenium/wiki/PageFactory) lets us remove a<br/>lot of code from the HtmlUnit version of `CreateMessagePage` by automatically resolving<br/>each `WebElement`. The[`PageFactory#initElements(WebDriver,Class<T>)`](https://seleniumhq.github.io/selenium/docs/api/java/org/openqa/selenium/support/PageFactory.html#initElements-org.openqa.selenium.WebDriver-java.lang.Class-)method automatically resolves each `WebElement` by using the field name and looking it up<br/>by the `id` or `name` of the element within the HTML page.| +|**3**| We can use the[`@FindBy` annotation](https://github.com/SeleniumHQ/selenium/wiki/PageFactory#making-the-example-work-using-annotations)to override the default lookup behavior. Our example shows how to use the `@FindBy`annotation to look up our submit button with a `css` selector (**input[type=submit]**). | + +Finally, we can verify that a new message was created successfully. The following +assertions use the [AssertJ](https://assertj.github.io/doc/) assertion library: + +Java + +``` +assertThat(viewMessagePage.getMessage()).isEqualTo(expectedMessage); +assertThat(viewMessagePage.getSuccess()).isEqualTo("Successfully created a new message"); +``` + +Kotlin + +``` +assertThat(viewMessagePage.message).isEqualTo(expectedMessage) +assertThat(viewMessagePage.success).isEqualTo("Successfully created a new message") +``` + +We can see that our `ViewMessagePage` lets us interact with our custom domain model. For +example, it exposes a method that returns a `Message` object: + +Java + +``` +public Message getMessage() throws ParseException { + Message message = new Message(); + message.setId(getId()); + message.setCreated(getCreated()); + message.setSummary(getSummary()); + message.setText(getText()); + return message; +} +``` + +Kotlin + +``` +fun getMessage() = Message(getId(), getCreated(), getSummary(), getText()) +``` + +We can then use the rich domain objects in our assertions. 
+ +Lastly, we must not forget to close the `WebDriver` instance when the test is complete, +as follows: + +Java + +``` +@AfterEach +void destroy() { + if (driver != null) { + driver.close(); + } +} +``` + +Kotlin + +``` +@AfterEach +fun destroy() { + if (driver != null) { + driver.close() + } +} +``` + +For additional information on using WebDriver, see the Selenium[WebDriver documentation](https://github.com/SeleniumHQ/selenium/wiki/Getting-Started). + +###### Advanced `MockMvcHtmlUnitDriverBuilder` + +In the examples so far, we have used `MockMvcHtmlUnitDriverBuilder` in the simplest way +possible, by building a `WebDriver` based on the `WebApplicationContext` loaded for us by +the Spring TestContext Framework. This approach is repeated here, as follows: + +Java + +``` +WebDriver driver; + +@BeforeEach +void setup(WebApplicationContext context) { + driver = MockMvcHtmlUnitDriverBuilder + .webAppContextSetup(context) + .build(); +} +``` + +Kotlin + +``` +lateinit var driver: WebDriver + +@BeforeEach +fun setup(context: WebApplicationContext) { + driver = MockMvcHtmlUnitDriverBuilder + .webAppContextSetup(context) + .build() +} +``` + +We can also specify additional configuration options, as follows: + +Java + +``` +WebDriver driver; + +@BeforeEach +void setup() { + driver = MockMvcHtmlUnitDriverBuilder + // demonstrates applying a MockMvcConfigurer (Spring Security) + .webAppContextSetup(context, springSecurity()) + // for illustration only - defaults to "" + .contextPath("") + // By default MockMvc is used for localhost only; + // the following will use MockMvc for example.com and example.org as well + .useMockMvcForHosts("example.com","example.org") + .build(); +} +``` + +Kotlin + +``` +lateinit var driver: WebDriver + +@BeforeEach +fun setup() { + driver = MockMvcHtmlUnitDriverBuilder + // demonstrates applying a MockMvcConfigurer (Spring Security) + .webAppContextSetup(context, springSecurity()) + // for illustration only - defaults to "" + .contextPath("") + 
// By default MockMvc is used for localhost only; + // the following will use MockMvc for example.com and example.org as well + .useMockMvcForHosts("example.com","example.org") + .build() +} +``` + +As an alternative, we can perform the exact same setup by configuring the `MockMvc`instance separately and supplying it to the `MockMvcHtmlUnitDriverBuilder`, as follows: + +Java + +``` +MockMvc mockMvc = MockMvcBuilders + .webAppContextSetup(context) + .apply(springSecurity()) + .build(); + +driver = MockMvcHtmlUnitDriverBuilder + .mockMvcSetup(mockMvc) + // for illustration only - defaults to "" + .contextPath("") + // By default MockMvc is used for localhost only; + // the following will use MockMvc for example.com and example.org as well + .useMockMvcForHosts("example.com","example.org") + .build(); +``` + +Kotlin + +``` +// Not possible in Kotlin until https://youtrack.jetbrains.com/issue/KT-22208 is fixed +``` + +This is more verbose, but, by building the `WebDriver` with a `MockMvc` instance, we have +the full power of MockMvc at our fingertips. + +| |For additional information on creating a `MockMvc` instance, see[Setup Choices](#spring-mvc-test-server-setup-options).| +|---|-----------------------------------------------------------------------------------------------------------------------| + +##### MockMvc and Geb + +In the previous section, we saw how to use MockMvc with WebDriver. In this section, we +use [Geb](http://www.gebish.org/) to make our tests even Groovy-er. + +###### Why Geb and MockMvc? + +Geb is backed by WebDriver, so it offers many of the[same benefits](#spring-mvc-test-server-htmlunit-webdriver-why) that we get from +WebDriver. However, Geb makes things even easier by taking care of some of the +boilerplate code for us. 
+ +###### MockMvc and Geb Setup + +We can easily initialize a Geb `Browser` with a Selenium WebDriver that uses MockMvc, as +follows: + +``` +def setup() { + browser.driver = MockMvcHtmlUnitDriverBuilder + .webAppContextSetup(context) + .build() +} +``` + +| |This is a simple example of using `MockMvcHtmlUnitDriverBuilder`. For more advanced<br/>usage, see [Advanced `MockMvcHtmlUnitDriverBuilder`](#spring-mvc-test-server-htmlunit-webdriver-advanced-builder).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +This ensures that any URL referencing `localhost` as the server is directed to our`MockMvc` instance without the need for a real HTTP connection. Any other URL is +requested by using a network connection as normal. This lets us easily test the use of +CDNs. + +###### MockMvc and Geb Usage + +Now we can use Geb as we normally would but without the need to deploy our application to +a Servlet container. For example, we can request the view to create a message with the +following: + +``` +to CreateMessagePage +``` + +We can then fill out the form and submit it to create a message, as follows: + +``` +when: +form.summary = expectedSummary +form.text = expectedMessage +submit.click(ViewMessagePage) +``` + +Any unrecognized method calls or property accesses or references that are not found are +forwarded to the current page object. This removes a lot of the boilerplate code we +needed when using WebDriver directly. + +As with direct WebDriver usage, this improves on the design of our[HtmlUnit test](#spring-mvc-test-server-htmlunit-mah-usage) by using the Page Object +Pattern. As mentioned previously, we can use the Page Object Pattern with HtmlUnit and +WebDriver, but it is even easier with Geb. 
Consider our new Groovy-based`CreateMessagePage` implementation: + +``` +class CreateMessagePage extends Page { + static url = 'messages/form' + static at = { assert title == 'Messages : Create'; true } + static content = { + submit { $('input[type=submit]') } + form { $('form') } + errors(required:false) { $('label.error, .alert-error')?.text() } + } +} +``` + +Our `CreateMessagePage` extends `Page`. We do not go over the details of `Page`, but, in +summary, it contains common functionality for all of our pages. We define a URL in which +this page can be found. This lets us navigate to the page, as follows: + +``` +to CreateMessagePage +``` + +We also have an `at` closure that determines if we are at the specified page. It should +return `true` if we are on the correct page. This is why we can assert that we are on the +correct page, as follows: + +``` +then: +at CreateMessagePage +errors.contains('This field is required.') +``` + +| |We use an assertion in the closure so that we can determine where things went wrong<br/>if we were at the wrong page.| +|---|---------------------------------------------------------------------------------------------------------------------| + +Next, we create a `content` closure that specifies all the areas of interest within the +page. We can use a[jQuery-ish Navigator +API](http://www.gebish.org/manual/current/#the-jquery-ish-navigator-api) to select the content in which we are interested. + +Finally, we can verify that a new message was created successfully, as follows: + +``` +then: +at ViewMessagePage +success == 'Successfully created a new message' +id +date +summary == expectedSummary +message == expectedMessage +``` + +For further details on how to get the most out of Geb, see[The Book of Geb](http://www.gebish.org/manual/current/) user’s manual. + +### 3.8. Testing Client Applications + +You can use client-side tests to test code that internally uses the `RestTemplate`. 
The
+idea is to declare expected requests and to provide “stub” responses so that you can
+focus on testing the code in isolation (that is, without running a server). The following
+example shows how to do so:
+
+Java
+
+```
+RestTemplate restTemplate = new RestTemplate();
+
+MockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();
+mockServer.expect(requestTo("/greeting")).andRespond(withSuccess());
+
+// Test code that uses the above RestTemplate ...
+
+mockServer.verify();
+```
+
+Kotlin
+
+```
+val restTemplate = RestTemplate()
+
+val mockServer = MockRestServiceServer.bindTo(restTemplate).build()
+mockServer.expect(requestTo("/greeting")).andRespond(withSuccess())
+
+// Test code that uses the above RestTemplate ...
+
+mockServer.verify()
+```
+
+In the preceding example, `MockRestServiceServer` (the central class for client-side REST
+tests) configures the `RestTemplate` with a custom `ClientHttpRequestFactory` that
+asserts actual requests against expectations and returns “stub” responses. In this
+case, we expect a request to `/greeting` and want to return a 200 response with `text/plain` content. We can define additional expected requests and stub responses as
+needed. When we define expected requests and stub responses, the `RestTemplate` can be
+used in client-side code as usual. At the end of testing, `mockServer.verify()` can be
+used to verify that all expectations have been satisfied.
+
+By default, requests are expected in the order in which expectations were declared. You
+can set the `ignoreExpectOrder` option when building the server, in which case all
+expectations are checked (in order) to find a match for a given request. That means
+requests are allowed to come in any order. 
The following example uses `ignoreExpectOrder`:
+
+Java
+
+```
+server = MockRestServiceServer.bindTo(restTemplate).ignoreExpectOrder(true).build();
+```
+
+Kotlin
+
+```
+server = MockRestServiceServer.bindTo(restTemplate).ignoreExpectOrder(true).build()
+```
+
+Even with unordered requests by default, each request is allowed to run once only.
+The `expect` method provides an overloaded variant that accepts an `ExpectedCount` argument that specifies a count range (for example, `once`, `manyTimes`, `max`, `min`, `between`, and so on). The following example uses `times`:
+
+Java
+
+```
+RestTemplate restTemplate = new RestTemplate();
+
+MockRestServiceServer mockServer = MockRestServiceServer.bindTo(restTemplate).build();
+mockServer.expect(times(2), requestTo("/something")).andRespond(withSuccess());
+mockServer.expect(times(3), requestTo("/somewhere")).andRespond(withSuccess());
+
+// ...
+
+mockServer.verify();
+```
+
+Kotlin
+
+```
+val restTemplate = RestTemplate()
+
+val mockServer = MockRestServiceServer.bindTo(restTemplate).build()
+mockServer.expect(times(2), requestTo("/something")).andRespond(withSuccess())
+mockServer.expect(times(3), requestTo("/somewhere")).andRespond(withSuccess())
+
+// ...
+
+mockServer.verify()
+```
+
+Note that, when `ignoreExpectOrder` is not set (the default), and, therefore, requests
+are expected in order of declaration, then that order applies only to the first of any
+expected request. For example, if "/something" is expected two times followed by
+"/somewhere" three times, then there should be a request to "/something" before there is
+a request to "/somewhere", but, aside from that subsequent "/something" and "/somewhere",
+requests can come at any time.
+
+As an alternative to all of the above, the client-side test support also provides a `ClientHttpRequestFactory` implementation that you can configure into a `RestTemplate` to
+bind it to a `MockMvc` instance. 
That allows processing requests using actual server-side +logic but without running a server. The following example shows how to do so: + +Java + +``` +MockMvc mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build(); +this.restTemplate = new RestTemplate(new MockMvcClientHttpRequestFactory(mockMvc)); + +// Test code that uses the above RestTemplate ... +``` + +Kotlin + +``` +val mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).build() +restTemplate = RestTemplate(MockMvcClientHttpRequestFactory(mockMvc)) + +// Test code that uses the above RestTemplate ... +``` + +#### 3.8.1. Static Imports + +As with server-side tests, the fluent API for client-side tests requires a few static +imports. Those are easy to find by searching for `MockRest*`. Eclipse users should add`MockRestRequestMatchers.*` and `MockRestResponseCreators.*` as +“favorite static members” in the Eclipse preferences under Java → Editor → Content +Assist → Favorites. That allows using content assist after typing the first character of +the static method name. Other IDEs (such IntelliJ) may not require any additional +configuration. Check for the support for code completion on static members. + +#### 3.8.2. Further Examples of Client-side REST Tests + +Spring MVC Test’s own tests include[example +tests](https://github.com/spring-projects/spring-framework/tree/main/spring-test/src/test/java/org/springframework/test/web/client/samples) of client-side REST tests. + +## 4. Further Resources + +See the following resources for more information about testing: + +* [JUnit](https://www.junit.org/): “A programmer-friendly testing framework for Java”. + Used by the Spring Framework in its test suite and supported in the[Spring TestContext Framework](#testcontext-framework). + +* [TestNG](https://testng.org/): A testing framework inspired by JUnit with added support + for test groups, data-driven testing, distributed testing, and other features. 
Supported + in the [Spring TestContext Framework](#testcontext-framework) + +* [AssertJ](https://assertj.github.io/doc/): “Fluent assertions for Java”, + including support for Java 8 lambdas, streams, and other features. + +* [Mock Objects](https://en.wikipedia.org/wiki/Mock_Object): Article in Wikipedia. + +* [MockObjects.com](http://www.mockobjects.com/): Web site dedicated to mock objects, a + technique for improving the design of code within test-driven development. + +* [Mockito](https://mockito.github.io): Java mock library based on the[Test Spy](http://xunitpatterns.com/Test%20Spy.html) pattern. Used by the Spring Framework + in its test suite. + +* [EasyMock](https://easymock.org/): Java library “that provides Mock Objects for + interfaces (and objects through the class extension) by generating them on the fly using + Java’s proxy mechanism.” + +* [JMock](https://jmock.org/): Library that supports test-driven development of Java code + with mock objects. + +* [DbUnit](https://www.dbunit.org/): JUnit extension (also usable with Ant and Maven) that + is targeted at database-driven projects and, among other things, puts your database into + a known state between test runs. + +* [Testcontainers](https://www.testcontainers.org/): Java library that supports JUnit + tests, providing lightweight, throwaway instances of common databases, Selenium web + browsers, or anything else that can run in a Docker container. + +* [The Grinder](https://sourceforge.net/projects/grinder/): Java load testing framework. + +* [SpringMockK](https://github.com/Ninja-Squad/springmockk): Support for Spring Boot + integration tests written in Kotlin using [MockK](https://mockk.io/) instead of Mockito. 
+ diff --git a/docs/en/spring-framework/web-reactive.md b/docs/en/spring-framework/web-reactive.md new file mode 100644 index 0000000000000000000000000000000000000000..75ff8f3d6f9b1dc233e7f91c45ad02d8eb99990f --- /dev/null +++ b/docs/en/spring-framework/web-reactive.md @@ -0,0 +1,8286 @@ +# Web on Reactive Stack + +This part of the documentation covers support for reactive-stack web applications built +on a [Reactive Streams](https://www.reactive-streams.org/) API to run on non-blocking +servers, such as Netty, Undertow, and Servlet 3.1+ containers. Individual chapters cover +the [Spring WebFlux](webflux.html#webflux) framework, +the reactive [`WebClient`](#webflux-client), support for [testing](#webflux-test), +and [reactive libraries](#webflux-reactive-libraries). For Servlet-stack web applications, +see [Web on Servlet Stack](web.html#spring-web). + +## 1. Spring WebFlux + +The original web framework included in the Spring Framework, Spring Web MVC, was +purpose-built for the Servlet API and Servlet containers. The reactive-stack web framework, +Spring WebFlux, was added later in version 5.0. It is fully non-blocking, supports[Reactive Streams](https://www.reactive-streams.org/) back pressure, and runs on such servers as +Netty, Undertow, and Servlet 3.1+ containers. + +Both web frameworks mirror the names of their source modules +([spring-webmvc](https://github.com/spring-projects/spring-framework/tree/main/spring-webmvc) and[spring-webflux](https://github.com/spring-projects/spring-framework/tree/main/spring-webflux)) and co-exist side by side in the +Spring Framework. Each module is optional. Applications can use one or the other module or, +in some cases, both — for example, Spring MVC controllers with the reactive `WebClient`. + +### 1.1. Overview + +Why was Spring WebFlux created? + +Part of the answer is the need for a non-blocking web stack to handle concurrency with a +small number of threads and scale with fewer hardware resources. 
Servlet 3.1 did provide +an API for non-blocking I/O. However, using it leads away from the rest of the Servlet API, +where contracts are synchronous (`Filter`, `Servlet`) or blocking (`getParameter`,`getPart`). This was the motivation for a new common API to serve as a foundation across +any non-blocking runtime. That is important because of servers (such as Netty) that are +well-established in the async, non-blocking space. + +The other part of the answer is functional programming. Much as the addition of annotations +in Java 5 created opportunities (such as annotated REST controllers or unit tests), the addition +of lambda expressions in Java 8 created opportunities for functional APIs in Java. +This is a boon for non-blocking applications and continuation-style APIs (as popularized +by `CompletableFuture` and [ReactiveX](http://reactivex.io/)) that allow declarative +composition of asynchronous logic. At the programming-model level, Java 8 enabled Spring +WebFlux to offer functional web endpoints alongside annotated controllers. + +#### 1.1.1. Define “Reactive” + +We touched on “non-blocking” and “functional” but what does reactive mean? + +The term, “reactive,” refers to programming models that are built around reacting to change — network components reacting to I/O events, UI controllers reacting to mouse events, and others. +In that sense, non-blocking is reactive, because, instead of being blocked, we are now in the mode +of reacting to notifications as operations complete or data becomes available. + +There is also another important mechanism that we on the Spring team associate with “reactive” +and that is non-blocking back pressure. In synchronous, imperative code, blocking calls +serve as a natural form of back pressure that forces the caller to wait. In non-blocking +code, it becomes important to control the rate of events so that a fast producer does not +overwhelm its destination. 
+ +Reactive Streams is a[small spec](https://github.com/reactive-streams/reactive-streams-jvm/blob/master/README.md#specification)(also [adopted](https://docs.oracle.com/javase/9/docs/api/java/util/concurrent/Flow.html) in Java 9) +that defines the interaction between asynchronous components with back pressure. +For example a data repository (acting as[Publisher](https://www.reactive-streams.org/reactive-streams-1.0.1-javadoc/org/reactivestreams/Publisher.html)) +can produce data that an HTTP server (acting as[Subscriber](https://www.reactive-streams.org/reactive-streams-1.0.1-javadoc/org/reactivestreams/Subscriber.html)) +can then write to the response. The main purpose of Reactive Streams is to let the +subscriber control how quickly or how slowly the publisher produces data. + +| |**Common question: what if a publisher cannot slow down?** <br/>The purpose of Reactive Streams is only to establish the mechanism and a boundary.<br/>If a publisher cannot slow down, it has to decide whether to buffer, drop, or fail.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.2. Reactive API + +Reactive Streams plays an important role for interoperability. It is of interest to libraries +and infrastructure components but less useful as an application API, because it is too +low-level. Applications need a higher-level and richer, functional API to +compose async logic — similar to the Java 8 `Stream` API but not only for collections. +This is the role that reactive libraries play. + +[Reactor](https://github.com/reactor/reactor) is the reactive library of choice for +Spring WebFlux. 
It provides the[`Mono`](https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html) and[`Flux`](https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html) API types +to work on data sequences of 0..1 (`Mono`) and 0..N (`Flux`) through a rich set of operators aligned with the +ReactiveX [vocabulary of operators](http://reactivex.io/documentation/operators.html). +Reactor is a Reactive Streams library and, therefore, all of its operators support non-blocking back pressure. +Reactor has a strong focus on server-side Java. It is developed in close collaboration +with Spring. + +WebFlux requires Reactor as a core dependency but it is interoperable with other reactive +libraries via Reactive Streams. As a general rule, a WebFlux API accepts a plain `Publisher`as input, adapts it to a Reactor type internally, uses that, and returns either a`Flux` or a `Mono` as output. So, you can pass any `Publisher` as input and you can apply +operations on the output, but you need to adapt the output for use with another reactive library. +Whenever feasible (for example, annotated controllers), WebFlux adapts transparently to the use +of RxJava or another reactive library. See [Reactive Libraries](#webflux-reactive-libraries) for more details. + +| |In addition to Reactive APIs, WebFlux can also be used with[Coroutines](languages.html#coroutines) APIs in Kotlin which provides a more imperative style of programming.<br/>The following Kotlin code samples will be provided with Coroutines APIs.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.3. 
Programming Models + +The `spring-web` module contains the reactive foundation that underlies Spring WebFlux, +including HTTP abstractions, Reactive Streams [adapters](#webflux-httphandler) for supported +servers, [codecs](#webflux-codecs), and a core [`WebHandler` API](#webflux-web-handler-api) comparable to +the Servlet API but with non-blocking contracts. + +On that foundation, Spring WebFlux provides a choice of two programming models: + +* [Annotated Controllers](#webflux-controller): Consistent with Spring MVC and based on the same annotations + from the `spring-web` module. Both Spring MVC and WebFlux controllers support reactive + (Reactor and RxJava) return types, and, as a result, it is not easy to tell them apart. One notable + difference is that WebFlux also supports reactive `@RequestBody` arguments. + +* [Functional Endpoints](#webflux-fn): Lambda-based, lightweight, and functional programming model. You can think of + this as a small library or a set of utilities that an application can use to route and + handle requests. The big difference with annotated controllers is that the application + is in charge of request handling from start to finish versus declaring intent through + annotations and being called back. + +#### 1.1.4. Applicability + +Spring MVC or WebFlux? + +A natural question to ask but one that sets up an unsound dichotomy. Actually, both +work together to expand the range of available options. The two are designed for +continuity and consistency with each other, they are available side by side, and feedback +from each side benefits both sides. The following diagram shows how the two relate, what they +have in common, and what each supports uniquely: + +![spring mvc and webflux venn](images/spring-mvc-and-webflux-venn.png) + +We suggest that you consider the following specific points: + +* If you have a Spring MVC application that works fine, there is no need to change. 
+ Imperative programming is the easiest way to write, understand, and debug code. + You have maximum choice of libraries, since, historically, most are blocking. + +* If you are already shopping for a non-blocking web stack, Spring WebFlux offers the same + execution model benefits as others in this space and also provides a choice of servers + (Netty, Tomcat, Jetty, Undertow, and Servlet 3.1+ containers), a choice of programming models + (annotated controllers and functional web endpoints), and a choice of reactive libraries + (Reactor, RxJava, or other). + +* If you are interested in a lightweight, functional web framework for use with Java 8 lambdas + or Kotlin, you can use the Spring WebFlux functional web endpoints. That can also be a good choice + for smaller applications or microservices with less complex requirements that can benefit + from greater transparency and control. + +* In a microservice architecture, you can have a mix of applications with either Spring MVC + or Spring WebFlux controllers or with Spring WebFlux functional endpoints. Having support + for the same annotation-based programming model in both frameworks makes it easier to + re-use knowledge while also selecting the right tool for the right job. + +* A simple way to evaluate an application is to check its dependencies. If you have blocking + persistence APIs (JPA, JDBC) or networking APIs to use, Spring MVC is the best choice + for common architectures at least. It is technically feasible with both Reactor and + RxJava to perform blocking calls on a separate thread but you would not be making the + most of a non-blocking web stack. + +* If you have a Spring MVC application with calls to remote services, try the reactive `WebClient`. + You can return reactive types (Reactor, RxJava, [or other](#webflux-reactive-libraries)) + directly from Spring MVC controller methods. The greater the latency per call or the + interdependency among calls, the more dramatic the benefits. 
Spring MVC controllers + can call other reactive components too. + +* If you have a large team, keep in mind the steep learning curve in the shift to non-blocking, + functional, and declarative programming. A practical way to start without a full switch + is to use the reactive `WebClient`. Beyond that, start small and measure the benefits. + We expect that, for a wide range of applications, the shift is unnecessary. If you are + unsure what benefits to look for, start by learning about how non-blocking I/O works + (for example, concurrency on single-threaded Node.js) and its effects. + +#### 1.1.5. Servers + +Spring WebFlux is supported on Tomcat, Jetty, Servlet 3.1+ containers, as well as on +non-Servlet runtimes such as Netty and Undertow. All servers are adapted to a low-level,[common API](#webflux-httphandler) so that higher-level[programming models](#webflux-programming-models) can be supported across servers. + +Spring WebFlux does not have built-in support to start or stop a server. However, it is +easy to [assemble](#webflux-web-handler-api) an application from Spring configuration and[WebFlux infrastructure](#webflux-config) and [run it](#webflux-httphandler) with a few +lines of code. + +Spring Boot has a WebFlux starter that automates these steps. By default, the starter uses +Netty, but it is easy to switch to Tomcat, Jetty, or Undertow by changing your +Maven or Gradle dependencies. Spring Boot defaults to Netty, because it is more widely +used in the asynchronous, non-blocking space and lets a client and a server share resources. + +Tomcat and Jetty can be used with both Spring MVC and WebFlux. Keep in mind, however, that +the way they are used is very different. Spring MVC relies on Servlet blocking I/O and +lets applications use the Servlet API directly if they need to. Spring WebFlux +relies on Servlet 3.1 non-blocking I/O and uses the Servlet API behind a low-level +adapter. It is not exposed for direct use. 
+ +For Undertow, Spring WebFlux uses Undertow APIs directly without the Servlet API. + +#### 1.1.6. Performance + +Performance has many characteristics and meanings. Reactive and non-blocking generally +do not make applications run faster. They can, in some cases, (for example, if using the`WebClient` to run remote calls in parallel). On the whole, it requires more work to do +things the non-blocking way and that can slightly increase the required processing time. + +The key expected benefit of reactive and non-blocking is the ability to scale with a small, +fixed number of threads and less memory. That makes applications more resilient under load, +because they scale in a more predictable way. In order to observe those benefits, however, you +need to have some latency (including a mix of slow and unpredictable network I/O). +That is where the reactive stack begins to show its strengths, and the differences can be +dramatic. + +#### 1.1.7. Concurrency Model + +Both Spring MVC and Spring WebFlux support annotated controllers, but there is a key +difference in the concurrency model and the default assumptions for blocking and threads. + +In Spring MVC (and servlet applications in general), it is assumed that applications can +block the current thread, (for example, for remote calls). For this reason, servlet containers +use a large thread pool to absorb potential blocking during request handling. + +In Spring WebFlux (and non-blocking servers in general), it is assumed that applications +do not block. Therefore, non-blocking servers use a small, fixed-size thread pool +(event loop workers) to handle requests. 
+ +| |“To scale” and “small number of threads” may sound contradictory but to never block the<br/>current thread (and rely on callbacks instead) means that you do not need extra threads, as<br/>there are no blocking calls to absorb.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Invoking a Blocking API + +What if you do need to use a blocking library? Both Reactor and RxJava provide the`publishOn` operator to continue processing on a different thread. That means there is an +easy escape hatch. Keep in mind, however, that blocking APIs are not a good fit for +this concurrency model. + +Mutable State + +In Reactor and RxJava, you declare logic through operators. At runtime, a reactive +pipeline is formed where data is processed sequentially, in distinct stages. A key benefit +of this is that it frees applications from having to protect mutable state because +application code within that pipeline is never invoked concurrently. + +Threading Model + +What threads should you expect to see on a server running with Spring WebFlux? + +* On a “vanilla” Spring WebFlux server (for example, no data access nor other optional + dependencies), you can expect one thread for the server and several others for request + processing (typically as many as the number of CPU cores). Servlet containers, however, + may start with more threads (for example, 10 on Tomcat), in support of both servlet (blocking) I/O + and servlet 3.1 (non-blocking) I/O usage. + +* The reactive `WebClient` operates in event loop style. So you can see a small, fixed + number of processing threads related to that (for example, `reactor-http-nio-` with the Reactor + Netty connector). However, if Reactor Netty is used for both client and server, the two + share event loop resources by default. 
+ +* Reactor and RxJava provide thread pool abstractions, called schedulers, to use with the`publishOn` operator that is used to switch processing to a different thread pool. + The schedulers have names that suggest a specific concurrency strategy — for example, “parallel” + (for CPU-bound work with a limited number of threads) or “elastic” (for I/O-bound work with + a large number of threads). If you see such threads, it means some code is using a + specific thread pool `Scheduler` strategy. + +* Data access libraries and other third party dependencies can also create and use threads + of their own. + +Configuring + +The Spring Framework does not provide support for starting and stopping[servers](#webflux-server-choice). To configure the threading model for a server, +you need to use server-specific configuration APIs, or, if you use Spring Boot, +check the Spring Boot configuration options for each server. You can[configure](#webflux-client-builder) the `WebClient` directly. +For all other libraries, see their respective documentation. + +### 1.2. Reactive Core + +The `spring-web` module contains the following foundational support for reactive web +applications: + +* For server request processing there are two levels of support. + + * [HttpHandler](#webflux-httphandler): Basic contract for HTTP request handling with + non-blocking I/O and Reactive Streams back pressure, along with adapters for Reactor Netty, + Undertow, Tomcat, Jetty, and any Servlet 3.1+ container. + + * [`WebHandler` API](#webflux-web-handler-api): Slightly higher level, general-purpose web API for + request handling, on top of which concrete programming models such as annotated + controllers and functional endpoints are built. 
+ +* For the client side, there is a basic `ClientHttpConnector` contract to perform HTTP + requests with non-blocking I/O and Reactive Streams back pressure, along with adapters for[Reactor Netty](https://github.com/reactor/reactor-netty), reactive[Jetty HttpClient](https://github.com/jetty-project/jetty-reactive-httpclient)and [Apache HttpComponents](https://hc.apache.org/). + The higher level [WebClient](#webflux-client) used in applications + builds on this basic contract. + +* For client and server, [codecs](#webflux-codecs) for serialization and + deserialization of HTTP request and response content. + +#### 1.2.1. `HttpHandler` + +[HttpHandler](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/http/server/reactive/HttpHandler.html)is a simple contract with a single method to handle a request and a response. It is +intentionally minimal, and its main and only purpose is to be a minimal abstraction +over different HTTP server APIs. + +The following table describes the supported server APIs: + +| Server name | Server API used | Reactive Streams support | +|---------------------|--------------------------------------------------------------------------------|-------------------------------------------------------------------| +| Netty | Netty API | [Reactor Netty](https://github.com/reactor/reactor-netty) | +| Undertow | Undertow API | spring-web: Undertow to Reactive Streams bridge | +| Tomcat |Servlet 3.1 non-blocking I/O; Tomcat API to read and write ByteBuffers vs byte[]|spring-web: Servlet 3.1 non-blocking I/O to Reactive Streams bridge| +| Jetty | Servlet 3.1 non-blocking I/O; Jetty API to write ByteBuffers vs byte[] |spring-web: Servlet 3.1 non-blocking I/O to Reactive Streams bridge| +|Servlet 3.1 container| Servlet 3.1 non-blocking I/O |spring-web: Servlet 3.1 non-blocking I/O to Reactive Streams bridge| + +The following table describes server dependencies (also see[supported 
versions](https://github.com/spring-projects/spring-framework/wiki/What%27s-New-in-the-Spring-Framework)): + +| Server name | Group id | Artifact name | +|-------------|-----------------------|---------------------------| +|Reactor Netty|io.projectreactor.netty| reactor-netty | +| Undertow | io.undertow | undertow-core | +| Tomcat |org.apache.tomcat.embed| tomcat-embed-core | +| Jetty | org.eclipse.jetty |jetty-server, jetty-servlet| + +The code snippets below show using the `HttpHandler` adapters with each server API: + +**Reactor Netty** + +Java + +``` +HttpHandler handler = ... +ReactorHttpHandlerAdapter adapter = new ReactorHttpHandlerAdapter(handler); +HttpServer.create().host(host).port(port).handle(adapter).bind().block(); +``` + +Kotlin + +``` +val handler: HttpHandler = ... +val adapter = ReactorHttpHandlerAdapter(handler) +HttpServer.create().host(host).port(port).handle(adapter).bind().block() +``` + +**Undertow** + +Java + +``` +HttpHandler handler = ... +UndertowHttpHandlerAdapter adapter = new UndertowHttpHandlerAdapter(handler); +Undertow server = Undertow.builder().addHttpListener(port, host).setHandler(adapter).build(); +server.start(); +``` + +Kotlin + +``` +val handler: HttpHandler = ... +val adapter = UndertowHttpHandlerAdapter(handler) +val server = Undertow.builder().addHttpListener(port, host).setHandler(adapter).build() +server.start() +``` + +**Tomcat** + +Java + +``` +HttpHandler handler = ... +Servlet servlet = new TomcatHttpHandlerAdapter(handler); + +Tomcat server = new Tomcat(); +File base = new File(System.getProperty("java.io.tmpdir")); +Context rootContext = server.addContext("", base.getAbsolutePath()); +Tomcat.addServlet(rootContext, "main", servlet); +rootContext.addServletMappingDecoded("/", "main"); +server.setHost(host); +server.setPort(port); +server.start(); +``` + +Kotlin + +``` +val handler: HttpHandler = ... 
+val servlet = TomcatHttpHandlerAdapter(handler) + +val server = Tomcat() +val base = File(System.getProperty("java.io.tmpdir")) +val rootContext = server.addContext("", base.absolutePath) +Tomcat.addServlet(rootContext, "main", servlet) +rootContext.addServletMappingDecoded("/", "main") +server.host = host +server.setPort(port) +server.start() +``` + +**Jetty** + +Java + +``` +HttpHandler handler = ... +Servlet servlet = new JettyHttpHandlerAdapter(handler); + +Server server = new Server(); +ServletContextHandler contextHandler = new ServletContextHandler(server, ""); +contextHandler.addServlet(new ServletHolder(servlet), "/"); +contextHandler.start(); + +ServerConnector connector = new ServerConnector(server); +connector.setHost(host); +connector.setPort(port); +server.addConnector(connector); +server.start(); +``` + +Kotlin + +``` +val handler: HttpHandler = ... +val servlet = JettyHttpHandlerAdapter(handler) + +val server = Server() +val contextHandler = ServletContextHandler(server, "") +contextHandler.addServlet(ServletHolder(servlet), "/") +contextHandler.start(); + +val connector = ServerConnector(server) +connector.host = host +connector.port = port +server.addConnector(connector) +server.start() +``` + +**Servlet 3.1+ Container** + +To deploy as a WAR to any Servlet 3.1+ container, you can extend and include[`AbstractReactiveWebInitializer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/server/adapter/AbstractReactiveWebInitializer.html)in the WAR. That class wraps an `HttpHandler` with `ServletHttpHandlerAdapter` and registers +that as a `Servlet`. + +#### 1.2.2. 
`WebHandler` API + +The `org.springframework.web.server` package builds on the [`HttpHandler`](#webflux-httphandler) contract +to provide a general-purpose web API for processing requests through a chain of multiple[`WebExceptionHandler`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/server/WebExceptionHandler.html), multiple[`WebFilter`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/server/WebFilter.html), and a single[`WebHandler`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/server/WebHandler.html) component. The chain can +be put together with `WebHttpHandlerBuilder` by simply pointing to a Spring`ApplicationContext` where components are[auto-detected](#webflux-web-handler-api-special-beans), and/or by registering components +with the builder. + +While `HttpHandler` has a simple goal to abstract the use of different HTTP servers, the`WebHandler` API aims to provide a broader set of features commonly used in web applications +such as: + +* User session with attributes. + +* Request attributes. + +* Resolved `Locale` or `Principal` for the request. + +* Access to parsed and cached form data. + +* Abstractions for multipart data. + +* and more.. + +##### Special bean types + +The table below lists the components that `WebHttpHandlerBuilder` can auto-detect in a +Spring ApplicationContext, or that can be registered directly with it: + +| Bean name | Bean type |Count| Description | +|----------------------------|----------------------------|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| \<any\> | `WebExceptionHandler` |0..N | Provide handling for exceptions from the chain of `WebFilter` instances and the target`WebHandler`. For more details, see [Exceptions](#webflux-exception-handler). 
| +| \<any\> | `WebFilter` |0..N | Apply interception style logic to before and after the rest of the filter chain and<br/>the target `WebHandler`. For more details, see [Filters](#webflux-filters). | +| `webHandler` | `WebHandler` | 1 | The handler for the request. | +| `webSessionManager` | `WebSessionManager` |0..1 | The manager for `WebSession` instances exposed through a method on `ServerWebExchange`.`DefaultWebSessionManager` by default. | +| `serverCodecConfigurer` | `ServerCodecConfigurer` |0..1 |For access to `HttpMessageReader` instances for parsing form data and multipart data that is then<br/>exposed through methods on `ServerWebExchange`. `ServerCodecConfigurer.create()` by default.| +| `localeContextResolver` | `LocaleContextResolver` |0..1 | The resolver for `LocaleContext` exposed through a method on `ServerWebExchange`.`AcceptHeaderLocaleContextResolver` by default. | +|`forwardedHeaderTransformer`|`ForwardedHeaderTransformer`|0..1 | For processing forwarded type headers, either by extracting and removing them or by removing them only.<br/>Not used by default. | + +##### Form Data + +`ServerWebExchange` exposes the following method for accessing form data: + +Java + +``` +Mono<MultiValueMap<String, String>> getFormData(); +``` + +Kotlin + +``` +suspend fun getFormData(): MultiValueMap<String, String> +``` + +The `DefaultServerWebExchange` uses the configured `HttpMessageReader` to parse form data +(`application/x-www-form-urlencoded`) into a `MultiValueMap`. By default,`FormHttpMessageReader` is configured for use by the `ServerCodecConfigurer` bean +(see the [Web Handler API](#webflux-web-handler-api)). 
+ +##### Multipart Data + +[Web MVC](web.html#mvc-multipart) + +`ServerWebExchange` exposes the following method for accessing multipart data: + +Java + +``` +Mono<MultiValueMap<String, Part>> getMultipartData(); +``` + +Kotlin + +``` +suspend fun getMultipartData(): MultiValueMap<String, Part> +``` + +The `DefaultServerWebExchange` uses the configured`HttpMessageReader<MultiValueMap<String, Part>>` to parse `multipart/form-data` content +into a `MultiValueMap`. +By default, this is the `DefaultPartHttpMessageReader`, which does not have any third-party +dependencies. +Alternatively, the `SynchronossPartHttpMessageReader` can be used, which is based on the[Synchronoss NIO Multipart](https://github.com/synchronoss/nio-multipart) library. +Both are configured through the `ServerCodecConfigurer` bean +(see the [Web Handler API](#webflux-web-handler-api)). + +To parse multipart data in streaming fashion, you can use the `Flux<Part>` returned from an`HttpMessageReader<Part>` instead. For example, in an annotated controller, use of`@RequestPart` implies `Map`-like access to individual parts by name and, hence, requires +parsing multipart data in full. By contrast, you can use `@RequestBody` to decode the +content to `Flux<Part>` without collecting to a `MultiValueMap`. + +##### Forwarded Headers + +[Web MVC](web.html#filters-forwarded-headers) + +As a request goes through proxies (such as load balancers), the host, port, and +scheme may change. That makes it a challenge, from a client perspective, to create links that point to the correct +host, port, and scheme. + +[RFC 7239](https://tools.ietf.org/html/rfc7239) defines the `Forwarded` HTTP header +that proxies can use to provide information about the original request. There are other +non-standard headers, too, including `X-Forwarded-Host`, `X-Forwarded-Port`,`X-Forwarded-Proto`, `X-Forwarded-Ssl`, and `X-Forwarded-Prefix`. 
+ +`ForwardedHeaderTransformer` is a component that modifies the host, port, and scheme of +the request, based on forwarded headers, and then removes those headers. If you declare +it as a bean with the name `forwardedHeaderTransformer`, it will be[detected](#webflux-web-handler-api-special-beans) and used. + +There are security considerations for forwarded headers, since an application cannot know +if the headers were added by a proxy, as intended, or by a malicious client. This is why +a proxy at the boundary of trust should be configured to remove untrusted forwarded traffic coming +from the outside. You can also configure the `ForwardedHeaderTransformer` with`removeOnly=true`, in which case it removes but does not use the headers. + +| |In 5.1 `ForwardedHeaderFilter` was deprecated and superseded by`ForwardedHeaderTransformer` so forwarded headers can be processed earlier, before the<br/>exchange is created. If the filter is configured anyway, it is taken out of the list of<br/>filters, and `ForwardedHeaderTransformer` is used instead.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.2.3. Filters + +[Web MVC](web.html#filters) + +In the [`WebHandler` API](#webflux-web-handler-api), you can use a `WebFilter` to apply interception-style +logic before and after the rest of the processing chain of filters and the target`WebHandler`. When using the [WebFlux Config](#webflux-config), registering a `WebFilter` is as simple +as declaring it as a Spring bean and (optionally) expressing precedence by using `@Order` on +the bean declaration or by implementing `Ordered`. 
+ +##### CORS + +[Web MVC](web.html#filters-cors) + +Spring WebFlux provides fine-grained support for CORS configuration through annotations on +controllers. However, when you use it with Spring Security, we advise relying on the built-in`CorsFilter`, which must be ordered ahead of Spring Security’s chain of filters. + +See the section on [CORS](#webflux-cors) and the [webflux-cors.html](webflux-cors.html#webflux-cors-webfilter) for more details. + +#### 1.2.4. Exceptions + +[Web MVC](web.html#mvc-ann-customer-servlet-container-error-page) + +In the [`WebHandler` API](#webflux-web-handler-api), you can use a `WebExceptionHandler` to handle +exceptions from the chain of `WebFilter` instances and the target `WebHandler`. When using the[WebFlux Config](#webflux-config), registering a `WebExceptionHandler` is as simple as declaring it as a +Spring bean and (optionally) expressing precedence by using `@Order` on the bean declaration or +by implementing `Ordered`. + +The following table describes the available `WebExceptionHandler` implementations: + +| Exception Handler | Description | +|---------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `ResponseStatusExceptionHandler` |Provides handling for exceptions of type[`ResponseStatusException`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/server/ResponseStatusException.html)by setting the response to the HTTP status code of the exception.| +|`WebFluxResponseStatusExceptionHandler`| Extension of `ResponseStatusExceptionHandler` that can also determine the HTTP status<br/>code of a `@ResponseStatus` annotation on any exception.<br/><br/> This handler is declared in the [WebFlux Config](#webflux-config). | + +#### 1.2.5. 
Codecs + +[Web MVC](integration.html#rest-message-conversion) + +The `spring-web` and `spring-core` modules provide support for serializing and +deserializing byte content to and from higher level objects through non-blocking I/O with +Reactive Streams back pressure. The following describes this support: + +* [`Encoder`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/codec/Encoder.html) and[`Decoder`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/codec/Decoder.html) are low level contracts to + encode and decode content independent of HTTP. + +* [`HttpMessageReader`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/http/codec/HttpMessageReader.html) and[`HttpMessageWriter`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/http/codec/HttpMessageWriter.html) are contracts + to encode and decode HTTP message content. + +* An `Encoder` can be wrapped with `EncoderHttpMessageWriter` to adapt it for use in a web + application, while a `Decoder` can be wrapped with `DecoderHttpMessageReader`. + +* [`DataBuffer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/io/buffer/DataBuffer.html) abstracts different + byte buffer representations (e.g. Netty `ByteBuf`, `java.nio.ByteBuffer`, etc.) and is + what all codecs work on. See [Data Buffers and Codecs](core.html#databuffers) in the + "Spring Core" section for more on this topic. + +The `spring-core` module provides `byte[]`, `ByteBuffer`, `DataBuffer`, `Resource`, and`String` encoder and decoder implementations. The `spring-web` module provides Jackson +JSON, Jackson Smile, JAXB2, Protocol Buffers and other encoders and decoders along with +web-only HTTP message reader and writer implementations for form data, multipart content, +server-sent events, and others. 
+ +`ClientCodecConfigurer` and `ServerCodecConfigurer` are typically used to configure and +customize the codecs to use in an application. See the section on configuring[HTTP message codecs](#webflux-config-message-codecs). + +##### Jackson JSON + +JSON and binary JSON ([Smile](https://github.com/FasterXML/smile-format-specification)) are +both supported when the Jackson library is present. + +The `Jackson2Decoder` works as follows: + +* Jackson’s asynchronous, non-blocking parser is used to aggregate a stream of byte chunks + into `TokenBuffer`'s each representing a JSON object. + +* Each `TokenBuffer` is passed to Jackson’s `ObjectMapper` to create a higher level object. + +* When decoding to a single-value publisher (e.g. `Mono`), there is one `TokenBuffer`. + +* When decoding to a multi-value publisher (e.g. `Flux`), each `TokenBuffer` is passed to + the `ObjectMapper` as soon as enough bytes are received for a fully formed object. The + input content can be a JSON array, or any[line-delimited JSON](https://en.wikipedia.org/wiki/JSON_streaming) format such as NDJSON, + JSON Lines, or JSON Text Sequences. + +The `Jackson2Encoder` works as follows: + +* For a single value publisher (e.g. `Mono`), simply serialize it through the`ObjectMapper`. + +* For a multi-value publisher with `application/json`, by default collect the values with`Flux#collectToList()` and then serialize the resulting collection. + +* For a multi-value publisher with a streaming media type such as`application/x-ndjson` or `application/stream+x-jackson-smile`, encode, write, and + flush each value individually using a[line-delimited JSON](https://en.wikipedia.org/wiki/JSON_streaming) format. Other + streaming media types may be registered with the encoder. + +* For SSE the `Jackson2Encoder` is invoked per event and the output is flushed to ensure + delivery without delay. + +| |By default both `Jackson2Encoder` and `Jackson2Decoder` do not support elements of type`String`. 
Instead the default assumption is that a string or a sequence of strings<br/>represent serialized JSON content, to be rendered by the `CharSequenceEncoder`. If what<br/>you need is to render a JSON array from `Flux<String>`, use `Flux#collectToList()` and<br/>encode a `Mono<List<String>>`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Form Data + +`FormHttpMessageReader` and `FormHttpMessageWriter` support decoding and encoding`application/x-www-form-urlencoded` content. + +On the server side where form content often needs to be accessed from multiple places,`ServerWebExchange` provides a dedicated `getFormData()` method that parses the content +through `FormHttpMessageReader` and then caches the result for repeated access. +See [Form Data](#webflux-form-data) in the [`WebHandler` API](#webflux-web-handler-api) section. + +Once `getFormData()` is used, the original raw content can no longer be read from the +request body. For this reason, applications are expected to go through `ServerWebExchange`consistently for access to the cached form data versus reading from the raw request body. + +##### Multipart + +`MultipartHttpMessageReader` and `MultipartHttpMessageWriter` support decoding and +encoding "multipart/form-data" content. In turn `MultipartHttpMessageReader` delegates to +another `HttpMessageReader` for the actual parsing to a `Flux<Part>` and then simply +collects the parts into a `MultiValueMap`. +By default, the `DefaultPartHttpMessageReader` is used, but this can be changed through the`ServerCodecConfigurer`. 
+For more information about the `DefaultPartHttpMessageReader`, refer to the[javadoc of `DefaultPartHttpMessageReader`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/http/codec/multipart/DefaultPartHttpMessageReader.html). + +On the server side where multipart form content may need to be accessed from multiple +places, `ServerWebExchange` provides a dedicated `getMultipartData()` method that parses +the content through `MultipartHttpMessageReader` and then caches the result for repeated access. +See [Multipart Data](#webflux-multipart) in the [`WebHandler` API](#webflux-web-handler-api) section. + +Once `getMultipartData()` is used, the original raw content can no longer be read from the +request body. For this reason applications have to consistently use `getMultipartData()`for repeated, map-like access to parts, or otherwise rely on the`SynchronossPartHttpMessageReader` for a one-time access to `Flux<Part>`. + +##### Limits + +`Decoder` and `HttpMessageReader` implementations that buffer some or all of the input +stream can be configured with a limit on the maximum number of bytes to buffer in memory. +In some cases buffering occurs because input is aggregated and represented as a single +object — for example, a controller method with `@RequestBody byte[]`,`x-www-form-urlencoded` data, and so on. Buffering can also occur with streaming, when +splitting the input stream — for example, delimited text, a stream of JSON objects, and +so on. For those streaming cases, the limit applies to the number of bytes associated +with one object in the stream. + +To configure buffer sizes, you can check if a given `Decoder` or `HttpMessageReader`exposes a `maxInMemorySize` property and if so the Javadoc will have details about default +values. On the server side, `ServerCodecConfigurer` provides a single place from where to +set all codecs, see [HTTP message codecs](#webflux-config-message-codecs). 
On the client side, the limit for +all codecs can be changed in[WebClient.Builder](#webflux-client-builder-maxinmemorysize). + +For [Multipart parsing](#webflux-codecs-multipart) the `maxInMemorySize` property limits +the size of non-file parts. For file parts, it determines the threshold at which the part +is written to disk. For file parts written to disk, there is an additional`maxDiskUsagePerPart` property to limit the amount of disk space per part. There is also +a `maxParts` property to limit the overall number of parts in a multipart request. +To configure all three in WebFlux, you’ll need to supply a pre-configured instance of`MultipartHttpMessageReader` to `ServerCodecConfigurer`. + +##### Streaming + +[Web MVC](web.html#mvc-ann-async-http-streaming) + +When streaming to the HTTP response (for example, `text/event-stream`,`application/x-ndjson`), it is important to send data periodically, in order to +reliably detect a disconnected client sooner rather than later. Such a send could be a +comment-only, empty SSE event or any other "no-op" data that would effectively serve as +a heartbeat. + +##### `DataBuffer` + +`DataBuffer` is the representation for a byte buffer in WebFlux. The Spring Core part of +this reference has more on that in the section on[Data Buffers and Codecs](core.html#databuffers). The key point to understand is that on some +servers like Netty, byte buffers are pooled and reference counted, and must be released +when consumed to avoid memory leaks. + +WebFlux applications generally do not need to be concerned with such issues, unless they +consume or produce data buffers directly, as opposed to relying on codecs to convert to +and from higher level objects, or unless they choose to create custom codecs. For such +cases please review the information in [Data Buffers and Codecs](core.html#databuffers), +especially the section on [Using DataBuffer](core.html#databuffers-using). + +#### 1.2.6. 
Logging
+
+[Web MVC](web.html#mvc-logging)
+
+`DEBUG` level logging in Spring WebFlux is designed to be compact, minimal, and
+human-friendly. It focuses on high value bits of information that are useful over and
+over again vs others that are useful only when debugging a specific issue.
+
+`TRACE` level logging generally follows the same principles as `DEBUG` (and for example also
+should not be a firehose) but can be used for debugging any issue. In addition, some log
+messages may show a different level of detail at `TRACE` vs `DEBUG`.
+
+Good logging comes from the experience of using the logs. If you spot anything that does
+not meet the stated goals, please let us know.
+
+##### Log Id
+
+In WebFlux, a single request can be run over multiple threads and the thread ID
+is not useful for correlating log messages that belong to a specific request. This is why
+WebFlux log messages are prefixed with a request-specific ID by default.
+
+On the server side, the log ID is stored in the `ServerWebExchange` attribute
+([`LOG_ID_ATTRIBUTE`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/server/ServerWebExchange.html#LOG_ID_ATTRIBUTE)),
+while a fully formatted prefix based on that ID is available from `ServerWebExchange#getLogPrefix()`. On the `WebClient` side, the log ID is stored in the `ClientRequest` attribute
+([`LOG_ID_ATTRIBUTE`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/reactive/function/client/ClientRequest.html#LOG_ID_ATTRIBUTE)),
+while a fully formatted prefix is available from `ClientRequest#logPrefix()`.
+
+##### Sensitive Data
+
+[Web MVC](web.html#mvc-logging-sensitive-data)
+
+`DEBUG` and `TRACE` logging can log sensitive information. This is why form parameters and
+headers are masked by default and you must explicitly enable their logging in full. 
+ +The following example shows how to do so for server-side requests: + +Java + +``` +@Configuration +@EnableWebFlux +class MyConfig implements WebFluxConfigurer { + + @Override + public void configureHttpMessageCodecs(ServerCodecConfigurer configurer) { + configurer.defaultCodecs().enableLoggingRequestDetails(true); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class MyConfig : WebFluxConfigurer { + + override fun configureHttpMessageCodecs(configurer: ServerCodecConfigurer) { + configurer.defaultCodecs().enableLoggingRequestDetails(true) + } +} +``` + +The following example shows how to do so for client-side requests: + +Java + +``` +Consumer<ClientCodecConfigurer> consumer = configurer -> + configurer.defaultCodecs().enableLoggingRequestDetails(true); + +WebClient webClient = WebClient.builder() + .exchangeStrategies(strategies -> strategies.codecs(consumer)) + .build(); +``` + +Kotlin + +``` +val consumer: (ClientCodecConfigurer) -> Unit = { configurer -> configurer.defaultCodecs().enableLoggingRequestDetails(true) } + +val webClient = WebClient.builder() + .exchangeStrategies({ strategies -> strategies.codecs(consumer) }) + .build() +``` + +##### Appenders + +Logging libraries such as SLF4J and Log4J 2 provide asynchronous loggers that avoid +blocking. While those have their own drawbacks such as potentially dropping messages +that could not be queued for logging, they are the best available options currently +for use in a reactive, non-blocking application. + +##### Custom codecs + +Applications can register custom codecs for supporting additional media types, +or specific behaviors that are not supported by the default codecs. + +Some configuration options expressed by developers are enforced on default codecs. +Custom codecs might want to get a chance to align with those preferences, +like [enforcing buffering limits](#webflux-codecs-limits)or [logging sensitive data](#webflux-logging-sensitive-data). 
+ +The following example shows how to do so for client-side requests: + +Java + +``` +WebClient webClient = WebClient.builder() + .codecs(configurer -> { + CustomDecoder decoder = new CustomDecoder(); + configurer.customCodecs().registerWithDefaultConfig(decoder); + }) + .build(); +``` + +Kotlin + +``` +val webClient = WebClient.builder() + .codecs({ configurer -> + val decoder = CustomDecoder() + configurer.customCodecs().registerWithDefaultConfig(decoder) + }) + .build() +``` + +### 1.3. `DispatcherHandler` + +[Web MVC](web.html#mvc-servlet) + +Spring WebFlux, similarly to Spring MVC, is designed around the front controller pattern, +where a central `WebHandler`, the `DispatcherHandler`, provides a shared algorithm for +request processing, while actual work is performed by configurable, delegate components. +This model is flexible and supports diverse workflows. + +`DispatcherHandler` discovers the delegate components it needs from Spring configuration. +It is also designed to be a Spring bean itself and implements `ApplicationContextAware`for access to the context in which it runs. If `DispatcherHandler` is declared with a bean +name of `webHandler`, it is, in turn, discovered by[`WebHttpHandlerBuilder`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/server/adapter/WebHttpHandlerBuilder.html), +which puts together a request-processing chain, as described in [`WebHandler` API](#webflux-web-handler-api). + +Spring configuration in a WebFlux application typically contains: + +* `DispatcherHandler` with the bean name `webHandler` + +* `WebFilter` and `WebExceptionHandler` beans + +* [`DispatcherHandler` special beans](#webflux-special-bean-types) + +* Others + +The configuration is given to `WebHttpHandlerBuilder` to build the processing chain, +as the following example shows: + +Java + +``` +ApplicationContext context = ... 
+HttpHandler handler = WebHttpHandlerBuilder.applicationContext(context).build(); +``` + +Kotlin + +``` +val context: ApplicationContext = ... +val handler = WebHttpHandlerBuilder.applicationContext(context).build() +``` + +The resulting `HttpHandler` is ready for use with a [server adapter](#webflux-httphandler). + +#### 1.3.1. Special Bean Types + +[Web MVC](web.html#mvc-servlet-special-bean-types) + +The `DispatcherHandler` delegates to special beans to process requests and render the +appropriate responses. By “special beans,” we mean Spring-managed `Object` instances that +implement WebFlux framework contracts. Those usually come with built-in contracts, but +you can customize their properties, extend them, or replace them. + +The following table lists the special beans detected by the `DispatcherHandler`. Note that +there are also some other beans detected at a lower level (see[Special bean types](#webflux-web-handler-api-special-beans) in the Web Handler API). + +| Bean type | Explanation | +|----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `HandlerMapping` |Map a request to a handler. 
The mapping is based on some criteria, the details of<br/>which vary by `HandlerMapping` implementation — annotated controllers, simple<br/>URL pattern mappings, and others.<br/><br/> The main `HandlerMapping` implementations are `RequestMappingHandlerMapping` for`@RequestMapping` annotated methods, `RouterFunctionMapping` for functional endpoint<br/>routes, and `SimpleUrlHandlerMapping` for explicit registrations of URI path patterns<br/>and `WebHandler` instances.| +| `HandlerAdapter` | Help the `DispatcherHandler` to invoke a handler mapped to a request regardless of<br/>how the handler is actually invoked. For example, invoking an annotated controller<br/>requires resolving annotations. The main purpose of a `HandlerAdapter` is to shield the`DispatcherHandler` from such details. | +|`HandlerResultHandler`| Process the result from the handler invocation and finalize the response.<br/>See [Result Handling](#webflux-resulthandling). | + +#### 1.3.2. WebFlux Config + +[Web MVC](web.html#mvc-servlet-config) + +Applications can declare the infrastructure beans (listed under[Web Handler API](#webflux-web-handler-api-special-beans) and[`DispatcherHandler`](#webflux-special-bean-types)) that are required to process requests. +However, in most cases, the [WebFlux Config](#webflux-config) is the best starting point. It declares the +required beans and provides a higher-level configuration callback API to customize it. + +| |Spring Boot relies on the WebFlux config to configure Spring WebFlux and also provides<br/>many extra convenient options.| +|---|-------------------------------------------------------------------------------------------------------------------------| + +#### 1.3.3. Processing + +[Web MVC](web.html#mvc-servlet-sequence) + +`DispatcherHandler` processes requests as follows: + +* Each `HandlerMapping` is asked to find a matching handler, and the first match is used. 
+ +* If a handler is found, it is run through an appropriate `HandlerAdapter`, which + exposes the return value from the execution as `HandlerResult`. + +* The `HandlerResult` is given to an appropriate `HandlerResultHandler` to complete + processing by writing to the response directly or by using a view to render. + +#### 1.3.4. Result Handling + +The return value from the invocation of a handler, through a `HandlerAdapter`, is wrapped +as a `HandlerResult`, along with some additional context, and passed to the first`HandlerResultHandler` that claims support for it. The following table shows the available`HandlerResultHandler` implementations, all of which are declared in the [WebFlux Config](#webflux-config): + +| Result Handler Type | Return Values | Default Order | +|-----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------| +|`ResponseEntityResultHandler`| `ResponseEntity`, typically from `@Controller` instances. | 0 | +|`ServerResponseResultHandler`| `ServerResponse`, typically from functional endpoints. | 0 | +| `ResponseBodyResultHandler` | Handle return values from `@ResponseBody` methods or `@RestController` classes. 
| 100 | +|`ViewResolutionResultHandler`|`CharSequence`, [`View`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/reactive/result/view/View.html),[Model](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/ui/Model.html), `Map`,[Rendering](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/reactive/result/view/Rendering.html),<br/>or any other `Object` is treated as a model attribute.<br/><br/> See also [View Resolution](#webflux-viewresolution).|`Integer.MAX_VALUE`| + +#### 1.3.5. Exceptions + +[Web MVC](web.html#mvc-exceptionhandlers) + +The `HandlerResult` returned from a `HandlerAdapter` can expose a function for error +handling based on some handler-specific mechanism. This error function is called if: + +* The handler (for example, `@Controller`) invocation fails. + +* The handling of the handler return value through a `HandlerResultHandler` fails. + +The error function can change the response (for example, to an error status), as long as an error +signal occurs before the reactive type returned from the handler produces any data items. + +This is how `@ExceptionHandler` methods in `@Controller` classes are supported. +By contrast, support for the same in Spring MVC is built on a `HandlerExceptionResolver`. +This generally should not matter. However, keep in mind that, in WebFlux, you cannot use a`@ControllerAdvice` to handle exceptions that occur before a handler is chosen. + +See also [Managing Exceptions](#webflux-ann-controller-exceptions) in the “Annotated Controller” section or[Exceptions](#webflux-exception-handler) in the WebHandler API section. + +#### 1.3.6. View Resolution + +[Web MVC](web.html#mvc-viewresolver) + +View resolution enables rendering to a browser with an HTML template and a model without +tying you to a specific view technology. 
In Spring WebFlux, view resolution is +supported through a dedicated [HandlerResultHandler](#webflux-resulthandling) that uses`ViewResolver` instances to map a String (representing a logical view name) to a `View`instance. The `View` is then used to render the response. + +##### Handling + +[Web MVC](web.html#mvc-handling) + +The `HandlerResult` passed into `ViewResolutionResultHandler` contains the return value +from the handler and the model that contains attributes added during request +handling. The return value is processed as one of the following: + +* `String`, `CharSequence`: A logical view name to be resolved to a `View` through + the list of configured `ViewResolver` implementations. + +* `void`: Select a default view name based on the request path, minus the leading and + trailing slash, and resolve it to a `View`. The same also happens when a view name + was not provided (for example, model attribute was returned) or an async return value + (for example, `Mono` completed empty). + +* [Rendering](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/reactive/result/view/Rendering.html): API for + view resolution scenarios. Explore the options in your IDE with code completion. + +* `Model`, `Map`: Extra model attributes to be added to the model for the request. + +* Any other: Any other return value (except for simple types, as determined by[BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-)) + is treated as a model attribute to be added to the model. The attribute name is derived + from the class name by using [conventions](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/Conventions.html), + unless a handler method `@ModelAttribute` annotation is present. + +The model can contain asynchronous, reactive types (for example, from Reactor or RxJava). 
Prior +to rendering, `AbstractView` resolves such model attributes into concrete values +and updates the model. Single-value reactive types are resolved to a single +value or no value (if empty), while multi-value reactive types (for example, `Flux<T>`) are +collected and resolved to `List<T>`. + +To configure view resolution is as simple as adding a `ViewResolutionResultHandler` bean +to your Spring configuration. [WebFlux Config](#webflux-config-view-resolvers) provides a +dedicated configuration API for view resolution. + +See [View Technologies](#webflux-view) for more on the view technologies integrated with Spring WebFlux. + +##### Redirecting + +[Web MVC](web.html#mvc-redirecting-redirect-prefix) + +The special `redirect:` prefix in a view name lets you perform a redirect. The`UrlBasedViewResolver` (and sub-classes) recognize this as an instruction that a +redirect is needed. The rest of the view name is the redirect URL. + +The net effect is the same as if the controller had returned a `RedirectView` or`Rendering.redirectTo("abc").build()`, but now the controller itself can +operate in terms of logical view names. A view name such as`redirect:/some/resource` is relative to the current application, while a view name such as`redirect:https://example.com/arbitrary/path` redirects to an absolute URL. + +##### Content Negotiation + +[Web MVC](web.html#mvc-multiple-representations) + +`ViewResolutionResultHandler` supports content negotiation. It compares the request +media types with the media types supported by each selected `View`. The first `View`that supports the requested media type(s) is used. + +In order to support media types such as JSON and XML, Spring WebFlux provides`HttpMessageWriterView`, which is a special `View` that renders through an[HttpMessageWriter](#webflux-codecs). Typically, you would configure these as default +views through the [WebFlux Configuration](#webflux-config-view-resolvers). 
Default views are +always selected and used if they match the requested media type. + +### 1.4. Annotated Controllers + +[Web MVC](web.html#mvc-controller) + +Spring WebFlux provides an annotation-based programming model, where `@Controller` and`@RestController` components use annotations to express request mappings, request input, +handle exceptions, and more. Annotated controllers have flexible method signatures and +do not have to extend base classes nor implement specific interfaces. + +The following listing shows a basic example: + +Java + +``` +@RestController +public class HelloController { + + @GetMapping("/hello") + public String handle() { + return "Hello WebFlux"; + } +} +``` + +Kotlin + +``` +@RestController +class HelloController { + + @GetMapping("/hello") + fun handle() = "Hello WebFlux" +} +``` + +In the preceding example, the method returns a `String` to be written to the response body. + +#### 1.4.1. `@Controller` + +[Web MVC](web.html#mvc-ann-controller) + +You can define controller beans by using a standard Spring bean definition. +The `@Controller` stereotype allows for auto-detection and is aligned with Spring general support +for detecting `@Component` classes in the classpath and auto-registering bean definitions +for them. It also acts as a stereotype for the annotated class, indicating its role as +a web component. + +To enable auto-detection of such `@Controller` beans, you can add component scanning to +your Java configuration, as the following example shows: + +Java + +``` +@Configuration +@ComponentScan("org.example.web") (1) +public class WebConfig { + + // ... +} +``` + +|**1**|Scan the `org.example.web` package.| +|-----|-----------------------------------| + +Kotlin + +``` +@Configuration +@ComponentScan("org.example.web") (1) +class WebConfig { + + // ... 
+} +``` + +|**1**|Scan the `org.example.web` package.| +|-----|-----------------------------------| + +`@RestController` is a [composed annotation](core.html#beans-meta-annotations) that is +itself meta-annotated with `@Controller` and `@ResponseBody`, indicating a controller whose +every method inherits the type-level `@ResponseBody` annotation and, therefore, writes +directly to the response body versus view resolution and rendering with an HTML template. + +#### 1.4.2. Request Mapping + +[Web MVC](web.html#mvc-ann-requestmapping) + +The `@RequestMapping` annotation is used to map requests to controllers methods. It has +various attributes to match by URL, HTTP method, request parameters, headers, and media +types. You can use it at the class level to express shared mappings or at the method level +to narrow down to a specific endpoint mapping. + +There are also HTTP method specific shortcut variants of `@RequestMapping`: + +* `@GetMapping` + +* `@PostMapping` + +* `@PutMapping` + +* `@DeleteMapping` + +* `@PatchMapping` + +The preceding annotations are [Custom Annotations](#webflux-ann-requestmapping-composed) that are provided +because, arguably, most controller methods should be mapped to a specific HTTP method versus +using `@RequestMapping`, which, by default, matches to all HTTP methods. At the same time, a`@RequestMapping` is still needed at the class level to express shared mappings. + +The following example uses type and method level mappings: + +Java + +``` +@RestController +@RequestMapping("/persons") +class PersonController { + + @GetMapping("/{id}") + public Person getPerson(@PathVariable Long id) { + // ... + } + + @PostMapping + @ResponseStatus(HttpStatus.CREATED) + public void add(@RequestBody Person person) { + // ... + } +} +``` + +Kotlin + +``` +@RestController +@RequestMapping("/persons") +class PersonController { + + @GetMapping("/{id}") + fun getPerson(@PathVariable id: Long): Person { + // ... 
+ } + + @PostMapping + @ResponseStatus(HttpStatus.CREATED) + fun add(@RequestBody person: Person) { + // ... + } +} +``` + +##### URI Patterns + +[Web MVC](web.html#mvc-ann-requestmapping-uri-templates) + +You can map requests by using glob patterns and wildcards: + +| Pattern | Description | Example | +|---------------|-------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `?` | Matches one character | `"/pages/t?st.html"` matches `"/pages/test.html"` and `"/pages/t3st.html"` | +| `*` | Matches zero or more characters within a path segment |`"/resources/*.png"` matches `"/resources/file.png"`<br/><br/>`"/projects/*/versions"` matches `"/projects/spring/versions"` but does not match `"/projects/spring/boot/versions"` | +| `**` | Matches zero or more path segments until the end of the path |`"/resources/**"` matches `"/resources/file.png"` and `"/resources/images/file.png"`<br/><br/>`"/resources/**/file.png"` is invalid as `**` is only allowed at the end of the path.| +| `{name}` | Matches a path segment and captures it as a variable named "name" | `"/projects/{project}/versions"` matches `"/projects/spring/versions"` and captures `project=spring` | +|`{name:[a-z]+}`| Matches the regexp `"[a-z]+"` as a path variable named "name" | `"/projects/{project:[a-z]+}/versions"` matches `"/projects/spring/versions"` but not `"/projects/spring1/versions"` | +| `{*path}` |Matches zero or more path segments until the end of the path and captures it as a variable named "path"| `"/resources/{*file}"` matches `"/resources/images/file.png"` and captures `file=/images/file.png` | + +Captured URI variables can be accessed with `@PathVariable`, as the following example shows: + +Java + +``` +@GetMapping("/owners/{ownerId}/pets/{petId}") +public Pet 
findPet(@PathVariable Long ownerId, @PathVariable Long petId) { + // ... +} +``` + +Kotlin + +``` +@GetMapping("/owners/{ownerId}/pets/{petId}") +fun findPet(@PathVariable ownerId: Long, @PathVariable petId: Long): Pet { + // ... +} +``` + +You can declare URI variables at the class and method levels, as the following example shows: + +Java + +``` +@Controller +@RequestMapping("/owners/{ownerId}") (1) +public class OwnerController { + + @GetMapping("/pets/{petId}") (2) + public Pet findPet(@PathVariable Long ownerId, @PathVariable Long petId) { + // ... + } +} +``` + +|**1**|Class-level URI mapping. | +|-----|-------------------------| +|**2**|Method-level URI mapping.| + +Kotlin + +``` +@Controller +@RequestMapping("/owners/{ownerId}") (1) +class OwnerController { + + @GetMapping("/pets/{petId}") (2) + fun findPet(@PathVariable ownerId: Long, @PathVariable petId: Long): Pet { + // ... + } +} +``` + +|**1**|Class-level URI mapping. | +|-----|-------------------------| +|**2**|Method-level URI mapping.| + +URI variables are automatically converted to the appropriate type or a `TypeMismatchException`is raised. Simple types (`int`, `long`, `Date`, and so on) are supported by default and you can +register support for any other data type. +See [Type Conversion](#webflux-ann-typeconversion) and [`DataBinder`](#webflux-ann-initbinder). + +URI variables can be named explicitly (for example, `@PathVariable("customId")`), but you can +leave that detail out if the names are the same and you compile your code with debugging +information or with the `-parameters` compiler flag on Java 8. + +The syntax `{*varName}` declares a URI variable that matches zero or more remaining path +segments. For example `/resources/{*path}` matches all files under `/resources/`, and the`"path"` variable captures the complete path under `/resources`. + +The syntax `{varName:regex}` declares a URI variable with a regular expression that has the +syntax: `{varName:regex}`. 
For example, given a URL of `/spring-web-3.0.5.jar`, the following method
+extracts the name, version, and file extension:
+
+Java
+
+```
+@GetMapping("/{name:[a-z-]+}-{version:\\d\\.\\d\\.\\d}{ext:\\.[a-z]+}")
+public void handle(@PathVariable String version, @PathVariable String ext) {
+    // ...
+}
+```
+
+Kotlin
+
+```
+@GetMapping("/{name:[a-z-]+}-{version:\\d\\.\\d\\.\\d}{ext:\\.[a-z]+}")
+fun handle(@PathVariable version: String, @PathVariable ext: String) {
+    // ...
+}
+```
+
+URI path patterns can also have embedded `${…}` placeholders that are resolved on startup
+through `PropertyPlaceHolderConfigurer` against local, system, environment, and other property
+sources. You can use this to, for example, parameterize a base URL based on some external
+configuration.
+
+| |Spring WebFlux uses `PathPattern` and the `PathPatternParser` for URI path matching support.<br/>Both classes are located in `spring-web` and are expressly designed for use with HTTP URL<br/>paths in web applications where a large number of URI path patterns are matched at runtime.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+Spring WebFlux does not support suffix pattern matching — unlike Spring MVC, where a
+mapping such as `/person` also matches to `/person.*`. For URL-based content
+negotiation, if needed, we recommend using a query parameter, which is simpler, more
+explicit, and less vulnerable to URL path based exploits.
+
+##### Pattern Comparison
+
+[Web MVC](web.html#mvc-ann-requestmapping-pattern-comparison)
+
+When multiple patterns match a URL, they must be compared to find the best match. This is done
+with `PathPattern.SPECIFICITY_COMPARATOR`, which looks for patterns that are more specific. 
+ +For every pattern, a score is computed, based on the number of URI variables and wildcards, +where a URI variable scores lower than a wildcard. A pattern with a lower total score +wins. If two patterns have the same score, the longer is chosen. + +Catch-all patterns (for example, `**`, `{*varName}`) are excluded from the scoring and are always +sorted last instead. If two patterns are both catch-all, the longer is chosen. + +##### Consumable Media Types + +[Web MVC](web.html#mvc-ann-requestmapping-consumes) + +You can narrow the request mapping based on the `Content-Type` of the request, +as the following example shows: + +Java + +``` +@PostMapping(path = "/pets", consumes = "application/json") +public void addPet(@RequestBody Pet pet) { + // ... +} +``` + +Kotlin + +``` +@PostMapping("/pets", consumes = ["application/json"]) +fun addPet(@RequestBody pet: Pet) { + // ... +} +``` + +The consumes attribute also supports negation expressions — for example, `!text/plain` means any +content type other than `text/plain`. + +You can declare a shared `consumes` attribute at the class level. Unlike most other request +mapping attributes, however, when used at the class level, a method-level `consumes` attribute +overrides rather than extends the class-level declaration. + +| |`MediaType` provides constants for commonly used media types — for example,`APPLICATION_JSON_VALUE` and `APPLICATION_XML_VALUE`.| +|---|--------------------------------------------------------------------------------------------------------------------------------| + +##### Producible Media Types + +[Web MVC](web.html#mvc-ann-requestmapping-produces) + +You can narrow the request mapping based on the `Accept` request header and the list of +content types that a controller method produces, as the following example shows: + +Java + +``` +@GetMapping(path = "/pets/{petId}", produces = "application/json") +@ResponseBody +public Pet getPet(@PathVariable String petId) { + // ... 
+}
+```
+
+Kotlin
+
+```
+@GetMapping("/pets/{petId}", produces = ["application/json"])
+@ResponseBody
+fun getPet(@PathVariable petId: String): Pet {
+    // ...
+}
+```
+
+The media type can specify a character set. Negated expressions are supported — for example, `!text/plain` means any content type other than `text/plain`.
+
+You can declare a shared `produces` attribute at the class level. Unlike most other request
+mapping attributes, however, when used at the class level, a method-level `produces` attribute
+overrides rather than extends the class-level declaration.
+
+| |`MediaType` provides constants for commonly used media types — e.g. `APPLICATION_JSON_VALUE`, `APPLICATION_XML_VALUE`.|
+|---|---------------------------------------------------------------------------------------------------------------------|
+
+##### Parameters and Headers
+
+[Web MVC](web.html#mvc-ann-requestmapping-params-and-headers)
+
+You can narrow request mappings based on query parameter conditions. You can test for the
+presence of a query parameter (`myParam`), for its absence (`!myParam`), or for a
+specific value (`myParam=myValue`). The following example tests for a parameter with a value:
+
+Java
+
+```
+@GetMapping(path = "/pets/{petId}", params = "myParam=myValue") (1)
+public void findPet(@PathVariable String petId) {
+    // ...
+}
+```
+
+|**1**|Check that `myParam` equals `myValue`.|
+|-----|--------------------------------------|
+
+Kotlin
+
+```
+@GetMapping("/pets/{petId}", params = ["myParam=myValue"]) (1)
+fun findPet(@PathVariable petId: String) {
+    // ...
+}
+```
+
+|**1**|Check that `myParam` equals `myValue`.|
+|-----|--------------------------------------|
+
+You can also use the same with request header conditions, as the following example shows:
+
+Java
+
+```
+@GetMapping(path = "/pets", headers = "myHeader=myValue") (1)
+public void findPet(@PathVariable String petId) {
+    // ... 
+} +``` + +|**1**|Check that `myHeader` equals `myValue`.| +|-----|---------------------------------------| + +Kotlin + +``` +@GetMapping("/pets", headers = ["myHeader=myValue"]) (1) +fun findPet(@PathVariable petId: String) { + // ... +} +``` + +|**1**|Check that `myHeader` equals `myValue`.| +|-----|---------------------------------------| + +##### HTTP HEAD, OPTIONS + +[Web MVC](web.html#mvc-ann-requestmapping-head-options) + +`@GetMapping` and `@RequestMapping(method=HttpMethod.GET)` support HTTP HEAD +transparently for request mapping purposes. Controller methods need not change. +A response wrapper, applied in the `HttpHandler` server adapter, ensures a `Content-Length`header is set to the number of bytes written without actually writing to the response. + +By default, HTTP OPTIONS is handled by setting the `Allow` response header to the list of HTTP +methods listed in all `@RequestMapping` methods with matching URL patterns. + +For a `@RequestMapping` without HTTP method declarations, the `Allow` header is set to`GET,HEAD,POST,PUT,PATCH,DELETE,OPTIONS`. Controller methods should always declare the +supported HTTP methods (for example, by using the HTTP method specific variants — `@GetMapping`, `@PostMapping`, and others). + +You can explicitly map a `@RequestMapping` method to HTTP HEAD and HTTP OPTIONS, but that +is not necessary in the common case. + +##### Custom Annotations + +[Web MVC](web.html#mvc-ann-requestmapping-composed) + +Spring WebFlux supports the use of [composed annotations](core.html#beans-meta-annotations)for request mapping. Those are annotations that are themselves meta-annotated with`@RequestMapping` and composed to redeclare a subset (or all) of the `@RequestMapping`attributes with a narrower, more specific purpose. + +`@GetMapping`, `@PostMapping`, `@PutMapping`, `@DeleteMapping`, and `@PatchMapping` are +examples of composed annotations. 
They are provided, because, arguably, most +controller methods should be mapped to a specific HTTP method versus using `@RequestMapping`, +which, by default, matches to all HTTP methods. If you need an example of composed +annotations, look at how those are declared. + +Spring WebFlux also supports custom request mapping attributes with custom request matching +logic. This is a more advanced option that requires sub-classing`RequestMappingHandlerMapping` and overriding the `getCustomMethodCondition` method, where +you can check the custom attribute and return your own `RequestCondition`. + +##### Explicit Registrations + +[Web MVC](web.html#mvc-ann-requestmapping-registration) + +You can programmatically register Handler methods, which can be used for dynamic +registrations or for advanced cases, such as different instances of the same handler +under different URLs. The following example shows how to do so: + +Java + +``` +@Configuration +public class MyConfig { + + @Autowired + public void setHandlerMapping(RequestMappingHandlerMapping mapping, UserHandler handler) (1) + throws NoSuchMethodException { + + RequestMappingInfo info = RequestMappingInfo + .paths("/user/{id}").methods(RequestMethod.GET).build(); (2) + + Method method = UserHandler.class.getMethod("getUser", Long.class); (3) + + mapping.registerMapping(info, handler, method); (4) + } + +} +``` + +|**1**|Inject target handlers and the handler mapping for controllers.| +|-----|---------------------------------------------------------------| +|**2**| Prepare the request mapping metadata. | +|**3**| Get the handler method. | +|**4**| Add the registration. 
| + +Kotlin + +``` +@Configuration +class MyConfig { + + @Autowired + fun setHandlerMapping(mapping: RequestMappingHandlerMapping, handler: UserHandler) { (1) + + val info = RequestMappingInfo.paths("/user/{id}").methods(RequestMethod.GET).build() (2) + + val method = UserHandler::class.java.getMethod("getUser", Long::class.java) (3) + + mapping.registerMapping(info, handler, method) (4) + } +} +``` + +|**1**|Inject target handlers and the handler mapping for controllers.| +|-----|---------------------------------------------------------------| +|**2**| Prepare the request mapping metadata. | +|**3**| Get the handler method. | +|**4**| Add the registration. | + +#### 1.4.3. Handler Methods + +[Web MVC](web.html#mvc-ann-methods) + +`@RequestMapping` handler methods have a flexible signature and can choose from a range of +supported controller method arguments and return values. + +##### Method Arguments + +[Web MVC](web.html#mvc-ann-arguments) + +The following table shows the supported controller method arguments. + +Reactive types (Reactor, RxJava, [or other](#webflux-reactive-libraries)) are +supported on arguments that require blocking I/O (for example, reading the request body) to +be resolved. This is marked in the Description column. Reactive types are not expected +on arguments that do not require blocking. + +JDK 1.8’s `java.util.Optional` is supported as a method argument in combination with +annotations that have a `required` attribute (for example, `@RequestParam`, `@RequestHeader`, +and others) and is equivalent to `required=false`. 
+ +| Controller method argument | Description | +|---------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `ServerWebExchange` | Access to the full `ServerWebExchange` — container for the HTTP request and response,<br/>request and session attributes, `checkNotModified` methods, and others. | +| `ServerHttpRequest`, `ServerHttpResponse` | Access to the HTTP request or response. | +| `WebSession` | Access to the session. This does not force the start of a new session unless attributes<br/>are added. Supports reactive types. | +| `java.security.Principal` | The currently authenticated user — possibly a specific `Principal` implementation class if known.<br/>Supports reactive types. | +| `org.springframework.http.HttpMethod` | The HTTP method of the request. | +| `java.util.Locale` | The current request locale, determined by the most specific `LocaleResolver` available — in<br/>effect, the configured `LocaleResolver`/`LocaleContextResolver`. | +| `java.util.TimeZone` + `java.time.ZoneId` | The time zone associated with the current request, as determined by a `LocaleContextResolver`. | +| `@PathVariable` | For access to URI template variables. See [URI Patterns](#webflux-ann-requestmapping-uri-templates). | +| `@MatrixVariable` | For access to name-value pairs in URI path segments. See [Matrix Variables](#webflux-ann-matrix-variables). | +| `@RequestParam` | For access to Servlet request parameters. Parameter values are converted to the declared<br/>method argument type. 
See [`@RequestParam`](#webflux-ann-requestparam).<br/><br/> Note that use of `@RequestParam` is optional — for example, to set its attributes.<br/>See “Any other argument” later in this table. | +| `@RequestHeader` | For access to request headers. Header values are converted to the declared method argument<br/>type. See [`@RequestHeader`](#webflux-ann-requestheader). | +| `@CookieValue` | For access to cookies. Cookie values are converted to the declared method argument type.<br/>See [`@CookieValue`](#webflux-ann-cookievalue). | +| `@RequestBody` | For access to the HTTP request body. Body content is converted to the declared method<br/>argument type by using `HttpMessageReader` instances. Supports reactive types.<br/>See [`@RequestBody`](#webflux-ann-requestbody). | +| `HttpEntity<B>` | For access to request headers and body. The body is converted with `HttpMessageReader` instances.<br/>Supports reactive types. See [`HttpEntity`](#webflux-ann-httpentity). | +| `@RequestPart` | For access to a part in a `multipart/form-data` request. Supports reactive types.<br/>See [Multipart Content](#webflux-multipart-forms) and [Multipart Data](#webflux-multipart). | +|`java.util.Map`, `org.springframework.ui.Model`, and `org.springframework.ui.ModelMap`.| For access to the model that is used in HTML controllers and is exposed to templates as<br/>part of view rendering. | +| `@ModelAttribute` |For access to an existing attribute in the model (instantiated if not present) with<br/>data binding and validation applied. See [`@ModelAttribute`](#webflux-ann-modelattrib-method-args) as well<br/>as [`Model`](#webflux-ann-modelattrib-methods) and [`DataBinder`](#webflux-ann-initbinder).<br/><br/> Note that use of `@ModelAttribute` is optional — for example, to set its attributes.<br/>See “Any other argument” later in this table.| +| `Errors`, `BindingResult` | For access to errors from validation and data binding for a command object, i.e. a`@ModelAttribute` argument. 
An `Errors`, or `BindingResult` argument must be declared<br/>immediately after the validated method argument. | +| `SessionStatus` + class-level `@SessionAttributes` | For marking form processing complete, which triggers cleanup of session attributes<br/>declared through a class-level `@SessionAttributes` annotation.<br/>See [`@SessionAttributes`](#webflux-ann-sessionattributes) for more details. | +| `UriComponentsBuilder` | For preparing a URL relative to the current request’s host, port, scheme, and<br/>context path. See [URI Links](#webflux-uri-building). | +| `@SessionAttribute` | For access to any session attribute — in contrast to model attributes stored in the session<br/>as a result of a class-level `@SessionAttributes` declaration. See[`@SessionAttribute`](#webflux-ann-sessionattribute) for more details. | +| `@RequestAttribute` | For access to request attributes. See [`@RequestAttribute`](#webflux-ann-requestattrib) for more details. | +| Any other argument | If a method argument is not matched to any of the above, it is, by default, resolved as<br/>a `@RequestParam` if it is a simple type, as determined by[BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-),<br/>or as a `@ModelAttribute`, otherwise. | + +##### Return Values + +[Web MVC](web.html#mvc-ann-return-types) + +The following table shows the supported controller method return values. Note that reactive +types from libraries such as Reactor, RxJava, [or other](#webflux-reactive-libraries) are +generally supported for all return values. 
+ +| Controller method return value | Description | +|------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `@ResponseBody` | The return value is encoded through `HttpMessageWriter` instances and written to the response.<br/>See [`@ResponseBody`](#webflux-ann-responsebody). | +| `HttpEntity<B>`, `ResponseEntity<B>` | The return value specifies the full response, including HTTP headers, and the body is encoded<br/>through `HttpMessageWriter` instances and written to the response.<br/>See [`ResponseEntity`](#webflux-ann-responseentity). | +| `HttpHeaders` | For returning a response with headers and no body. | +| `String` | A view name to be resolved with `ViewResolver` instances and used together with the implicit<br/>model — determined through command objects and `@ModelAttribute` methods. The handler<br/>method can also programmatically enrich the model by declaring a `Model` argument<br/>(described [earlier](#webflux-viewresolution-handling)). | +| `View` | A `View` instance to use for rendering together with the implicit model — determined<br/>through command objects and `@ModelAttribute` methods. The handler method can also<br/>programmatically enrich the model by declaring a `Model` argument<br/>(described [earlier](#webflux-viewresolution-handling)). 
| +| `java.util.Map`, `org.springframework.ui.Model` | Attributes to be added to the implicit model, with the view name implicitly determined<br/>based on the request path. | +| `@ModelAttribute` | An attribute to be added to the model, with the view name implicitly determined based<br/>on the request path.<br/><br/> Note that `@ModelAttribute` is optional. See “Any other return value” later in<br/>this table. | +| `Rendering` | An API for model and view rendering scenarios. | +| `void` |A method with a `void`, possibly asynchronous (for example, `Mono<Void>`), return type (or a `null` return<br/>value) is considered to have fully handled the response if it also has a `ServerHttpResponse`,<br/>a `ServerWebExchange` argument, or an `@ResponseStatus` annotation. The same is also true<br/>if the controller has made a positive ETag or `lastModified` timestamp check.<br/>// TODO: See [Controllers](#webflux-caching-etag-lastmodified) for details.<br/><br/> If none of the above is true, a `void` return type can also indicate “no response body” for<br/>REST controllers or default view name selection for HTML controllers.| +|`Flux<ServerSentEvent>`, `Observable<ServerSentEvent>`, or other reactive type| Emit server-sent events. The `ServerSentEvent` wrapper can be omitted when only data needs<br/>to be written (however, `text/event-stream` must be requested or declared in the mapping<br/>through the `produces` attribute). | +| Any other return value | If a return value is not matched to any of the above, it is, by default, treated as a view<br/>name, if it is `String` or `void` (default view name selection applies), or as a model<br/>attribute to be added to the model, unless it is a simple type, as determined by[BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-),<br/>in which case it remains unresolved. 
| + +##### Type Conversion + +[Web MVC](web.html#mvc-ann-typeconversion) + +Some annotated controller method arguments that represent String-based request input (for example,`@RequestParam`, `@RequestHeader`, `@PathVariable`, `@MatrixVariable`, and `@CookieValue`) +can require type conversion if the argument is declared as something other than `String`. + +For such cases, type conversion is automatically applied based on the configured converters. +By default, simple types (such as `int`, `long`, `Date`, and others) are supported. Type conversion +can be customized through a `WebDataBinder` (see [`DataBinder`](#webflux-ann-initbinder)) or by registering`Formatters` with the `FormattingConversionService` (see [Spring Field Formatting](core.html#format)). + +A practical issue in type conversion is the treatment of an empty String source value. +Such a value is treated as missing if it becomes `null` as a result of type conversion. +This can be the case for `Long`, `UUID`, and other target types. If you want to allow `null`to be injected, either use the `required` flag on the argument annotation, or declare the +argument as `@Nullable`. + +##### Matrix Variables + +[Web MVC](web.html#mvc-ann-matrix-variables) + +[RFC 3986](https://tools.ietf.org/html/rfc3986#section-3.3) discusses name-value pairs in +path segments. In Spring WebFlux, we refer to those as “matrix variables” based on an[“old post”](https://www.w3.org/DesignIssues/MatrixURIs.html) by Tim Berners-Lee, but they +can be also be referred to as URI path parameters. + +Matrix variables can appear in any path segment, with each variable separated by a semicolon and +multiple values separated by commas — for example, `"/cars;color=red,green;year=2012"`. Multiple +values can also be specified through repeated variable names — for example,`"color=red;color=green;color=blue"`. + +Unlike Spring MVC, in WebFlux, the presence or absence of matrix variables in a URL does +not affect request mappings. 
In other words, you are not required to use a URI variable
+to mask variable content. That said, if you want to access matrix variables from a
+controller method, you need to add a URI variable to the path segment where matrix
+variables are expected. The following example shows how to do so:
+
+Java
+
+```
+// GET /pets/42;q=11;r=22
+
+@GetMapping("/pets/{petId}")
+public void findPet(@PathVariable String petId, @MatrixVariable int q) {
+
+    // petId == 42
+    // q == 11
+}
+```
+
+Kotlin
+
+```
+// GET /pets/42;q=11;r=22
+
+@GetMapping("/pets/{petId}")
+fun findPet(@PathVariable petId: String, @MatrixVariable q: Int) {
+
+    // petId == 42
+    // q == 11
+}
+```
+
+Given that all path segments can contain matrix variables, you may sometimes need to
+disambiguate which path variable the matrix variable is expected to be in,
+as the following example shows:
+
+Java
+
+```
+// GET /owners/42;q=11/pets/21;q=22
+
+@GetMapping("/owners/{ownerId}/pets/{petId}")
+public void findPet(
+        @MatrixVariable(name="q", pathVar="ownerId") int q1,
+        @MatrixVariable(name="q", pathVar="petId") int q2) {
+
+    // q1 == 11
+    // q2 == 22
+}
+```
+
+Kotlin
+
+```
+@GetMapping("/owners/{ownerId}/pets/{petId}")
+fun findPet(
+        @MatrixVariable(name = "q", pathVar = "ownerId") q1: Int,
+        @MatrixVariable(name = "q", pathVar = "petId") q2: Int) {
+
+    // q1 == 11
+    // q2 == 22
+}
+```
+
+You can define a matrix variable as optional and specify a default value,
+as the following example shows:
+
+Java
+
+```
+// GET /pets/42
+
+@GetMapping("/pets/{petId}")
+public void findPet(@MatrixVariable(required=false, defaultValue="1") int q) {
+
+    // q == 1
+}
+```
+
+Kotlin
+
+```
+// GET /pets/42
+
+@GetMapping("/pets/{petId}")
+fun findPet(@MatrixVariable(required = false, defaultValue = "1") q: Int) {
+
+    // q == 1
+}
+```
+
+To get all matrix variables, use a `MultiValueMap`, as the following example shows:
+
+Java
+
+```
+// GET /owners/42;q=11;r=12/pets/21;q=22;s=23
+
+@GetMapping("/owners/{ownerId}/pets/{petId}") +public void findPet( + @MatrixVariable MultiValueMap<String, String> matrixVars, + @MatrixVariable(pathVar="petId") MultiValueMap<String, String> petMatrixVars) { + + // matrixVars: ["q" : [11,22], "r" : 12, "s" : 23] + // petMatrixVars: ["q" : 22, "s" : 23] +} +``` + +Kotlin + +``` +// GET /owners/42;q=11;r=12/pets/21;q=22;s=23 + +@GetMapping("/owners/{ownerId}/pets/{petId}") +fun findPet( + @MatrixVariable matrixVars: MultiValueMap<String, String>, + @MatrixVariable(pathVar="petId") petMatrixVars: MultiValueMap<String, String>) { + + // matrixVars: ["q" : [11,22], "r" : 12, "s" : 23] + // petMatrixVars: ["q" : 22, "s" : 23] +} +``` + +##### `@RequestParam` + +[Web MVC](web.html#mvc-ann-requestparam) + +You can use the `@RequestParam` annotation to bind query parameters to a method argument in a +controller. The following code snippet shows the usage: + +Java + +``` +@Controller +@RequestMapping("/pets") +public class EditPetForm { + + // ... + + @GetMapping + public String setupForm(@RequestParam("petId") int petId, Model model) { (1) + Pet pet = this.clinic.loadPet(petId); + model.addAttribute("pet", pet); + return "petForm"; + } + + // ... +} +``` + +|**1**|Using `@RequestParam`.| +|-----|----------------------| + +Kotlin + +``` +import org.springframework.ui.set + +@Controller +@RequestMapping("/pets") +class EditPetForm { + + // ... + + @GetMapping + fun setupForm(@RequestParam("petId") petId: Int, model: Model): String { (1) + val pet = clinic.loadPet(petId) + model["pet"] = pet + return "petForm" + } + + // ... +} +``` + +|**1**|Using `@RequestParam`.| +|-----|----------------------| + +| |The Servlet API “request parameter” concept conflates query parameters, form<br/>data, and multiparts into one. However, in WebFlux, each is accessed individually through`ServerWebExchange`. 
While `@RequestParam` binds to query parameters only, you can use<br/>data binding to apply query parameters, form data, and multiparts to a[command object](#webflux-ann-modelattrib-method-args).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Method parameters that use the `@RequestParam` annotation are required by default, but +you can specify that a method parameter is optional by setting the required flag of a `@RequestParam`to `false` or by declaring the argument with a `java.util.Optional`wrapper. + +Type conversion is applied automatically if the target method parameter type is not`String`. See [Type Conversion](#webflux-ann-typeconversion). + +When a `@RequestParam` annotation is declared on a `Map<String, String>` or`MultiValueMap<String, String>` argument, the map is populated with all query parameters. + +Note that use of `@RequestParam` is optional — for example, to set its attributes. By +default, any argument that is a simple value type (as determined by[BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-)) +and is not resolved by any other argument resolver is treated as if it were annotated +with `@RequestParam`. + +##### `@RequestHeader` + +[Web MVC](web.html#mvc-ann-requestheader) + +You can use the `@RequestHeader` annotation to bind a request header to a method argument in a +controller. 
+
+The following example shows a request with headers:
+
+```
+Host                    localhost:8080
+Accept                  text/html,application/xhtml+xml,application/xml;q=0.9
+Accept-Language         fr,en-gb;q=0.7,en;q=0.3
+Accept-Encoding         gzip,deflate
+Accept-Charset          ISO-8859-1,utf-8;q=0.7,*;q=0.7
+Keep-Alive              300
+```
+
+The following example gets the value of the `Accept-Encoding` and `Keep-Alive` headers:
+
+Java
+
+```
+@GetMapping("/demo")
+public void handle(
+        @RequestHeader("Accept-Encoding") String encoding, (1)
+        @RequestHeader("Keep-Alive") long keepAlive) { (2)
+    //...
+}
+```
+
+|**1**|Get the value of the `Accept-Encoding` header.|
+|-----|----------------------------------------------|
+|**2**| Get the value of the `Keep-Alive` header. |
+
+Kotlin
+
+```
+@GetMapping("/demo")
+fun handle(
+        @RequestHeader("Accept-Encoding") encoding: String, (1)
+        @RequestHeader("Keep-Alive") keepAlive: Long) { (2)
+    //...
+}
+```
+
+|**1**|Get the value of the `Accept-Encoding` header.|
+|-----|----------------------------------------------|
+|**2**| Get the value of the `Keep-Alive` header. |
+
+Type conversion is applied automatically if the target method parameter type is not`String`. See [Type Conversion](#webflux-ann-typeconversion).
+
+When a `@RequestHeader` annotation is used on a `Map<String, String>`,`MultiValueMap<String, String>`, or `HttpHeaders` argument, the map is populated
+with all header values.
+
+| |Built-in support is available for converting a comma-separated string into an<br/>array or collection of strings or other types known to the type conversion system. 
For<br/>example, a method parameter annotated with `@RequestHeader("Accept")` may be of type`String` but also of `String[]` or `List<String>`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `@CookieValue` + +[Web MVC](web.html#mvc-ann-cookievalue) + +You can use the `@CookieValue` annotation to bind the value of an HTTP cookie to a method argument +in a controller. + +The following example shows a request with a cookie: + +``` +JSESSIONID=415A4AC178C59DACE0B2C9CA727CDD84 +``` + +The following code sample demonstrates how to get the cookie value: + +Java + +``` +@GetMapping("/demo") +public void handle(@CookieValue("JSESSIONID") String cookie) { (1) + //... +} +``` + +|**1**|Get the cookie value.| +|-----|---------------------| + +Kotlin + +``` +@GetMapping("/demo") +fun handle(@CookieValue("JSESSIONID") cookie: String) { (1) + //... +} +``` + +|**1**|Get the cookie value.| +|-----|---------------------| + +Type conversion is applied automatically if the target method parameter type is not`String`. See [Type Conversion](#webflux-ann-typeconversion). + +##### `@ModelAttribute` + +[Web MVC](web.html#mvc-ann-modelattrib-method-args) + +You can use the `@ModelAttribute` annotation on a method argument to access an attribute from the +model or have it instantiated if not present. The model attribute is also overlaid with +the values of query parameters and form fields whose names match to field names. This is +referred to as data binding, and it saves you from having to deal with parsing and +converting individual query parameters and form fields. 
The following example binds an instance of `Pet`: + +Java + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +public String processSubmit(@ModelAttribute Pet pet) { } (1) +``` + +|**1**|Bind an instance of `Pet`.| +|-----|--------------------------| + +Kotlin + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +fun processSubmit(@ModelAttribute pet: Pet): String { } (1) +``` + +|**1**|Bind an instance of `Pet`.| +|-----|--------------------------| + +The `Pet` instance in the preceding example is resolved as follows: + +* From the model if already added through [`Model`](#webflux-ann-modelattrib-methods). + +* From the HTTP session through [`@SessionAttributes`](#webflux-ann-sessionattributes). + +* From the invocation of a default constructor. + +* From the invocation of a “primary constructor” with arguments that match query + parameters or form fields. Argument names are determined through JavaBeans`@ConstructorProperties` or through runtime-retained parameter names in the bytecode. + +After the model attribute instance is obtained, data binding is applied. The`WebExchangeDataBinder` class matches names of query parameters and form fields to field +names on the target `Object`. Matching fields are populated after type conversion is applied +where necessary. For more on data binding (and validation), see[Validation](core.html#validation). For more on customizing data binding, see[`DataBinder`](#webflux-ann-initbinder). + +Data binding can result in errors. By default, a `WebExchangeBindException` is raised, but, +to check for such errors in the controller method, you can add a `BindingResult` argument +immediately next to the `@ModelAttribute`, as the following example shows: + +Java + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +public String processSubmit(@ModelAttribute("pet") Pet pet, BindingResult result) { (1) + if (result.hasErrors()) { + return "petForm"; + } + // ... 
+} +``` + +|**1**|Adding a `BindingResult`.| +|-----|-------------------------| + +Kotlin + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +fun processSubmit(@ModelAttribute("pet") pet: Pet, result: BindingResult): String { (1) + if (result.hasErrors()) { + return "petForm" + } + // ... +} +``` + +|**1**|Adding a `BindingResult`.| +|-----|-------------------------| + +You can automatically apply validation after data binding by adding the`javax.validation.Valid` annotation or Spring’s `@Validated` annotation (see also[Bean Validation](core.html#validation-beanvalidation) and[Spring validation](core.html#validation)). The following example uses the `@Valid` annotation: + +Java + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +public String processSubmit(@Valid @ModelAttribute("pet") Pet pet, BindingResult result) { (1) + if (result.hasErrors()) { + return "petForm"; + } + // ... +} +``` + +|**1**|Using `@Valid` on a model attribute argument.| +|-----|---------------------------------------------| + +Kotlin + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +fun processSubmit(@Valid @ModelAttribute("pet") pet: Pet, result: BindingResult): String { (1) + if (result.hasErrors()) { + return "petForm" + } + // ... +} +``` + +|**1**|Using `@Valid` on a model attribute argument.| +|-----|---------------------------------------------| + +Spring WebFlux, unlike Spring MVC, supports reactive types in the model — for example,`Mono<Account>` or `io.reactivex.Single<Account>`. You can declare a `@ModelAttribute` argument +with or without a reactive type wrapper, and it will be resolved accordingly, +to the actual value if necessary. However, note that, to use a `BindingResult`argument, you must declare the `@ModelAttribute` argument before it without a reactive +type wrapper, as shown earlier. 
Alternatively, you can handle any errors through the +reactive type, as the following example shows: + +Java + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +public Mono<String> processSubmit(@Valid @ModelAttribute("pet") Mono<Pet> petMono) { + return petMono + .flatMap(pet -> { + // ... + }) + .onErrorResume(ex -> { + // ... + }); +} +``` + +Kotlin + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +fun processSubmit(@Valid @ModelAttribute("pet") petMono: Mono<Pet>): Mono<String> { + return petMono + .flatMap { pet -> + // ... + } + .onErrorResume{ ex -> + // ... + } +} +``` + +Note that use of `@ModelAttribute` is optional — for example, to set its attributes. +By default, any argument that is not a simple value type( as determined by[BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-)) +and is not resolved by any other argument resolver is treated as if it were annotated +with `@ModelAttribute`. + +##### `@SessionAttributes` + +[Web MVC](web.html#mvc-ann-sessionattributes) + +`@SessionAttributes` is used to store model attributes in the `WebSession` between +requests. It is a type-level annotation that declares session attributes used by a +specific controller. This typically lists the names of model attributes or types of +model attributes that should be transparently stored in the session for subsequent +requests to access. + +Consider the following example: + +Java + +``` +@Controller +@SessionAttributes("pet") (1) +public class EditPetForm { + // ... +} +``` + +|**1**|Using the `@SessionAttributes` annotation.| +|-----|------------------------------------------| + +Kotlin + +``` +@Controller +@SessionAttributes("pet") (1) +class EditPetForm { + // ... 
+}
+```
+
+|**1**|Using the `@SessionAttributes` annotation.|
+|-----|------------------------------------------|
+
+On the first request, when a model attribute with the name, `pet`, is added to the model,
+it is automatically promoted to and saved in the `WebSession`. It remains there until
+another controller method uses a `SessionStatus` method argument to clear the storage,
+as the following example shows:
+
+Java
+
+```
+@Controller
+@SessionAttributes("pet") (1)
+public class EditPetForm {
+
+    // ...
+
+    @PostMapping("/pets/{id}")
+    public String handle(Pet pet, BindingResult errors, SessionStatus status) { (2)
+        if (errors.hasErrors()) {
+            // ...
+        }
+        status.setComplete();
+        // ...
+    }
+}
+```
+
+|**1**|Using the `@SessionAttributes` annotation.|
+|-----|------------------------------------------|
+|**2**| Using a `SessionStatus` variable. |
+
+Kotlin
+
+```
+@Controller
+@SessionAttributes("pet") (1)
+class EditPetForm {
+
+    // ...
+
+    @PostMapping("/pets/{id}")
+    fun handle(pet: Pet, errors: BindingResult, status: SessionStatus): String { (2)
+        if (errors.hasErrors()) {
+            // ...
+        }
+        status.setComplete()
+        // ...
+    }
+}
+```
+
+|**1**|Using the `@SessionAttributes` annotation.|
+|-----|------------------------------------------|
+|**2**| Using a `SessionStatus` variable. |
+
+##### `@SessionAttribute`
+
+[Web MVC](web.html#mvc-ann-sessionattribute)
+
+If you need access to pre-existing session attributes that are managed globally
+(that is, outside the controller — for example, by a filter) and may or may not be present,
+you can use the `@SessionAttribute` annotation on a method parameter, as the following example shows:
+
+Java
+
+```
+@GetMapping("/")
+public String handle(@SessionAttribute User user) { (1)
+    // ...
+}
+```
+
+|**1**|Using `@SessionAttribute`.|
+|-----|--------------------------|
+
+Kotlin
+
+```
+@GetMapping("/")
+fun handle(@SessionAttribute user: User): String { (1)
+    // ...
+} +``` + +|**1**|Using `@SessionAttribute`.| +|-----|--------------------------| + +For use cases that require adding or removing session attributes, consider injecting`WebSession` into the controller method. + +For temporary storage of model attributes in the session as part of a controller +workflow, consider using `SessionAttributes`, as described in[`@SessionAttributes`](#webflux-ann-sessionattributes). + +##### `@RequestAttribute` + +[Web MVC](web.html#mvc-ann-requestattrib) + +Similarly to `@SessionAttribute`, you can use the `@RequestAttribute` annotation to +access pre-existing request attributes created earlier (for example, by a `WebFilter`), +as the following example shows: + +Java + +``` +@GetMapping("/") +public String handle(@RequestAttribute Client client) { (1) + // ... +} +``` + +|**1**|Using `@RequestAttribute`.| +|-----|--------------------------| + +Kotlin + +``` +@GetMapping("/") +fun handle(@RequestAttribute client: Client): String { (1) + // ... +} +``` + +|**1**|Using `@RequestAttribute`.| +|-----|--------------------------| + +##### Multipart Content + +[Web MVC](web.html#mvc-multipart-forms) + +As explained in [Multipart Data](#webflux-multipart), `ServerWebExchange` provides access to multipart +content. The best way to handle a file upload form (for example, from a browser) in a controller +is through data binding to a [command object](#webflux-ann-modelattrib-method-args), +as the following example shows: + +Java + +``` +class MyForm { + + private String name; + + private MultipartFile file; + + // ... + +} + +@Controller +public class FileUploadController { + + @PostMapping("/form") + public String handleFormUpload(MyForm form, BindingResult errors) { + // ... + } + +} +``` + +Kotlin + +``` +class MyForm( + val name: String, + val file: MultipartFile) + +@Controller +class FileUploadController { + + @PostMapping("/form") + fun handleFormUpload(form: MyForm, errors: BindingResult): String { + // ... 
+ } + +} +``` + +You can also submit multipart requests from non-browser clients in a RESTful service +scenario. The following example uses a file along with JSON: + +``` +POST /someUrl +Content-Type: multipart/mixed + +--edt7Tfrdusa7r3lNQc79vXuhIIMlatb7PQg7Vp +Content-Disposition: form-data; name="meta-data" +Content-Type: application/json; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +{ + "name": "value" +} +--edt7Tfrdusa7r3lNQc79vXuhIIMlatb7PQg7Vp +Content-Disposition: form-data; name="file-data"; filename="file.properties" +Content-Type: text/xml +Content-Transfer-Encoding: 8bit +... File Data ... +``` + +You can access individual parts with `@RequestPart`, as the following example shows: + +Java + +``` +@PostMapping("/") +public String handle(@RequestPart("meta-data") Part metadata, (1) + @RequestPart("file-data") FilePart file) { (2) + // ... +} +``` + +|**1**|Using `@RequestPart` to get the metadata.| +|-----|-----------------------------------------| +|**2**| Using `@RequestPart` to get the file. | + +Kotlin + +``` +@PostMapping("/") +fun handle(@RequestPart("meta-data") Part metadata, (1) + @RequestPart("file-data") FilePart file): String { (2) + // ... +} +``` + +|**1**|Using `@RequestPart` to get the metadata.| +|-----|-----------------------------------------| +|**2**| Using `@RequestPart` to get the file. | + +To deserialize the raw part content (for example, to JSON — similar to `@RequestBody`), +you can declare a concrete target `Object`, instead of `Part`, as the following example shows: + +Java + +``` +@PostMapping("/") +public String handle(@RequestPart("meta-data") MetaData metadata) { (1) + // ... +} +``` + +|**1**|Using `@RequestPart` to get the metadata.| +|-----|-----------------------------------------| + +Kotlin + +``` +@PostMapping("/") +fun handle(@RequestPart("meta-data") metadata: MetaData): String { (1) + // ... 
+} +``` + +|**1**|Using `@RequestPart` to get the metadata.| +|-----|-----------------------------------------| + +You can use `@RequestPart` in combination with `javax.validation.Valid` or Spring’s`@Validated` annotation, which causes Standard Bean Validation to be applied. Validation +errors lead to a `WebExchangeBindException` that results in a 400 (BAD\_REQUEST) response. +The exception contains a `BindingResult` with the error details and can also be handled +in the controller method by declaring the argument with an async wrapper and then using +error related operators: + +Java + +``` +@PostMapping("/") +public String handle(@Valid @RequestPart("meta-data") Mono<MetaData> metadata) { + // use one of the onError* operators... +} +``` + +Kotlin + +``` +@PostMapping("/") +fun handle(@Valid @RequestPart("meta-data") metadata: MetaData): String { + // ... +} +``` + +To access all multipart data as a `MultiValueMap`, you can use `@RequestBody`, +as the following example shows: + +Java + +``` +@PostMapping("/") +public String handle(@RequestBody Mono<MultiValueMap<String, Part>> parts) { (1) + // ... +} +``` + +|**1**|Using `@RequestBody`.| +|-----|---------------------| + +Kotlin + +``` +@PostMapping("/") +fun handle(@RequestBody parts: MultiValueMap<String, Part>): String { (1) + // ... +} +``` + +|**1**|Using `@RequestBody`.| +|-----|---------------------| + +To access multipart data sequentially, in streaming fashion, you can use `@RequestBody` with`Flux<Part>` (or `Flow<Part>` in Kotlin) instead, as the following example shows: + +Java + +``` +@PostMapping("/") +public String handle(@RequestBody Flux<Part> parts) { (1) + // ... +} +``` + +|**1**|Using `@RequestBody`.| +|-----|---------------------| + +Kotlin + +``` +@PostMapping("/") +fun handle(@RequestBody parts: Flow<Part>): String { (1) + // ... 
+} +``` + +|**1**|Using `@RequestBody`.| +|-----|---------------------| + +##### `@RequestBody` + +[Web MVC](web.html#mvc-ann-requestbody) + +You can use the `@RequestBody` annotation to have the request body read and deserialized into an`Object` through an [HttpMessageReader](#webflux-codecs). +The following example uses a `@RequestBody` argument: + +Java + +``` +@PostMapping("/accounts") +public void handle(@RequestBody Account account) { + // ... +} +``` + +Kotlin + +``` +@PostMapping("/accounts") +fun handle(@RequestBody account: Account) { + // ... +} +``` + +Unlike Spring MVC, in WebFlux, the `@RequestBody` method argument supports reactive types +and fully non-blocking reading and (client-to-server) streaming. + +Java + +``` +@PostMapping("/accounts") +public void handle(@RequestBody Mono<Account> account) { + // ... +} +``` + +Kotlin + +``` +@PostMapping("/accounts") +fun handle(@RequestBody accounts: Flow<Account>) { + // ... +} +``` + +You can use the [HTTP message codecs](#webflux-config-message-codecs) option of the [WebFlux Config](#webflux-config) to +configure or customize message readers. + +You can use `@RequestBody` in combination with `javax.validation.Valid` or Spring’s`@Validated` annotation, which causes Standard Bean Validation to be applied. Validation +errors cause a `WebExchangeBindException`, which results in a 400 (BAD\_REQUEST) response. +The exception contains a `BindingResult` with error details and can be handled in the +controller method by declaring the argument with an async wrapper and then using error +related operators: + +Java + +``` +@PostMapping("/accounts") +public void handle(@Valid @RequestBody Mono<Account> account) { + // use one of the onError* operators... +} +``` + +Kotlin + +``` +@PostMapping("/accounts") +fun handle(@Valid @RequestBody account: Mono<Account>) { + // ... 
+} +``` + +##### `HttpEntity` + +[Web MVC](web.html#mvc-ann-httpentity) + +`HttpEntity` is more or less identical to using [`@RequestBody`](#webflux-ann-requestbody) but is based on a +container object that exposes request headers and the body. The following example uses an`HttpEntity`: + +Java + +``` +@PostMapping("/accounts") +public void handle(HttpEntity<Account> entity) { + // ... +} +``` + +Kotlin + +``` +@PostMapping("/accounts") +fun handle(entity: HttpEntity<Account>) { + // ... +} +``` + +##### `@ResponseBody` + +[Web MVC](web.html#mvc-ann-responsebody) + +You can use the `@ResponseBody` annotation on a method to have the return serialized +to the response body through an [HttpMessageWriter](#webflux-codecs). The following +example shows how to do so: + +Java + +``` +@GetMapping("/accounts/{id}") +@ResponseBody +public Account handle() { + // ... +} +``` + +Kotlin + +``` +@GetMapping("/accounts/{id}") +@ResponseBody +fun handle(): Account { + // ... +} +``` + +`@ResponseBody` is also supported at the class level, in which case it is inherited by +all controller methods. This is the effect of `@RestController`, which is nothing more +than a meta-annotation marked with `@Controller` and `@ResponseBody`. + +`@ResponseBody` supports reactive types, which means you can return Reactor or RxJava +types and have the asynchronous values they produce rendered to the response. +For additional details, see [Streaming](#webflux-codecs-streaming) and[JSON rendering](#webflux-codecs-jackson). + +You can combine `@ResponseBody` methods with JSON serialization views. +See [Jackson JSON](#webflux-ann-jackson) for details. + +You can use the [HTTP message codecs](#webflux-config-message-codecs) option of the [WebFlux Config](#webflux-config) to +configure or customize message writing. + +##### `ResponseEntity` + +[Web MVC](web.html#mvc-ann-responseentity) + +`ResponseEntity` is like [`@ResponseBody`](#webflux-ann-responsebody) but with status and headers. 
For example: + +Java + +``` +@GetMapping("/something") +public ResponseEntity<String> handle() { + String body = ... ; + String etag = ... ; + return ResponseEntity.ok().eTag(etag).body(body); +} +``` + +Kotlin + +``` +@GetMapping("/something") +fun handle(): ResponseEntity<String> { + val body: String = ... + val etag: String = ... + return ResponseEntity.ok().eTag(etag).body(body) +} +``` + +WebFlux supports using a single value [reactive type](#webflux-reactive-libraries) to +produce the `ResponseEntity` asynchronously, and/or single and multi-value reactive types +for the body. This allows a variety of async responses with `ResponseEntity` as follows: + +* `ResponseEntity<Mono<T>>` or `ResponseEntity<Flux<T>>` make the response status and + headers known immediately while the body is provided asynchronously at a later point. + Use `Mono` if the body consists of 0..1 values or `Flux` if it can produce multiple values. + +* `Mono<ResponseEntity<T>>` provides all three — response status, headers, and body, + asynchronously at a later point. This allows the response status and headers to vary + depending on the outcome of asynchronous request handling. + +* `Mono<ResponseEntity<Mono<T>>>` or `Mono<ResponseEntity<Flux<T>>>` are yet another + possible, albeit less common alternative. They provide the response status and headers + asynchronously first and then the response body, also asynchronously, second. + +##### Jackson JSON + +Spring offers support for the Jackson JSON library. + +##### JSON Views + +[Web MVC](web.html#mvc-ann-jackson) + +Spring WebFlux provides built-in support for [Jackson’s Serialization Views](https://www.baeldung.com/jackson-json-view-annotation), +which allows rendering only a subset of all fields in an `Object`.
To use it with`@ResponseBody` or `ResponseEntity` controller methods, you can use Jackson’s`@JsonView` annotation to activate a serialization view class, as the following example shows: + +Java + +``` +@RestController +public class UserController { + + @GetMapping("/user") + @JsonView(User.WithoutPasswordView.class) + public User getUser() { + return new User("eric", "7!jd#h23"); + } +} + +public class User { + + public interface WithoutPasswordView {}; + public interface WithPasswordView extends WithoutPasswordView {}; + + private String username; + private String password; + + public User() { + } + + public User(String username, String password) { + this.username = username; + this.password = password; + } + + @JsonView(WithoutPasswordView.class) + public String getUsername() { + return this.username; + } + + @JsonView(WithPasswordView.class) + public String getPassword() { + return this.password; + } +} +``` + +Kotlin + +``` +@RestController +class UserController { + + @GetMapping("/user") + @JsonView(User.WithoutPasswordView::class) + fun getUser(): User { + return User("eric", "7!jd#h23") + } +} + +class User( + @JsonView(WithoutPasswordView::class) val username: String, + @JsonView(WithPasswordView::class) val password: String +) { + interface WithoutPasswordView + interface WithPasswordView : WithoutPasswordView +} +``` + +| |`@JsonView` allows an array of view classes but you can only specify only one per<br/>controller method. Use a composite interface if you need to activate multiple views.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.4.4. 
`Model` + +[Web MVC](web.html#mvc-ann-modelattrib-methods) + +You can use the `@ModelAttribute` annotation: + +* On a [method argument](#webflux-ann-modelattrib-method-args) in `@RequestMapping` methods + to create or access an Object from the model and to bind it to the request through a`WebDataBinder`. + +* As a method-level annotation in `@Controller` or `@ControllerAdvice` classes, helping + to initialize the model prior to any `@RequestMapping` method invocation. + +* On a `@RequestMapping` method to mark its return value as a model attribute. + +This section discusses `@ModelAttribute` methods, or the second item from the preceding list. +A controller can have any number of `@ModelAttribute` methods. All such methods are +invoked before `@RequestMapping` methods in the same controller. A `@ModelAttribute`method can also be shared across controllers through `@ControllerAdvice`. See the section on[Controller Advice](#webflux-ann-controller-advice) for more details. + +`@ModelAttribute` methods have flexible method signatures. They support many of the same +arguments as `@RequestMapping` methods (except for `@ModelAttribute` itself and anything +related to the request body). + +The following example uses a `@ModelAttribute` method: + +Java + +``` +@ModelAttribute +public void populateModel(@RequestParam String number, Model model) { + model.addAttribute(accountRepository.findAccount(number)); + // add more ... +} +``` + +Kotlin + +``` +@ModelAttribute +fun populateModel(@RequestParam number: String, model: Model) { + model.addAttribute(accountRepository.findAccount(number)) + // add more ... 
+} +``` + +The following example adds one attribute only: + +Java + +``` +@ModelAttribute +public Account addAccount(@RequestParam String number) { + return accountRepository.findAccount(number); +} +``` + +Kotlin + +``` +@ModelAttribute +fun addAccount(@RequestParam number: String): Account { + return accountRepository.findAccount(number) +} +``` + +| |When a name is not explicitly specified, a default name is chosen based on the type,<br/>as explained in the javadoc for [`Conventions`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/Conventions.html).<br/>You can always assign an explicit name by using the overloaded `addAttribute` method or<br/>through the name attribute on `@ModelAttribute` (for a return value).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring WebFlux, unlike Spring MVC, explicitly supports reactive types in the model +(for example, `Mono<Account>` or `io.reactivex.Single<Account>`). Such asynchronous model +attributes can be transparently resolved (and the model updated) to their actual values +at the time of `@RequestMapping` invocation, provided a `@ModelAttribute` argument is +declared without a wrapper, as the following example shows: + +Java + +``` +@ModelAttribute +public void addAccount(@RequestParam String number, Model model) { + Mono<Account> accountMono = accountRepository.findAccount(number); + model.addAttribute("account", accountMono); +} + +@PostMapping("/accounts") +public String handle(@ModelAttribute Account account, BindingResult errors) { + // ...
+} +``` + +Kotlin + +``` +import org.springframework.ui.set + +@ModelAttribute +fun addAccount(@RequestParam number: String) { + val accountMono: Mono<Account> = accountRepository.findAccount(number) + model["account"] = accountMono +} + +@PostMapping("/accounts") +fun handle(@ModelAttribute account: Account, errors: BindingResult): String { + // ... +} +``` + +In addition, any model attributes that have a reactive type wrapper are resolved to their +actual values (and the model updated) just prior to view rendering. + +You can also use `@ModelAttribute` as a method-level annotation on `@RequestMapping`methods, in which case the return value of the `@RequestMapping` method is interpreted as a +model attribute. This is typically not required, as it is the default behavior in HTML +controllers, unless the return value is a `String` that would otherwise be interpreted +as a view name. `@ModelAttribute` can also help to customize the model attribute name, +as the following example shows: + +Java + +``` +@GetMapping("/accounts/{id}") +@ModelAttribute("myAccount") +public Account handle() { + // ... + return account; +} +``` + +Kotlin + +``` +@GetMapping("/accounts/{id}") +@ModelAttribute("myAccount") +fun handle(): Account { + // ... + return account +} +``` + +#### 1.4.5. `DataBinder` + +[Web MVC](web.html#mvc-ann-initbinder) + +`@Controller` or `@ControllerAdvice` classes can have `@InitBinder` methods, to +initialize instances of `WebDataBinder`. Those, in turn, are used to: + +* Bind request parameters (that is, form data or query) to a model object. + +* Convert `String`-based request values (such as request parameters, path variables, + headers, cookies, and others) to the target type of controller method arguments. + +* Format model object values as `String` values when rendering HTML forms. + +`@InitBinder` methods can register controller-specific `java.beans.PropertyEditor` or +Spring `Converter` and `Formatter` components. 
In addition, you can use the[WebFlux Java configuration](#webflux-config-conversion) to register `Converter` and`Formatter` types in a globally shared `FormattingConversionService`. + +`@InitBinder` methods support many of the same arguments that `@RequestMapping` methods +do, except for `@ModelAttribute` (command object) arguments. Typically, they are declared +with a `WebDataBinder` argument, for registrations, and a `void` return value. +The following example uses the `@InitBinder` annotation: + +Java + +``` +@Controller +public class FormController { + + @InitBinder (1) + public void initBinder(WebDataBinder binder) { + SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); + dateFormat.setLenient(false); + binder.registerCustomEditor(Date.class, new CustomDateEditor(dateFormat, false)); + } + + // ... +} +``` + +|**1**|Using the `@InitBinder` annotation.| +|-----|-----------------------------------| + +Kotlin + +``` +@Controller +class FormController { + + @InitBinder (1) + fun initBinder(binder: WebDataBinder) { + val dateFormat = SimpleDateFormat("yyyy-MM-dd") + dateFormat.isLenient = false + binder.registerCustomEditor(Date::class.java, CustomDateEditor(dateFormat, false)) + } + + // ... +} +``` + +Alternatively, when using a `Formatter`-based setup through a shared`FormattingConversionService`, you could re-use the same approach and register +controller-specific `Formatter` instances, as the following example shows: + +Java + +``` +@Controller +public class FormController { + + @InitBinder + protected void initBinder(WebDataBinder binder) { + binder.addCustomFormatter(new DateFormatter("yyyy-MM-dd")); (1) + } + + // ... 
+} +``` + +|**1**|Adding a custom formatter (a `DateFormatter`, in this case).| +|-----|------------------------------------------------------------| + +Kotlin + +``` +@Controller +class FormController { + + @InitBinder + fun initBinder(binder: WebDataBinder) { + binder.addCustomFormatter(DateFormatter("yyyy-MM-dd")) (1) + } + + // ... +} +``` + +|**1**|Adding a custom formatter (a `DateFormatter`, in this case).| +|-----|------------------------------------------------------------| + +#### 1.4.6. Managing Exceptions + +[Web MVC](web.html#mvc-ann-exceptionhandler) + +`@Controller` and [@ControllerAdvice](#webflux-ann-controller-advice) classes can have`@ExceptionHandler` methods to handle exceptions from controller methods. The following +example includes such a handler method: + +Java + +``` +@Controller +public class SimpleController { + + // ... + + @ExceptionHandler (1) + public ResponseEntity<String> handle(IOException ex) { + // ... + } +} +``` + +|**1**|Declaring an `@ExceptionHandler`.| +|-----|---------------------------------| + +Kotlin + +``` +@Controller +class SimpleController { + + // ... + + @ExceptionHandler (1) + fun handle(ex: IOException): ResponseEntity<String> { + // ... + } +} +``` + +|**1**|Declaring an `@ExceptionHandler`.| +|-----|---------------------------------| + +The exception can match against a top-level exception being propagated (that is, a direct`IOException` being thrown) or against the immediate cause within a top-level wrapper +exception (for example, an `IOException` wrapped inside an `IllegalStateException`). + +For matching exception types, preferably declare the target exception as a method argument, +as shown in the preceding example. Alternatively, the annotation declaration can narrow the +exception types to match. We generally recommend being as specific as possible in the +argument signature and to declare your primary root exception mappings on a`@ControllerAdvice` prioritized with a corresponding order. 
+See [the MVC section](web.html#mvc-ann-exceptionhandler) for details. + +| |An `@ExceptionHandler` method in WebFlux supports the same method arguments and<br/>return values as a `@RequestMapping` method, with the exception of request body-<br/>and `@ModelAttribute`-related method arguments.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Support for `@ExceptionHandler` methods in Spring WebFlux is provided by the`HandlerAdapter` for `@RequestMapping` methods. See [`DispatcherHandler`](#webflux-dispatcher-handler)for more detail. + +##### REST API exceptions + +[Web MVC](web.html#mvc-ann-rest-exceptions) + +A common requirement for REST services is to include error details in the body of the +response. The Spring Framework does not automatically do so, because the representation +of error details in the response body is application-specific. However, a`@RestController` can use `@ExceptionHandler` methods with a `ResponseEntity` return +value to set the status and the body of the response. Such methods can also be declared +in `@ControllerAdvice` classes to apply them globally. + +| |Note that Spring WebFlux does not have an equivalent for the Spring MVC`ResponseEntityExceptionHandler`, because WebFlux raises only `ResponseStatusException`(or subclasses thereof), and those do not need to be translated to<br/>an HTTP status code.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.4.7. 
Controller Advice + +[Web MVC](web.html#mvc-ann-controller-advice) + +Typically, the `@ExceptionHandler`, `@InitBinder`, and `@ModelAttribute` methods apply +within the `@Controller` class (or class hierarchy) in which they are declared. If you +want such methods to apply more globally (across controllers), you can declare them in a +class annotated with `@ControllerAdvice` or `@RestControllerAdvice`. + +`@ControllerAdvice` is annotated with `@Component`, which means that such classes can be +registered as Spring beans through [component scanning](core.html#beans-java-instantiating-container-scan). `@RestControllerAdvice` is a composed annotation that is annotated +with both `@ControllerAdvice` and `@ResponseBody`, which essentially means`@ExceptionHandler` methods are rendered to the response body through message conversion +(versus view resolution or template rendering). + +On startup, the infrastructure classes for `@RequestMapping` and `@ExceptionHandler`methods detect Spring beans annotated with `@ControllerAdvice` and then apply their +methods at runtime. Global `@ExceptionHandler` methods (from a `@ControllerAdvice`) are +applied *after* local ones (from the `@Controller`). By contrast, global `@ModelAttribute`and `@InitBinder` methods are applied *before* local ones. 
+ +By default, `@ControllerAdvice` methods apply to every request (that is, all controllers), +but you can narrow that down to a subset of controllers by using attributes on the +annotation, as the following example shows: + +Java + +``` +// Target all Controllers annotated with @RestController +@ControllerAdvice(annotations = RestController.class) +public class ExampleAdvice1 {} + +// Target all Controllers within specific packages +@ControllerAdvice("org.example.controllers") +public class ExampleAdvice2 {} + +// Target all Controllers assignable to specific classes +@ControllerAdvice(assignableTypes = {ControllerInterface.class, AbstractController.class}) +public class ExampleAdvice3 {} +``` + +Kotlin + +``` +// Target all Controllers annotated with @RestController +@ControllerAdvice(annotations = [RestController::class]) +public class ExampleAdvice1 {} + +// Target all Controllers within specific packages +@ControllerAdvice("org.example.controllers") +public class ExampleAdvice2 {} + +// Target all Controllers assignable to specific classes +@ControllerAdvice(assignableTypes = [ControllerInterface::class, AbstractController::class]) +public class ExampleAdvice3 {} +``` + +The selectors in the preceding example are evaluated at runtime and may negatively impact +performance if used extensively. See the[`@ControllerAdvice`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/bind/annotation/ControllerAdvice.html)javadoc for more details. + +### 1.5. Functional Endpoints + +[Web MVC](web.html#webmvc-fn) + +Spring WebFlux includes WebFlux.fn, a lightweight functional programming model in which functions +are used to route and handle requests and contracts are designed for immutability. +It is an alternative to the annotation-based programming model but otherwise runs on +the same [Reactive Core](#webflux-reactive-spring-web) foundation. + +#### 1.5.1. 
Overview + +[Web MVC](web.html#webmvc-fn-overview) + +In WebFlux.fn, an HTTP request is handled with a `HandlerFunction`: a function that takes`ServerRequest` and returns a delayed `ServerResponse` (i.e. `Mono<ServerResponse>`). +Both the request and the response object have immutable contracts that offer JDK 8-friendly +access to the HTTP request and response.`HandlerFunction` is the equivalent of the body of a `@RequestMapping` method in the +annotation-based programming model. + +Incoming requests are routed to a handler function with a `RouterFunction`: a function that +takes `ServerRequest` and returns a delayed `HandlerFunction` (i.e. `Mono<HandlerFunction>`). +When the router function matches, a handler function is returned; otherwise an empty Mono.`RouterFunction` is the equivalent of a `@RequestMapping` annotation, but with the major +difference that router functions provide not just data, but also behavior. + +`RouterFunctions.route()` provides a router builder that facilitates the creation of routers, +as the following example shows: + +Java + +``` +import static org.springframework.http.MediaType.APPLICATION_JSON; +import static org.springframework.web.reactive.function.server.RequestPredicates.*; +import static org.springframework.web.reactive.function.server.RouterFunctions.route; + +PersonRepository repository = ... +PersonHandler handler = new PersonHandler(repository); + +RouterFunction<ServerResponse> route = route() + .GET("/person/{id}", accept(APPLICATION_JSON), handler::getPerson) + .GET("/person", accept(APPLICATION_JSON), handler::listPeople) + .POST("/person", handler::createPerson) + .build(); + +public class PersonHandler { + + // ... + + public Mono<ServerResponse> listPeople(ServerRequest request) { + // ... + } + + public Mono<ServerResponse> createPerson(ServerRequest request) { + // ... + } + + public Mono<ServerResponse> getPerson(ServerRequest request) { + // ... + } +} +``` + +Kotlin + +``` +val repository: PersonRepository = ... 
+val handler = PersonHandler(repository) + +val route = coRouter { (1) + accept(APPLICATION_JSON).nest { + GET("/person/{id}", handler::getPerson) + GET("/person", handler::listPeople) + } + POST("/person", handler::createPerson) +} + +class PersonHandler(private val repository: PersonRepository) { + + // ... + + suspend fun listPeople(request: ServerRequest): ServerResponse { + // ... + } + + suspend fun createPerson(request: ServerRequest): ServerResponse { + // ... + } + + suspend fun getPerson(request: ServerRequest): ServerResponse { + // ... + } +} +``` + +|**1**|Create router using Coroutines router DSL, a Reactive alternative is also available via `router { }`.| +|-----|-----------------------------------------------------------------------------------------------------| + +One way to run a `RouterFunction` is to turn it into an `HttpHandler` and install it +through one of the built-in [server adapters](#webflux-httphandler): + +* `RouterFunctions.toHttpHandler(RouterFunction)` + +* `RouterFunctions.toHttpHandler(RouterFunction, HandlerStrategies)` + +Most applications can run through the WebFlux Java configuration, see [Running a Server](#webflux-fn-running). + +#### 1.5.2. HandlerFunction + +[Web MVC](web.html#webmvc-fn-handler-functions) + +`ServerRequest` and `ServerResponse` are immutable interfaces that offer JDK 8-friendly +access to the HTTP request and response. +Both request and response provide [Reactive Streams](https://www.reactive-streams.org) back pressure +against the body streams. +The request body is represented with a Reactor `Flux` or `Mono`. +The response body is represented with any Reactive Streams `Publisher`, including `Flux` and `Mono`. +For more on that, see [Reactive Libraries](#webflux-reactive-libraries). + +##### ServerRequest + +`ServerRequest` provides access to the HTTP method, URI, headers, and query parameters, +while access to the body is provided through the `body` methods. 
+ +The following example extracts the request body to a `Mono<String>`: + +Java + +``` +Mono<String> string = request.bodyToMono(String.class); +``` + +Kotlin + +``` +val string = request.awaitBody<String>() +``` + +The following example extracts the body to a `Flux<Person>` (or a `Flow<Person>` in Kotlin), +where `Person` objects are decoded from someserialized form, such as JSON or XML: + +Java + +``` +Flux<Person> people = request.bodyToFlux(Person.class); +``` + +Kotlin + +``` +val people = request.bodyToFlow<Person>() +``` + +The preceding examples are shortcuts that use the more general `ServerRequest.body(BodyExtractor)`, +which accepts the `BodyExtractor` functional strategy interface. The utility class`BodyExtractors` provides access to a number of instances. For example, the preceding examples can +also be written as follows: + +Java + +``` +Mono<String> string = request.body(BodyExtractors.toMono(String.class)); +Flux<Person> people = request.body(BodyExtractors.toFlux(Person.class)); +``` + +Kotlin + +``` + val string = request.body(BodyExtractors.toMono(String::class.java)).awaitSingle() + val people = request.body(BodyExtractors.toFlux(Person::class.java)).asFlow() +``` + +The following example shows how to access form data: + +Java + +``` +Mono<MultiValueMap<String, String>> map = request.formData(); +``` + +Kotlin + +``` +val map = request.awaitFormData() +``` + +The following example shows how to access multipart data as a map: + +Java + +``` +Mono<MultiValueMap<String, Part>> map = request.multipartData(); +``` + +Kotlin + +``` +val map = request.awaitMultipartData() +``` + +The following example shows how to access multiparts, one at a time, in streaming fashion: + +Java + +``` +Flux<Part> parts = request.body(BodyExtractors.toParts()); +``` + +Kotlin + +``` +val parts = request.body(BodyExtractors.toParts()).asFlow() +``` + +##### ServerResponse + +`ServerResponse` provides access to the HTTP response and, since it is immutable, you can use +a 
`build` method to create it. You can use the builder to set the response status, to add response +headers, or to provide a body. The following example creates a 200 (OK) response with JSON +content: + +Java + +``` +Mono<Person> person = ... +ServerResponse.ok().contentType(MediaType.APPLICATION_JSON).body(person, Person.class); +``` + +Kotlin + +``` +val person: Person = ... +ServerResponse.ok().contentType(MediaType.APPLICATION_JSON).bodyValue(person) +``` + +The following example shows how to build a 201 (CREATED) response with a `Location` header and no body: + +Java + +``` +URI location = ... +ServerResponse.created(location).build(); +``` + +Kotlin + +``` +val location: URI = ... +ServerResponse.created(location).build() +``` + +Depending on the codec used, it is possible to pass hint parameters to customize how the +body is serialized or deserialized. For example, to specify a [Jackson JSON view](https://www.baeldung.com/jackson-json-view-annotation): + +Java + +``` +ServerResponse.ok().hint(Jackson2CodecSupport.JSON_VIEW_HINT, MyJacksonView.class).body(...); +``` + +Kotlin + +``` +ServerResponse.ok().hint(Jackson2CodecSupport.JSON_VIEW_HINT, MyJacksonView::class.java).body(...) +``` + +##### Handler Classes + +We can write a handler function as a lambda, as the following example shows: + +Java + +``` +HandlerFunction<ServerResponse> helloWorld = + request -> ServerResponse.ok().bodyValue("Hello World"); +``` + +Kotlin + +``` +val helloWorld = HandlerFunction<ServerResponse> { ServerResponse.ok().bodyValue("Hello World") } +``` + +That is convenient, but in an application we need multiple functions, and multiple inline +lambda’s can get messy. +Therefore, it is useful to group related handler functions together into a handler class, which +has a similar role as `@Controller` in an annotation-based application. 
+For example, the following class exposes a reactive `Person` repository: + +Java + +``` +import static org.springframework.http.MediaType.APPLICATION_JSON; +import static org.springframework.web.reactive.function.server.ServerResponse.ok; + +public class PersonHandler { + + private final PersonRepository repository; + + public PersonHandler(PersonRepository repository) { + this.repository = repository; + } + + public Mono<ServerResponse> listPeople(ServerRequest request) { (1) + Flux<Person> people = repository.allPeople(); + return ok().contentType(APPLICATION_JSON).body(people, Person.class); + } + + public Mono<ServerResponse> createPerson(ServerRequest request) { (2) + Mono<Person> person = request.bodyToMono(Person.class); + return ok().build(repository.savePerson(person)); + } + + public Mono<ServerResponse> getPerson(ServerRequest request) { (3) + int personId = Integer.valueOf(request.pathVariable("id")); + return repository.getPerson(personId) + .flatMap(person -> ok().contentType(APPLICATION_JSON).bodyValue(person)) + .switchIfEmpty(ServerResponse.notFound().build()); + } +} +``` + +|**1**| `listPeople` is a handler function that returns all `Person` objects found in the repository as<br/>JSON. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|`createPerson` is a handler function that stores a new `Person` contained in the request body.<br/>Note that `PersonRepository.savePerson(Person)` returns `Mono<Void>`: an empty `Mono` that emits<br/>a completion signal when the person has been read from the request and stored. 
So we use the`build(Publisher<Void>)` method to send a response when that completion signal is received (that is,<br/>when the `Person` has been saved).| +|**3**| `getPerson` is a handler function that returns a single person, identified by the `id` path<br/>variable. We retrieve that `Person` from the repository and create a JSON response, if it is<br/>found. If it is not found, we use `switchIfEmpty(Mono<T>)` to return a 404 Not Found response. | + +Kotlin + +``` +class PersonHandler(private val repository: PersonRepository) { + + suspend fun listPeople(request: ServerRequest): ServerResponse { (1) + val people: Flow<Person> = repository.allPeople() + return ok().contentType(APPLICATION_JSON).bodyAndAwait(people); + } + + suspend fun createPerson(request: ServerRequest): ServerResponse { (2) + val person = request.awaitBody<Person>() + repository.savePerson(person) + return ok().buildAndAwait() + } + + suspend fun getPerson(request: ServerRequest): ServerResponse { (3) + val personId = request.pathVariable("id").toInt() + return repository.getPerson(personId)?.let { ok().contentType(APPLICATION_JSON).bodyValueAndAwait(it) } + ?: ServerResponse.notFound().buildAndAwait() + + } +} +``` + +|**1**| `listPeople` is a handler function that returns all `Person` objects found in the repository as<br/>JSON. | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| `createPerson` is a handler function that stores a new `Person` contained in the request body.<br/>Note that `PersonRepository.savePerson(Person)` is a suspending function with no return type. | +|**3**|`getPerson` is a handler function that returns a single person, identified by the `id` path<br/>variable. 
We retrieve that `Person` from the repository and create a JSON response, if it is<br/>found. If it is not found, we return a 404 Not Found response.| + +##### Validation + +A functional endpoint can use Spring’s [validation facilities](core.html#validation) to +apply validation to the request body. For example, given a custom Spring[Validator](core.html#validation) implementation for a `Person`: + +Java + +``` +public class PersonHandler { + + private final Validator validator = new PersonValidator(); (1) + + // ... + + public Mono<ServerResponse> createPerson(ServerRequest request) { + Mono<Person> person = request.bodyToMono(Person.class).doOnNext(this::validate); (2) + return ok().build(repository.savePerson(person)); + } + + private void validate(Person person) { + Errors errors = new BeanPropertyBindingResult(person, "person"); + validator.validate(person, errors); + if (errors.hasErrors()) { + throw new ServerWebInputException(errors.toString()); (3) + } + } +} +``` + +|**1**| Create `Validator` instance. | +|-----|-----------------------------------| +|**2**| Apply validation. | +|**3**|Raise exception for a 400 response.| + +Kotlin + +``` +class PersonHandler(private val repository: PersonRepository) { + + private val validator = PersonValidator() (1) + + // ... + + suspend fun createPerson(request: ServerRequest): ServerResponse { + val person = request.awaitBody<Person>() + validate(person) (2) + repository.savePerson(person) + return ok().buildAndAwait() + } + + private fun validate(person: Person) { + val errors: Errors = BeanPropertyBindingResult(person, "person"); + validator.validate(person, errors); + if (errors.hasErrors()) { + throw ServerWebInputException(errors.toString()) (3) + } + } +} +``` + +|**1**| Create `Validator` instance. | +|-----|-----------------------------------| +|**2**| Apply validation. 
| +|**3**|Raise exception for a 400 response.| + +Handlers can also use the standard bean validation API (JSR-303) by creating and injecting +a global `Validator` instance based on `LocalValidatorFactoryBean`. +See [Spring Validation](core.html#validation-beanvalidation). + +#### 1.5.3. `RouterFunction` + +[Web MVC](web.html#webmvc-fn-router-functions) + +Router functions are used to route the requests to the corresponding `HandlerFunction`. +Typically, you do not write router functions yourself, but rather use a method on the`RouterFunctions` utility class to create one.`RouterFunctions.route()` (no parameters) provides you with a fluent builder for creating a router +function, whereas `RouterFunctions.route(RequestPredicate, HandlerFunction)` offers a direct way +to create a router. + +Generally, it is recommended to use the `route()` builder, as it provides +convenient short-cuts for typical mapping scenarios without requiring hard-to-discover +static imports. +For instance, the router function builder offers the method `GET(String, HandlerFunction)` to create a mapping for GET requests; and `POST(String, HandlerFunction)` for POSTs. + +Besides HTTP method-based mapping, the route builder offers a way to introduce additional +predicates when mapping to requests. +For each HTTP method there is an overloaded variant that takes a `RequestPredicate` as a +parameter, though which additional constraints can be expressed. + +##### Predicates + +You can write your own `RequestPredicate`, but the `RequestPredicates` utility class +offers commonly used implementations, based on the request path, HTTP method, content-type, +and so on. 
+The following example uses a request predicate to create a constraint based on the `Accept`header: + +Java + +``` +RouterFunction<ServerResponse> route = RouterFunctions.route() + .GET("/hello-world", accept(MediaType.TEXT_PLAIN), + request -> ServerResponse.ok().bodyValue("Hello World")).build(); +``` + +Kotlin + +``` +val route = coRouter { + GET("/hello-world", accept(TEXT_PLAIN)) { + ServerResponse.ok().bodyValueAndAwait("Hello World") + } +} +``` + +You can compose multiple request predicates together by using: + +* `RequestPredicate.and(RequestPredicate)` — both must match. + +* `RequestPredicate.or(RequestPredicate)` — either can match. + +Many of the predicates from `RequestPredicates` are composed. +For example, `RequestPredicates.GET(String)` is composed from `RequestPredicates.method(HttpMethod)`and `RequestPredicates.path(String)`. +The example shown above also uses two request predicates, as the builder uses`RequestPredicates.GET` internally, and composes that with the `accept` predicate. + +##### Routes + +Router functions are evaluated in order: if the first route does not match, the +second is evaluated, and so on. +Therefore, it makes sense to declare more specific routes before general ones. +This is also important when registering router functions as Spring beans, as will +be described later. +Note that this behavior is different from the annotation-based programming model, where the +"most specific" controller method is picked automatically. + +When using the router function builder, all defined routes are composed into one`RouterFunction` that is returned from `build()`. +There are also other ways to compose multiple router functions together: + +* `add(RouterFunction)` on the `RouterFunctions.route()` builder + +* `RouterFunction.and(RouterFunction)` + +* `RouterFunction.andRoute(RequestPredicate, HandlerFunction)` — shortcut for`RouterFunction.and()` with nested `RouterFunctions.route()`. 
+ +The following example shows the composition of four routes: + +Java + +``` +import static org.springframework.http.MediaType.APPLICATION_JSON; +import static org.springframework.web.reactive.function.server.RequestPredicates.*; + +PersonRepository repository = ... +PersonHandler handler = new PersonHandler(repository); + +RouterFunction<ServerResponse> otherRoute = ... + +RouterFunction<ServerResponse> route = route() + .GET("/person/{id}", accept(APPLICATION_JSON), handler::getPerson) (1) + .GET("/person", accept(APPLICATION_JSON), handler::listPeople) (2) + .POST("/person", handler::createPerson) (3) + .add(otherRoute) (4) + .build(); +``` + +|**1**|`GET /person/{id}` with an `Accept` header that matches JSON is routed to`PersonHandler.getPerson`| +|-----|--------------------------------------------------------------------------------------------------| +|**2**| `GET /person` with an `Accept` header that matches JSON is routed to`PersonHandler.listPeople` | +|**3**| `POST /person` with no additional predicates is mapped to`PersonHandler.createPerson`, and | +|**4**| `otherRoute` is a router function that is created elsewhere, and added to the route built. | + +Kotlin + +``` +import org.springframework.http.MediaType.APPLICATION_JSON + +val repository: PersonRepository = ... 
+val handler = PersonHandler(repository); + +val otherRoute: RouterFunction<ServerResponse> = coRouter { } + +val route = coRouter { + GET("/person/{id}", accept(APPLICATION_JSON), handler::getPerson) (1) + GET("/person", accept(APPLICATION_JSON), handler::listPeople) (2) + POST("/person", handler::createPerson) (3) +}.and(otherRoute) (4) +``` + +|**1**|`GET /person/{id}` with an `Accept` header that matches JSON is routed to`PersonHandler.getPerson`| +|-----|--------------------------------------------------------------------------------------------------| +|**2**| `GET /person` with an `Accept` header that matches JSON is routed to`PersonHandler.listPeople` | +|**3**| `POST /person` with no additional predicates is mapped to`PersonHandler.createPerson`, and | +|**4**| `otherRoute` is a router function that is created elsewhere, and added to the route built. | + +##### Nested Routes + +It is common for a group of router functions to have a shared predicate, for instance a +shared path. In the example above, the shared predicate would be a path predicate that +matches `/person`, used by three of the routes. When using annotations, you would remove +this duplication by using a type-level `@RequestMapping` annotation that maps to`/person`. In WebFlux.fn, path predicates can be shared through the `path` method on the +router function builder. 
For instance, the last few lines of the example above can be +improved in the following way by using nested routes: + +Java + +``` +RouterFunction<ServerResponse> route = route() + .path("/person", builder -> builder (1) + .GET("/{id}", accept(APPLICATION_JSON), handler::getPerson) + .GET(accept(APPLICATION_JSON), handler::listPeople) + .POST("/person", handler::createPerson)) + .build(); +``` + +|**1**|Note that second parameter of `path` is a consumer that takes the a router builder.| +|-----|-----------------------------------------------------------------------------------| + +Kotlin + +``` +val route = coRouter { + "/person".nest { + GET("/{id}", accept(APPLICATION_JSON), handler::getPerson) + GET(accept(APPLICATION_JSON), handler::listPeople) + POST("/person", handler::createPerson) + } +} +``` + +Though path-based nesting is the most common, you can nest on any kind of predicate by using +the `nest` method on the builder. +The above still contains some duplication in the form of the shared `Accept`-header predicate. +We can further improve by using the `nest` method together with `accept`: + +Java + +``` +RouterFunction<ServerResponse> route = route() + .path("/person", b1 -> b1 + .nest(accept(APPLICATION_JSON), b2 -> b2 + .GET("/{id}", handler::getPerson) + .GET(handler::listPeople)) + .POST("/person", handler::createPerson)) + .build(); +``` + +Kotlin + +``` +val route = coRouter { + "/person".nest { + accept(APPLICATION_JSON).nest { + GET("/{id}", handler::getPerson) + GET(handler::listPeople) + POST("/person", handler::createPerson) + } + } +} +``` + +#### 1.5.4. Running a Server + +[Web MVC](web.html#webmvc-fn-running) + +How do you run a router function in an HTTP server? 
A simple option is to convert a router +function to an `HttpHandler` by using one of the following: + +* `RouterFunctions.toHttpHandler(RouterFunction)` + +* `RouterFunctions.toHttpHandler(RouterFunction, HandlerStrategies)` + +You can then use the returned `HttpHandler` with a number of server adapters by following[HttpHandler](#webflux-httphandler) for server-specific instructions. + +A more typical option, also used by Spring Boot, is to run with a[`DispatcherHandler`](#webflux-dispatcher-handler)-based setup through the[WebFlux Config](#webflux-config), which uses Spring configuration to declare the +components required to process requests. The WebFlux Java configuration declares the following +infrastructure components to support functional endpoints: + +* `RouterFunctionMapping`: Detects one or more `RouterFunction<?>` beans in the Spring + configuration, [orders them](core.html#beans-factory-ordered), combines them through`RouterFunction.andOther`, and routes requests to the resulting composed `RouterFunction`. + +* `HandlerFunctionAdapter`: Simple adapter that lets `DispatcherHandler` invoke + a `HandlerFunction` that was mapped to a request. + +* `ServerResponseResultHandler`: Handles the result from the invocation of a`HandlerFunction` by invoking the `writeTo` method of the `ServerResponse`. + +The preceding components let functional endpoints fit within the `DispatcherHandler` request +processing lifecycle and also (potentially) run side by side with annotated controllers, if +any are declared. It is also how functional endpoints are enabled by the Spring Boot WebFlux +starter. + +The following example shows a WebFlux Java configuration (see[DispatcherHandler](#webflux-dispatcher-handler) for how to run it): + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Bean + public RouterFunction<?> routerFunctionA() { + // ... + } + + @Bean + public RouterFunction<?> routerFunctionB() { + // ... 
+ } + + // ... + + @Override + public void configureHttpMessageCodecs(ServerCodecConfigurer configurer) { + // configure message conversion... + } + + @Override + public void addCorsMappings(CorsRegistry registry) { + // configure CORS... + } + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + // configure view resolution for HTML rendering... + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + @Bean + fun routerFunctionA(): RouterFunction<*> { + // ... + } + + @Bean + fun routerFunctionB(): RouterFunction<*> { + // ... + } + + // ... + + override fun configureHttpMessageCodecs(configurer: ServerCodecConfigurer) { + // configure message conversion... + } + + override fun addCorsMappings(registry: CorsRegistry) { + // configure CORS... + } + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + // configure view resolution for HTML rendering... + } +} +``` + +#### 1.5.5. Filtering Handler Functions + +[Web MVC](web.html#webmvc-fn-handler-filter-function) + +You can filter handler functions by using the `before`, `after`, or `filter` methods on the routing +function builder. +With annotations, you can achieve similar functionality by using `@ControllerAdvice`, a `ServletFilter`, or both. +The filter will apply to all routes that are built by the builder. +This means that filters defined in nested routes do not apply to "top-level" routes. 
+For instance, consider the following example: + +Java + +``` +RouterFunction<ServerResponse> route = route() + .path("/person", b1 -> b1 + .nest(accept(APPLICATION_JSON), b2 -> b2 + .GET("/{id}", handler::getPerson) + .GET(handler::listPeople) + .before(request -> ServerRequest.from(request) (1) + .header("X-RequestHeader", "Value") + .build())) + .POST("/person", handler::createPerson)) + .after((request, response) -> logResponse(response)) (2) + .build(); +``` + +|**1**| The `before` filter that adds a custom request header is only applied to the two GET routes. | +|-----|----------------------------------------------------------------------------------------------| +|**2**|The `after` filter that logs the response is applied to all routes, including the nested ones.| + +Kotlin + +``` +val route = router { + "/person".nest { + GET("/{id}", handler::getPerson) + GET("", handler::listPeople) + before { (1) + ServerRequest.from(it) + .header("X-RequestHeader", "Value").build() + } + POST("/person", handler::createPerson) + after { _, response -> (2) + logResponse(response) + } + } +} +``` + +|**1**| The `before` filter that adds a custom request header is only applied to the two GET routes. | +|-----|----------------------------------------------------------------------------------------------| +|**2**|The `after` filter that logs the response is applied to all routes, including the nested ones.| + +The `filter` method on the router builder takes a `HandlerFilterFunction`: a +function that takes a `ServerRequest` and `HandlerFunction` and returns a `ServerResponse`. +The handler function parameter represents the next element in the chain. +This is typically the handler that is routed to, but it can also be another +filter if multiple are applied. + +Now we can add a simple security filter to our route, assuming that we have a `SecurityManager` that +can determine whether a particular path is allowed. 
+The following example shows how to do so: + +Java + +``` +SecurityManager securityManager = ... + +RouterFunction<ServerResponse> route = route() + .path("/person", b1 -> b1 + .nest(accept(APPLICATION_JSON), b2 -> b2 + .GET("/{id}", handler::getPerson) + .GET(handler::listPeople)) + .POST("/person", handler::createPerson)) + .filter((request, next) -> { + if (securityManager.allowAccessTo(request.path())) { + return next.handle(request); + } + else { + return ServerResponse.status(UNAUTHORIZED).build(); + } + }) + .build(); +``` + +Kotlin + +``` +val securityManager: SecurityManager = ... + +val route = router { + ("/person" and accept(APPLICATION_JSON)).nest { + GET("/{id}", handler::getPerson) + GET("", handler::listPeople) + POST("/person", handler::createPerson) + filter { request, next -> + if (securityManager.allowAccessTo(request.path())) { + next(request) + } + else { + status(UNAUTHORIZED).build(); + } + } + } + } +``` + +The preceding example demonstrates that invoking the `next.handle(ServerRequest)` is optional. +We only let the handler function be run when access is allowed. + +Besides using the `filter` method on the router function builder, it is possible to apply a +filter to an existing router function via `RouterFunction.filter(HandlerFilterFunction)`. + +| |CORS support for functional endpoints is provided through a dedicated[`CorsWebFilter`](webflux-cors.html#webflux-cors-webfilter).| +|---|---------------------------------------------------------------------------------------------------------------------------------| + +### 1.6. URI Links + +[Web MVC](web.html#mvc-uri-building) + +This section describes various options available in the Spring Framework to prepare URIs. + +#### 1.6.1. 
UriComponents + +Spring MVC and Spring WebFlux + +`UriComponentsBuilder` helps to build URI’s from URI templates with variables, as the following example shows: + +Java + +``` +UriComponents uriComponents = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") (1) + .queryParam("q", "{q}") (2) + .encode() (3) + .build(); (4) + +URI uri = uriComponents.expand("Westin", "123").toUri(); (5) +``` + +|**1**| Static factory method with a URI template. | +|-----|-----------------------------------------------------------| +|**2**| Add or replace URI components. | +|**3**|Request to have the URI template and URI variables encoded.| +|**4**| Build a `UriComponents`. | +|**5**| Expand variables and obtain the `URI`. | + +Kotlin + +``` +val uriComponents = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") (1) + .queryParam("q", "{q}") (2) + .encode() (3) + .build() (4) + +val uri = uriComponents.expand("Westin", "123").toUri() (5) +``` + +|**1**| Static factory method with a URI template. | +|-----|-----------------------------------------------------------| +|**2**| Add or replace URI components. | +|**3**|Request to have the URI template and URI variables encoded.| +|**4**| Build a `UriComponents`. | +|**5**| Expand variables and obtain the `URI`. 
| + +The preceding example can be consolidated into one chain and shortened with `buildAndExpand`, +as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") + .queryParam("q", "{q}") + .encode() + .buildAndExpand("Westin", "123") + .toUri(); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") + .queryParam("q", "{q}") + .encode() + .buildAndExpand("Westin", "123") + .toUri() +``` + +You can shorten it further by going directly to a URI (which implies encoding), +as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") + .queryParam("q", "{q}") + .build("Westin", "123"); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") + .queryParam("q", "{q}") + .build("Westin", "123") +``` + +You can shorten it further still with a full URI template, as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}?q={q}") + .build("Westin", "123"); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}?q={q}") + .build("Westin", "123") +``` + +#### 1.6.2. UriBuilder + +Spring MVC and Spring WebFlux + +[`UriComponentsBuilder`](#web-uricomponents) implements `UriBuilder`. You can create a`UriBuilder`, in turn, with a `UriBuilderFactory`. Together, `UriBuilderFactory` and`UriBuilder` provide a pluggable mechanism to build URIs from URI templates, based on +shared configuration, such as a base URL, encoding preferences, and other details. + +You can configure `RestTemplate` and `WebClient` with a `UriBuilderFactory`to customize the preparation of URIs. 
`DefaultUriBuilderFactory` is a default +implementation of `UriBuilderFactory` that uses `UriComponentsBuilder` internally and +exposes shared configuration options. + +The following example shows how to configure a `RestTemplate`: + +Java + +``` +// import org.springframework.web.util.DefaultUriBuilderFactory.EncodingMode; + +String baseUrl = "https://example.org"; +DefaultUriBuilderFactory factory = new DefaultUriBuilderFactory(baseUrl); +factory.setEncodingMode(EncodingMode.TEMPLATE_AND_VALUES); + +RestTemplate restTemplate = new RestTemplate(); +restTemplate.setUriTemplateHandler(factory); +``` + +Kotlin + +``` +// import org.springframework.web.util.DefaultUriBuilderFactory.EncodingMode + +val baseUrl = "https://example.org" +val factory = DefaultUriBuilderFactory(baseUrl) +factory.encodingMode = EncodingMode.TEMPLATE_AND_VALUES + +val restTemplate = RestTemplate() +restTemplate.uriTemplateHandler = factory +``` + +The following example configures a `WebClient`: + +Java + +``` +// import org.springframework.web.util.DefaultUriBuilderFactory.EncodingMode; + +String baseUrl = "https://example.org"; +DefaultUriBuilderFactory factory = new DefaultUriBuilderFactory(baseUrl); +factory.setEncodingMode(EncodingMode.TEMPLATE_AND_VALUES); + +WebClient client = WebClient.builder().uriBuilderFactory(factory).build(); +``` + +Kotlin + +``` +// import org.springframework.web.util.DefaultUriBuilderFactory.EncodingMode + +val baseUrl = "https://example.org" +val factory = DefaultUriBuilderFactory(baseUrl) +factory.encodingMode = EncodingMode.TEMPLATE_AND_VALUES + +val client = WebClient.builder().uriBuilderFactory(factory).build() +``` + +In addition, you can also use `DefaultUriBuilderFactory` directly. 
It is similar to using`UriComponentsBuilder` but, instead of static factory methods, it is an actual instance +that holds configuration and preferences, as the following example shows: + +Java + +``` +String baseUrl = "https://example.com"; +DefaultUriBuilderFactory uriBuilderFactory = new DefaultUriBuilderFactory(baseUrl); + +URI uri = uriBuilderFactory.uriString("/hotels/{hotel}") + .queryParam("q", "{q}") + .build("Westin", "123"); +``` + +Kotlin + +``` +val baseUrl = "https://example.com" +val uriBuilderFactory = DefaultUriBuilderFactory(baseUrl) + +val uri = uriBuilderFactory.uriString("/hotels/{hotel}") + .queryParam("q", "{q}") + .build("Westin", "123") +``` + +#### 1.6.3. URI Encoding + +Spring MVC and Spring WebFlux + +`UriComponentsBuilder` exposes encoding options at two levels: + +* [UriComponentsBuilder#encode()](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/util/UriComponentsBuilder.html#encode--): + Pre-encodes the URI template first and then strictly encodes URI variables when expanded. + +* [UriComponents#encode()](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/util/UriComponents.html#encode--): + Encodes URI components *after* URI variables are expanded. + +Both options replace non-ASCII and illegal characters with escaped octets. However, the first option +also replaces characters with reserved meaning that appear in URI variables. + +| |Consider ";", which is legal in a path but has reserved meaning. The first option replaces<br/>";" with "%3B" in URI variables but not in the URI template. 
By contrast, the second option never<br/>replaces ";", since it is a legal character in a path.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For most cases, the first option is likely to give the expected result, because it treats URI +variables as opaque data to be fully encoded, while the second option is useful if URI +variables do intentionally contain reserved characters. The second option is also useful +when not expanding URI variables at all since that will also encode anything that +incidentally looks like a URI variable. + +The following example uses the first option: + +Java + +``` +URI uri = UriComponentsBuilder.fromPath("/hotel list/{city}") + .queryParam("q", "{q}") + .encode() + .buildAndExpand("New York", "foo+bar") + .toUri(); + +// Result is "/hotel%20list/New%20York?q=foo%2Bbar" +``` + +Kotlin + +``` +val uri = UriComponentsBuilder.fromPath("/hotel list/{city}") + .queryParam("q", "{q}") + .encode() + .buildAndExpand("New York", "foo+bar") + .toUri() + +// Result is "/hotel%20list/New%20York?q=foo%2Bbar" +``` + +You can shorten the preceding example by going directly to the URI (which implies encoding), +as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder.fromPath("/hotel list/{city}") + .queryParam("q", "{q}") + .build("New York", "foo+bar"); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder.fromPath("/hotel list/{city}") + .queryParam("q", "{q}") + .build("New York", "foo+bar") +``` + +You can shorten it further still with a full URI template, as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder.fromUriString("/hotel list/{city}?q={q}") + .build("New York", "foo+bar"); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder.fromUriString("/hotel list/{city}?q={q}") + 
.build("New York", "foo+bar") +``` + +The `WebClient` and the `RestTemplate` expand and encode URI templates internally through +the `UriBuilderFactory` strategy. Both can be configured with a custom strategy, +as the following example shows: + +Java + +``` +String baseUrl = "https://example.com"; +DefaultUriBuilderFactory factory = new DefaultUriBuilderFactory(baseUrl) +factory.setEncodingMode(EncodingMode.TEMPLATE_AND_VALUES); + +// Customize the RestTemplate.. +RestTemplate restTemplate = new RestTemplate(); +restTemplate.setUriTemplateHandler(factory); + +// Customize the WebClient.. +WebClient client = WebClient.builder().uriBuilderFactory(factory).build(); +``` + +Kotlin + +``` +val baseUrl = "https://example.com" +val factory = DefaultUriBuilderFactory(baseUrl).apply { + encodingMode = EncodingMode.TEMPLATE_AND_VALUES +} + +// Customize the RestTemplate.. +val restTemplate = RestTemplate().apply { + uriTemplateHandler = factory +} + +// Customize the WebClient.. +val client = WebClient.builder().uriBuilderFactory(factory).build() +``` + +The `DefaultUriBuilderFactory` implementation uses `UriComponentsBuilder` internally to +expand and encode URI templates. As a factory, it provides a single place to configure +the approach to encoding, based on one of the below encoding modes: + +* `TEMPLATE_AND_VALUES`: Uses `UriComponentsBuilder#encode()`, corresponding to + the first option in the earlier list, to pre-encode the URI template and strictly encode URI variables when + expanded. + +* `VALUES_ONLY`: Does not encode the URI template and, instead, applies strict encoding + to URI variables through `UriUtils#encodeUriVariables` prior to expanding them into the + template. + +* `URI_COMPONENT`: Uses `UriComponents#encode()`, corresponding to the second option in the earlier list, to + encode URI component value *after* URI variables are expanded. + +* `NONE`: No encoding is applied. 
+ +The `RestTemplate` is set to `EncodingMode.URI_COMPONENT` for historic +reasons and for backwards compatibility. The `WebClient` relies on the default value +in `DefaultUriBuilderFactory`, which was changed from `EncodingMode.URI_COMPONENT` in +5.0.x to `EncodingMode.TEMPLATE_AND_VALUES` in 5.1. + +### 1.7. CORS + +[Web MVC](web.html#mvc-cors) + +Spring WebFlux lets you handle CORS (Cross-Origin Resource Sharing). This section +describes how to do so. + +#### 1.7.1. Introduction + +[Web MVC](web.html#mvc-cors-intro) + +For security reasons, browsers prohibit AJAX calls to resources outside the current origin. +For example, you could have your bank account in one tab and evil.com in another. Scripts +from evil.com should not be able to make AJAX requests to your bank API with your +credentials — for example, withdrawing money from your account! + +Cross-Origin Resource Sharing (CORS) is a [W3C specification](https://www.w3.org/TR/cors/)implemented by [most browsers](https://caniuse.com/#feat=cors) that lets you specify +what kind of cross-domain requests are authorized, rather than using less secure and less +powerful workarounds based on IFRAME or JSONP. + +#### 1.7.2. Processing + +[Web MVC](web.html#mvc-cors-processing) + +The CORS specification distinguishes between preflight, simple, and actual requests. +To learn how CORS works, you can read[this article](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS), among +many others, or see the specification for more details. + +Spring WebFlux `HandlerMapping` implementations provide built-in support for CORS. After successfully +mapping a request to a handler, a `HandlerMapping` checks the CORS configuration for the +given request and handler and takes further actions. Preflight requests are handled +directly, while simple and actual CORS requests are intercepted, validated, and have the +required CORS response headers set. 
+ +In order to enable cross-origin requests (that is, the `Origin` header is present and +differs from the host of the request), you need to have some explicitly declared CORS +configuration. If no matching CORS configuration is found, preflight requests are +rejected. No CORS headers are added to the responses of simple and actual CORS requests +and, consequently, browsers reject them. + +Each `HandlerMapping` can be[configured](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/reactive/handler/AbstractHandlerMapping.html#setCorsConfigurations-java.util.Map-)individually with URL pattern-based `CorsConfiguration` mappings. In most cases, applications +use the WebFlux Java configuration to declare such mappings, which results in a single, +global map passed to all `HandlerMapping` implementations. + +You can combine global CORS configuration at the `HandlerMapping` level with more +fine-grained, handler-level CORS configuration. For example, annotated controllers can use +class- or method-level `@CrossOrigin` annotations (other handlers can implement`CorsConfigurationSource`). + +The rules for combining global and local configuration are generally additive — for example, +all global and all local origins. For those attributes where only a single value can be +accepted, such as `allowCredentials` and `maxAge`, the local overrides the global value. See[`CorsConfiguration#combine(CorsConfiguration)`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/cors/CorsConfiguration.html#combine-org.springframework.web.cors.CorsConfiguration-)for more details. 
+ +| |To learn more from the source or to make advanced customizations, see:<br/><br/>* `CorsConfiguration`<br/><br/>* `CorsProcessor` and `DefaultCorsProcessor`<br/><br/>* `AbstractHandlerMapping`| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.7.3. `@CrossOrigin` + +[Web MVC](web.html#mvc-cors-controller) + +The [`@CrossOrigin`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/bind/annotation/CrossOrigin.html)annotation enables cross-origin requests on annotated controller methods, as the +following example shows: + +Java + +``` +@RestController +@RequestMapping("/account") +public class AccountController { + + @CrossOrigin + @GetMapping("/{id}") + public Mono<Account> retrieve(@PathVariable Long id) { + // ... + } + + @DeleteMapping("/{id}") + public Mono<Void> remove(@PathVariable Long id) { + // ... + } +} +``` + +Kotlin + +``` +@RestController +@RequestMapping("/account") +class AccountController { + + @CrossOrigin + @GetMapping("/{id}") + suspend fun retrieve(@PathVariable id: Long): Account { + // ... + } + + @DeleteMapping("/{id}") + suspend fun remove(@PathVariable id: Long) { + // ... + } +} +``` + +By default, `@CrossOrigin` allows: + +* All origins. + +* All headers. + +* All HTTP methods to which the controller method is mapped. + +`allowCredentials` is not enabled by default, since that establishes a trust level +that exposes sensitive user-specific information (such as cookies and CSRF tokens) and +should be used only where appropriate. When it is enabled either `allowOrigins` must be +set to one or more specific domain (but not the special value `"*"`) or alternatively +the `allowOriginPatterns` property may be used to match to a dynamic set of origins. + +`maxAge` is set to 30 minutes. 
+ +`@CrossOrigin` is supported at the class level, too, and inherited by all methods. +The following example specifies a certain domain and sets `maxAge` to an hour: + +Java + +``` +@CrossOrigin(origins = "https://domain2.com", maxAge = 3600) +@RestController +@RequestMapping("/account") +public class AccountController { + + @GetMapping("/{id}") + public Mono<Account> retrieve(@PathVariable Long id) { + // ... + } + + @DeleteMapping("/{id}") + public Mono<Void> remove(@PathVariable Long id) { + // ... + } +} +``` + +Kotlin + +``` +@CrossOrigin("https://domain2.com", maxAge = 3600) +@RestController +@RequestMapping("/account") +class AccountController { + + @GetMapping("/{id}") + suspend fun retrieve(@PathVariable id: Long): Account { + // ... + } + + @DeleteMapping("/{id}") + suspend fun remove(@PathVariable id: Long) { + // ... + } +} +``` + +You can use `@CrossOrigin` at both the class and the method level, +as the following example shows: + +Java + +``` +@CrossOrigin(maxAge = 3600) (1) +@RestController +@RequestMapping("/account") +public class AccountController { + + @CrossOrigin("https://domain2.com") (2) + @GetMapping("/{id}") + public Mono<Account> retrieve(@PathVariable Long id) { + // ... + } + + @DeleteMapping("/{id}") + public Mono<Void> remove(@PathVariable Long id) { + // ... + } +} +``` + +|**1**|Using `@CrossOrigin` at the class level. | +|-----|-----------------------------------------| +|**2**|Using `@CrossOrigin` at the method level.| + +Kotlin + +``` +@CrossOrigin(maxAge = 3600) (1) +@RestController +@RequestMapping("/account") +class AccountController { + + @CrossOrigin("https://domain2.com") (2) + @GetMapping("/{id}") + suspend fun retrieve(@PathVariable id: Long): Account { + // ... + } + + @DeleteMapping("/{id}") + suspend fun remove(@PathVariable id: Long) { + // ... + } +} +``` + +|**1**|Using `@CrossOrigin` at the class level. 
| +|-----|-----------------------------------------| +|**2**|Using `@CrossOrigin` at the method level.| + +#### 1.7.4. Global Configuration + +[Web MVC](web.html#mvc-cors-global) + +In addition to fine-grained, controller method-level configuration, you probably want to +define some global CORS configuration, too. You can set URL-based `CorsConfiguration`mappings individually on any `HandlerMapping`. Most applications, however, use the +WebFlux Java configuration to do that. + +By default global configuration enables the following: + +* All origins. + +* All headers. + +* `GET`, `HEAD`, and `POST` methods. + +`allowedCredentials` is not enabled by default, since that establishes a trust level +that exposes sensitive user-specific information( such as cookies and CSRF tokens) and +should be used only where appropriate. When it is enabled either `allowOrigins` must be +set to one or more specific domain (but not the special value `"*"`) or alternatively +the `allowOriginPatterns` property may be used to match to a dynamic set of origins. + +`maxAge` is set to 30 minutes. + +To enable CORS in the WebFlux Java configuration, you can use the `CorsRegistry` callback, +as the following example shows: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void addCorsMappings(CorsRegistry registry) { + + registry.addMapping("/api/**") + .allowedOrigins("https://domain2.com") + .allowedMethods("PUT", "DELETE") + .allowedHeaders("header1", "header2", "header3") + .exposedHeaders("header1", "header2") + .allowCredentials(true).maxAge(3600); + + // Add more mappings... 
+ } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun addCorsMappings(registry: CorsRegistry) { + + registry.addMapping("/api/**") + .allowedOrigins("https://domain2.com") + .allowedMethods("PUT", "DELETE") + .allowedHeaders("header1", "header2", "header3") + .exposedHeaders("header1", "header2") + .allowCredentials(true).maxAge(3600) + + // Add more mappings... + } +} +``` + +#### 1.7.5. CORS `WebFilter` + +[Web MVC](web.html#mvc-cors-filter) + +You can apply CORS support through the built-in[`CorsWebFilter`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/cors/reactive/CorsWebFilter.html), which is a +good fit with [functional endpoints](#webflux-fn). + +| |If you try to use the `CorsFilter` with Spring Security, keep in mind that Spring<br/>Security has[built-in support](https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#cors)for CORS.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To configure the filter, you can declare a `CorsWebFilter` bean and pass a`CorsConfigurationSource` to its constructor, as the following example shows: + +Java + +``` +@Bean +CorsWebFilter corsFilter() { + + CorsConfiguration config = new CorsConfiguration(); + + // Possibly... + // config.applyPermitDefaultValues() + + config.setAllowCredentials(true); + config.addAllowedOrigin("https://domain1.com"); + config.addAllowedHeader("*"); + config.addAllowedMethod("*"); + + UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource(); + source.registerCorsConfiguration("/**", config); + + return new CorsWebFilter(source); +} +``` + +Kotlin + +``` +@Bean +fun corsFilter(): CorsWebFilter { + + val config = CorsConfiguration() + + // Possibly... 
+ // config.applyPermitDefaultValues() + + config.allowCredentials = true + config.addAllowedOrigin("https://domain1.com") + config.addAllowedHeader("*") + config.addAllowedMethod("*") + + val source = UrlBasedCorsConfigurationSource().apply { + registerCorsConfiguration("/**", config) + } + return CorsWebFilter(source) +} +``` + +### 1.8. Web Security + +[Web MVC](web.html#mvc-web-security) + +The [Spring Security](https://projects.spring.io/spring-security/) project provides support +for protecting web applications from malicious exploits. See the Spring Security +reference documentation, including: + +* [WebFlux Security](https://docs.spring.io/spring-security/site/docs/current/reference/html5/#jc-webflux) + +* [WebFlux Testing Support](https://docs.spring.io/spring-security/site/docs/current/reference/html5/#test-webflux) + +* [CSRF Protection](https://docs.spring.io/spring-security/site/docs/current/reference/html5/#csrf) + +* [Security Response Headers](https://docs.spring.io/spring-security/site/docs/current/reference/html5/#headers) + +### 1.9. View Technologies + +[Web MVC](web.html#mvc-view) + +The use of view technologies in Spring WebFlux is pluggable. Whether you decide to +use Thymeleaf, FreeMarker, or some other view technology is primarily a matter of a +configuration change. This chapter covers the view technologies integrated with Spring +WebFlux. We assume you are already familiar with [View Resolution](#webflux-viewresolution). + +#### 1.9.1. Thymeleaf + +[Web MVC](web.html#mvc-view-thymeleaf) + +Thymeleaf is a modern server-side Java template engine that emphasizes natural HTML +templates that can be previewed in a browser by double-clicking, which is very +helpful for independent work on UI templates (for example, by a designer) without the need for a +running server. Thymeleaf offers an extensive set of features, and it is actively developed +and maintained. 
For a more complete introduction, see the[Thymeleaf](https://www.thymeleaf.org/) project home page. + +The Thymeleaf integration with Spring WebFlux is managed by the Thymeleaf project. The +configuration involves a few bean declarations, such as`SpringResourceTemplateResolver`, `SpringWebFluxTemplateEngine`, and`ThymeleafReactiveViewResolver`. For more details, see[Thymeleaf+Spring](https://www.thymeleaf.org/documentation.html) and the WebFlux integration[announcement](http://forum.thymeleaf.org/Thymeleaf-3-0-8-JUST-PUBLISHED-td4030687.html). + +#### 1.9.2. FreeMarker + +[Web MVC](web.html#mvc-view-freemarker) + +[Apache FreeMarker](https://freemarker.apache.org/) is a template engine for generating any +kind of text output from HTML to email and others. The Spring Framework has built-in +integration for using Spring WebFlux with FreeMarker templates. + +##### View Configuration + +[Web MVC](web.html#mvc-view-freemarker-contextconfig) + +The following example shows how to configure FreeMarker as a view technology: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.freeMarker(); + } + + // Configure FreeMarker... + + @Bean + public FreeMarkerConfigurer freeMarkerConfigurer() { + FreeMarkerConfigurer configurer = new FreeMarkerConfigurer(); + configurer.setTemplateLoaderPath("classpath:/templates/freemarker"); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.freeMarker() + } + + // Configure FreeMarker... 
+ + @Bean + fun freeMarkerConfigurer() = FreeMarkerConfigurer().apply { + setTemplateLoaderPath("classpath:/templates/freemarker") + } +} +``` + +Your templates need to be stored in the directory specified by the `FreeMarkerConfigurer`, +shown in the preceding example. Given the preceding configuration, if your controller +returns the view name, `welcome`, the resolver looks for the`classpath:/templates/freemarker/welcome.ftl` template. + +##### FreeMarker Configuration + +[Web MVC](web.html#mvc-views-freemarker) + +You can pass FreeMarker 'Settings' and 'SharedVariables' directly to the FreeMarker`Configuration` object (which is managed by Spring) by setting the appropriate bean +properties on the `FreeMarkerConfigurer` bean. The `freemarkerSettings` property requires +a `java.util.Properties` object, and the `freemarkerVariables` property requires a`java.util.Map`. The following example shows how to use a `FreeMarkerConfigurer`: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + // ... + + @Bean + public FreeMarkerConfigurer freeMarkerConfigurer() { + Map<String, Object> variables = new HashMap<>(); + variables.put("xml_escape", new XmlEscape()); + + FreeMarkerConfigurer configurer = new FreeMarkerConfigurer(); + configurer.setTemplateLoaderPath("classpath:/templates"); + configurer.setFreemarkerVariables(variables); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + // ... + + @Bean + fun freeMarkerConfigurer() = FreeMarkerConfigurer().apply { + setTemplateLoaderPath("classpath:/templates") + setFreemarkerVariables(mapOf("xml_escape" to XmlEscape())) + } +} +``` + +See the FreeMarker documentation for details of settings and variables as they apply to +the `Configuration` object. 
+
+##### Form Handling
+
+[Web MVC](web.html#mvc-view-freemarker-forms)
+
+Spring provides a tag library for use in JSPs that contains, among others, a`<spring:bind/>` element. This element primarily lets forms display values from
+form-backing objects and show the results of failed validations from a `Validator` in the
+web or business tier. Spring also has support for the same functionality in FreeMarker,
+with additional convenience macros for generating form input elements themselves.
+
+##### The Bind Macros
+
+[Web MVC](web.html#mvc-view-bind-macros)
+
+A standard set of macros are maintained within the `spring-webflux.jar` file for
+FreeMarker, so they are always available to a suitably configured application.
+
+Some of the macros defined in the Spring templating libraries are considered internal
+(private), but no such scoping exists in the macro definitions, making all macros visible
+to calling code and user templates. The following sections concentrate only on the macros
+you need to directly call from within your templates. If you wish to view the macro code
+directly, the file is called `spring.ftl` and is in the`org.springframework.web.reactive.result.view.freemarker` package.
+
+For additional details on binding support, see [Simple
+Binding](web.html#mvc-view-simple-binding) for Spring MVC.
+
+##### Form Macros
+
+For details on Spring’s form macro support for FreeMarker templates, consult the following
+sections of the Spring MVC documentation.
+
+* [Input Macros](web.html#mvc-views-form-macros)
+
+* [Input Fields](web.html#mvc-views-form-macros-input)
+
+* [Selection Fields](web.html#mvc-views-form-macros-select)
+
+* [HTML Escaping](web.html#mvc-views-form-macros-html-escaping)
+
+#### 1.9.3. 
Script Views + +[Web MVC](web.html#mvc-view-script) + +The Spring Framework has a built-in integration for using Spring WebFlux with any +templating library that can run on top of the[JSR-223](https://www.jcp.org/en/jsr/detail?id=223) Java scripting engine. +The following table shows the templating libraries that we have tested on different script engines: + +| Scripting Library | Scripting Engine | +|----------------------------------------------------------------------------------|-----------------------------------------------------| +| [Handlebars](https://handlebarsjs.com/) |[Nashorn](https://openjdk.java.net/projects/nashorn/)| +| [Mustache](https://mustache.github.io/) |[Nashorn](https://openjdk.java.net/projects/nashorn/)| +| [React](https://facebook.github.io/react/) |[Nashorn](https://openjdk.java.net/projects/nashorn/)| +| [EJS](https://www.embeddedjs.com/) |[Nashorn](https://openjdk.java.net/projects/nashorn/)| +| [ERB](https://www.stuartellis.name/articles/erb/) | [JRuby](https://www.jruby.org) | +|[String templates](https://docs.python.org/2/library/string.html#template-strings)| [Jython](https://www.jython.org/) | +| [Kotlin Script templating](https://github.com/sdeleuze/kotlin-script-templating) | [Kotlin](https://kotlinlang.org/) | + +| |The basic rule for integrating any other script engine is that it must implement the`ScriptEngine` and `Invocable` interfaces.| +|---|------------------------------------------------------------------------------------------------------------------------------| + +##### Requirements + +[Web MVC](web.html#mvc-view-script-dependencies) + +You need to have the script engine on your classpath, the details of which vary by script engine: + +* The [Nashorn](https://openjdk.java.net/projects/nashorn/) JavaScript engine is provided with + Java 8+. Using the latest update release available is highly recommended. + +* [JRuby](https://www.jruby.org) should be added as a dependency for Ruby support. 
+ +* [Jython](https://www.jython.org) should be added as a dependency for Python support. + +* `org.jetbrains.kotlin:kotlin-script-util` dependency and a `META-INF/services/javax.script.ScriptEngineFactory`file containing a `org.jetbrains.kotlin.script.jsr223.KotlinJsr223JvmLocalScriptEngineFactory`line should be added for Kotlin script support. See[this example](https://github.com/sdeleuze/kotlin-script-templating) for more detail. + +You need to have the script templating library. One way to do that for JavaScript is +through [WebJars](https://www.webjars.org/). + +##### Script Templates + +[Web MVC](web.html#mvc-view-script-integrate) + +You can declare a `ScriptTemplateConfigurer` bean to specify the script engine to use, +the script files to load, what function to call to render templates, and so on. +The following example uses Mustache templates and the Nashorn JavaScript engine: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.scriptTemplate(); + } + + @Bean + public ScriptTemplateConfigurer configurer() { + ScriptTemplateConfigurer configurer = new ScriptTemplateConfigurer(); + configurer.setEngineName("nashorn"); + configurer.setScripts("mustache.js"); + configurer.setRenderObject("Mustache"); + configurer.setRenderFunction("render"); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.scriptTemplate() + } + + @Bean + fun configurer() = ScriptTemplateConfigurer().apply { + engineName = "nashorn" + setScripts("mustache.js") + renderObject = "Mustache" + renderFunction = "render" + } +} +``` + +The `render` function is called with the following parameters: + +* `String template`: The template content + +* `Map model`: The view model + +* `RenderingContext 
renderingContext`: The[`RenderingContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/view/script/RenderingContext.html)that gives access to the application context, the locale, the template loader, and the + URL (since 5.0) + +`Mustache.render()` is natively compatible with this signature, so you can call it directly. + +If your templating technology requires some customization, you can provide a script that +implements a custom render function. For example, [Handlerbars](https://handlebarsjs.com)needs to compile templates before using them and requires a[polyfill](https://en.wikipedia.org/wiki/Polyfill) in order to emulate some +browser facilities not available in the server-side script engine. +The following example shows how to set a custom render function: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.scriptTemplate(); + } + + @Bean + public ScriptTemplateConfigurer configurer() { + ScriptTemplateConfigurer configurer = new ScriptTemplateConfigurer(); + configurer.setEngineName("nashorn"); + configurer.setScripts("polyfill.js", "handlebars.js", "render.js"); + configurer.setRenderFunction("render"); + configurer.setSharedEngine(false); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.scriptTemplate() + } + + @Bean + fun configurer() = ScriptTemplateConfigurer().apply { + engineName = "nashorn" + setScripts("polyfill.js", "handlebars.js", "render.js") + renderFunction = "render" + isSharedEngine = false + } +} +``` + +| |Setting the `sharedEngine` property to `false` is required when using non-thread-safe<br/>script engines with templating libraries not designed for concurrency, such as Handlebars 
or<br/>React running on Nashorn. In that case, Java SE 8 update 60 is required, due to[this bug](https://bugs.openjdk.java.net/browse/JDK-8076099), but it is generally<br/>recommended to use a recent Java SE patch release in any case.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+`polyfill.js` defines only the `window` object needed by Handlebars to run properly,
+as the following snippet shows:
+
+```
+var window = {};
+```
+
+This basic `render.js` implementation compiles the template before using it. A production
+ready implementation should also store and reuse cached templates or pre-compiled templates.
+This can be done on the script side, as well as any customization you need (managing
+template engine configuration for example).
+The following example shows how to compile a template:
+
+```
+function render(template, model) {
+    var compiledTemplate = Handlebars.compile(template);
+    return compiledTemplate(model);
+}
+```
+
+Check out the Spring Framework unit tests,[Java](https://github.com/spring-projects/spring-framework/tree/main/spring-webflux/src/test/java/org/springframework/web/reactive/result/view/script), and[resources](https://github.com/spring-projects/spring-framework/tree/main/spring-webflux/src/test/resources/org/springframework/web/reactive/result/view/script),
+for more configuration examples.
+
+#### 1.9.4. 
JSON and XML + +[Web MVC](web.html#mvc-view-jackson) + +For [Content Negotiation](#webflux-multiple-representations) purposes, it is useful to be able to alternate +between rendering a model with an HTML template or as other formats (such as JSON or XML), +depending on the content type requested by the client. To support doing so, Spring WebFlux +provides the `HttpMessageWriterView`, which you can use to plug in any of the available[Codecs](#webflux-codecs) from `spring-web`, such as `Jackson2JsonEncoder`, `Jackson2SmileEncoder`, +or `Jaxb2XmlEncoder`. + +Unlike other view technologies, `HttpMessageWriterView` does not require a `ViewResolver`but is instead [configured](#webflux-config-view-resolvers) as a default view. You can +configure one or more such default views, wrapping different `HttpMessageWriter` instances +or `Encoder` instances. The one that matches the requested content type is used at runtime. + +In most cases, a model contains multiple attributes. To determine which one to serialize, +you can configure `HttpMessageWriterView` with the name of the model attribute to use for +rendering. If the model contains only one attribute, that one is used. + +### 1.10. HTTP Caching + +[Web MVC](web.html#mvc-caching) + +HTTP caching can significantly improve the performance of a web application. HTTP caching +revolves around the `Cache-Control` response header and subsequent conditional request +headers, such as `Last-Modified` and `ETag`. `Cache-Control` advises private (for example, browser) +and public (for example, proxy) caches how to cache and re-use responses. An `ETag` header is used +to make a conditional request that may result in a 304 (NOT\_MODIFIED) without a body, +if the content has not changed. `ETag` can be seen as a more sophisticated successor to +the `Last-Modified` header. + +This section describes the HTTP caching related options available in Spring WebFlux. + +#### 1.10.1. 
`CacheControl` + +[Web MVC](web.html#mvc-caching-cachecontrol) + +[`CacheControl`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/http/CacheControl.html) provides support for +configuring settings related to the `Cache-Control` header and is accepted as an argument +in a number of places: + +* [Controllers](#webflux-caching-etag-lastmodified) + +* [Static Resources](#webflux-caching-static-resources) + +While [RFC 7234](https://tools.ietf.org/html/rfc7234#section-5.2.2) describes all possible +directives for the `Cache-Control` response header, the `CacheControl` type takes a +use case-oriented approach that focuses on the common scenarios, as the following example shows: + +Java + +``` +// Cache for an hour - "Cache-Control: max-age=3600" +CacheControl ccCacheOneHour = CacheControl.maxAge(1, TimeUnit.HOURS); + +// Prevent caching - "Cache-Control: no-store" +CacheControl ccNoStore = CacheControl.noStore(); + +// Cache for ten days in public and private caches, +// public caches should not transform the response +// "Cache-Control: max-age=864000, public, no-transform" +CacheControl ccCustom = CacheControl.maxAge(10, TimeUnit.DAYS).noTransform().cachePublic(); +``` + +Kotlin + +``` +// Cache for an hour - "Cache-Control: max-age=3600" +val ccCacheOneHour = CacheControl.maxAge(1, TimeUnit.HOURS) + +// Prevent caching - "Cache-Control: no-store" +val ccNoStore = CacheControl.noStore() + +// Cache for ten days in public and private caches, +// public caches should not transform the response +// "Cache-Control: max-age=864000, public, no-transform" +val ccCustom = CacheControl.maxAge(10, TimeUnit.DAYS).noTransform().cachePublic() +``` + +#### 1.10.2. Controllers + +[Web MVC](web.html#mvc-caching-etag-lastmodified) + +Controllers can add explicit support for HTTP caching. We recommend doing so, since the`lastModified` or `ETag` value for a resource needs to be calculated before it can be compared +against conditional request headers. 
A controller can add an `ETag` and `Cache-Control`settings to a `ResponseEntity`, as the following example shows: + +Java + +``` +@GetMapping("/book/{id}") +public ResponseEntity<Book> showBook(@PathVariable Long id) { + + Book book = findBook(id); + String version = book.getVersion(); + + return ResponseEntity + .ok() + .cacheControl(CacheControl.maxAge(30, TimeUnit.DAYS)) + .eTag(version) // lastModified is also available + .body(book); +} +``` + +Kotlin + +``` +@GetMapping("/book/{id}") +fun showBook(@PathVariable id: Long): ResponseEntity<Book> { + + val book = findBook(id) + val version = book.getVersion() + + return ResponseEntity + .ok() + .cacheControl(CacheControl.maxAge(30, TimeUnit.DAYS)) + .eTag(version) // lastModified is also available + .body(book) +} +``` + +The preceding example sends a 304 (NOT\_MODIFIED) response with an empty body if the comparison +to the conditional request headers indicates the content has not changed. Otherwise, the`ETag` and `Cache-Control` headers are added to the response. + +You can also make the check against conditional request headers in the controller, +as the following example shows: + +Java + +``` +@RequestMapping +public String myHandleMethod(ServerWebExchange exchange, Model model) { + + long eTag = ... (1) + + if (exchange.checkNotModified(eTag)) { + return null; (2) + } + + model.addAttribute(...); (3) + return "myViewName"; +} +``` + +|**1**| Application-specific calculation. | +|-----|--------------------------------------------------------------------| +|**2**|Response has been set to 304 (NOT\_MODIFIED). No further processing.| +|**3**| Continue with request processing. | + +Kotlin + +``` +@RequestMapping +fun myHandleMethod(exchange: ServerWebExchange, model: Model): String? { + + val eTag: Long = ... (1) + + if (exchange.checkNotModified(eTag)) { + return null(2) + } + + model.addAttribute(...) (3) + return "myViewName" +} +``` + +|**1**| Application-specific calculation. 
| +|-----|--------------------------------------------------------------------| +|**2**|Response has been set to 304 (NOT\_MODIFIED). No further processing.| +|**3**| Continue with request processing. | + +There are three variants for checking conditional requests against `eTag` values, `lastModified`values, or both. For conditional `GET` and `HEAD` requests, you can set the response to +304 (NOT\_MODIFIED). For conditional `POST`, `PUT`, and `DELETE`, you can instead set the response +to 412 (PRECONDITION\_FAILED) to prevent concurrent modification. + +#### 1.10.3. Static Resources + +[Web MVC](web.html#mvc-caching-static-resources) + +You should serve static resources with a `Cache-Control` and conditional response headers +for optimal performance. See the section on configuring [Static Resources](#webflux-config-static-resources). + +### 1.11. WebFlux Config + +[Web MVC](web.html#mvc-config) + +The WebFlux Java configuration declares the components that are required to process +requests with annotated controllers or functional endpoints, and it offers an API to +customize the configuration. That means you do not need to understand the underlying +beans created by the Java configuration. However, if you want to understand them, +you can see them in `WebFluxConfigurationSupport` or read more about what they are +in [Special Bean Types](#webflux-special-bean-types). + +For more advanced customizations, not available in the configuration API, you can +gain full control over the configuration through the[Advanced Configuration Mode](#webflux-config-advanced-java). + +#### 1.11.1. 
Enabling WebFlux Config + +[Web MVC](web.html#mvc-config-enable) + +You can use the `@EnableWebFlux` annotation in your Java config, as the following example shows: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig { +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig +``` + +The preceding example registers a number of Spring WebFlux[infrastructure beans](#webflux-special-bean-types) and adapts to dependencies +available on the classpath — for JSON, XML, and others. + +#### 1.11.2. WebFlux config API + +[Web MVC](web.html#mvc-config-customize) + +In your Java configuration, you can implement the `WebFluxConfigurer` interface, +as the following example shows: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + // Implement configuration methods... +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + // Implement configuration methods... +} +``` + +#### 1.11.3. Conversion, formatting + +[Web MVC](web.html#mvc-config-conversion) + +By default, formatters for various number and date types are installed, along with support +for customization via `@NumberFormat` and `@DateTimeFormat` on fields. + +To register custom formatters and converters in Java config, use the following: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void addFormatters(FormatterRegistry registry) { + // ... + } + +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun addFormatters(registry: FormatterRegistry) { + // ... + } +} +``` + +By default Spring WebFlux considers the request Locale when parsing and formatting date +values. This works for forms where dates are represented as Strings with "input" form +fields. For "date" and "time" form fields, however, browsers use a fixed format defined +in the HTML spec. 
For such cases date and time formatting can be customized as follows: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void addFormatters(FormatterRegistry registry) { + DateTimeFormatterRegistrar registrar = new DateTimeFormatterRegistrar(); + registrar.setUseIsoFormat(true); + registrar.registerFormatters(registry); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun addFormatters(registry: FormatterRegistry) { + val registrar = DateTimeFormatterRegistrar() + registrar.setUseIsoFormat(true) + registrar.registerFormatters(registry) + } +} +``` + +| |See [`FormatterRegistrar` SPI](core.html#format-FormatterRegistrar-SPI)and the `FormattingConversionServiceFactoryBean` for more information on when to<br/>use `FormatterRegistrar` implementations.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.11.4. Validation + +[Web MVC](web.html#mvc-config-validation) + +By default, if [Bean Validation](core.html#validation-beanvalidation-overview) is present +on the classpath (for example, the Hibernate Validator), the `LocalValidatorFactoryBean`is registered as a global [validator](core.html#validator) for use with `@Valid` and`@Validated` on `@Controller` method arguments. + +In your Java configuration, you can customize the global `Validator` instance, +as the following example shows: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public Validator getValidator() { + // ... + } + +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun getValidator(): Validator { + // ... 
+ } + +} +``` + +Note that you can also register `Validator` implementations locally, +as the following example shows: + +Java + +``` +@Controller +public class MyController { + + @InitBinder + protected void initBinder(WebDataBinder binder) { + binder.addValidators(new FooValidator()); + } + +} +``` + +Kotlin + +``` +@Controller +class MyController { + + @InitBinder + protected fun initBinder(binder: WebDataBinder) { + binder.addValidators(FooValidator()) + } +} +``` + +| |If you need to have a `LocalValidatorFactoryBean` injected somewhere, create a bean and<br/>mark it with `@Primary` in order to avoid conflict with the one declared in the MVC config.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.11.5. Content Type Resolvers + +[Web MVC](web.html#mvc-config-content-negotiation) + +You can configure how Spring WebFlux determines the requested media types for`@Controller` instances from the request. By default, only the `Accept` header is checked, +but you can also enable a query parameter-based strategy. + +The following example shows how to customize the requested content type resolution: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configureContentTypeResolver(RequestedContentTypeResolverBuilder builder) { + // ... + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun configureContentTypeResolver(builder: RequestedContentTypeResolverBuilder) { + // ... + } +} +``` + +#### 1.11.6. 
HTTP message codecs + +[Web MVC](web.html#mvc-config-message-converters) + +The following example shows how to customize how the request and response body are read and written: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configureHttpMessageCodecs(ServerCodecConfigurer configurer) { + configurer.defaultCodecs().maxInMemorySize(512 * 1024); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun configureHttpMessageCodecs(configurer: ServerCodecConfigurer) { + // ... + } +} +``` + +`ServerCodecConfigurer` provides a set of default readers and writers. You can use it to add +more readers and writers, customize the default ones, or replace the default ones completely. + +For Jackson JSON and XML, consider using[`Jackson2ObjectMapperBuilder`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/http/converter/json/Jackson2ObjectMapperBuilder.html), +which customizes Jackson’s default properties with the following ones: + +* [`DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES`](https://fasterxml.github.io/jackson-databind/javadoc/2.6/com/fasterxml/jackson/databind/DeserializationFeature.html#FAIL_ON_UNKNOWN_PROPERTIES) is disabled. + +* [`MapperFeature.DEFAULT_VIEW_INCLUSION`](https://fasterxml.github.io/jackson-databind/javadoc/2.6/com/fasterxml/jackson/databind/MapperFeature.html#DEFAULT_VIEW_INCLUSION) is disabled. + +It also automatically registers the following well-known modules if they are detected on the classpath: + +* [`jackson-datatype-joda`](https://github.com/FasterXML/jackson-datatype-joda): Support for Joda-Time types. + +* [`jackson-datatype-jsr310`](https://github.com/FasterXML/jackson-datatype-jsr310): Support for Java 8 Date and Time API types. 
+ +* [`jackson-datatype-jdk8`](https://github.com/FasterXML/jackson-datatype-jdk8): Support for other Java 8 types, such as `Optional`. + +* [`jackson-module-kotlin`](https://github.com/FasterXML/jackson-module-kotlin): Support for Kotlin classes and data classes. + +#### 1.11.7. View Resolvers + +[Web MVC](web.html#mvc-config-view-resolvers) + +The following example shows how to configure view resolution: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + // ... + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + // ... + } +} +``` + +The `ViewResolverRegistry` has shortcuts for view technologies with which the Spring Framework +integrates. The following example uses FreeMarker (which also requires configuring the +underlying FreeMarker view technology): + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.freeMarker(); + } + + // Configure Freemarker... + + @Bean + public FreeMarkerConfigurer freeMarkerConfigurer() { + FreeMarkerConfigurer configurer = new FreeMarkerConfigurer(); + configurer.setTemplateLoaderPath("classpath:/templates"); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.freeMarker() + } + + // Configure Freemarker... 
+ + @Bean + fun freeMarkerConfigurer() = FreeMarkerConfigurer().apply { + setTemplateLoaderPath("classpath:/templates") + } +} +``` + +You can also plug in any `ViewResolver` implementation, as the following example shows: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + ViewResolver resolver = ... ; + registry.viewResolver(resolver); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + val resolver: ViewResolver = ... + registry.viewResolver(resolver) + } +} +``` + +To support [Content Negotiation](#webflux-multiple-representations) and rendering other formats +through view resolution (besides HTML), you can configure one or more default views based +on the `HttpMessageWriterView` implementation, which accepts any of the available[Codecs](#webflux-codecs) from `spring-web`. The following example shows how to do so: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.freeMarker(); + + Jackson2JsonEncoder encoder = new Jackson2JsonEncoder(); + registry.defaultViews(new HttpMessageWriterView(encoder)); + } + + // ... +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.freeMarker() + + val encoder = Jackson2JsonEncoder() + registry.defaultViews(HttpMessageWriterView(encoder)) + } + + // ... +} +``` + +See [View Technologies](#webflux-view) for more on the view technologies that are integrated with Spring WebFlux. + +#### 1.11.8. 
Static Resources + +[Web MVC](web.html#mvc-config-static-resources) + +This option provides a convenient way to serve static resources from a list of[`Resource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/io/Resource.html)-based locations. + +In the next example, given a request that starts with `/resources`, the relative path is +used to find and serve static resources relative to `/static` on the classpath. Resources +are served with a one-year future expiration to ensure maximum use of the browser cache +and a reduction in HTTP requests made by the browser. The `Last-Modified` header is also +evaluated and, if present, a `304` status code is returned. The following list shows +the example: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void addResourceHandlers(ResourceHandlerRegistry registry) { + registry.addResourceHandler("/resources/**") + .addResourceLocations("/public", "classpath:/static/") + .setCacheControl(CacheControl.maxAge(365, TimeUnit.DAYS)); + } + +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun addResourceHandlers(registry: ResourceHandlerRegistry) { + registry.addResourceHandler("/resources/**") + .addResourceLocations("/public", "classpath:/static/") + .setCacheControl(CacheControl.maxAge(365, TimeUnit.DAYS)) + } +} +``` + +The resource handler also supports a chain of[`ResourceResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/reactive/resource/ResourceResolver.html) implementations and[`ResourceTransformer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/reactive/resource/ResourceTransformer.html) implementations, +which can be used to create a toolchain for working with optimized resources. 
+ +You can use the `VersionResourceResolver` for versioned resource URLs based on an MD5 hash +computed from the content, a fixed application version, or other information. A`ContentVersionStrategy` (MD5 hash) is a good choice with some notable exceptions (such as +JavaScript resources used with a module loader). + +The following example shows how to use `VersionResourceResolver` in your Java configuration: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void addResourceHandlers(ResourceHandlerRegistry registry) { + registry.addResourceHandler("/resources/**") + .addResourceLocations("/public/") + .resourceChain(true) + .addResolver(new VersionResourceResolver().addContentVersionStrategy("/**")); + } + +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun addResourceHandlers(registry: ResourceHandlerRegistry) { + registry.addResourceHandler("/resources/**") + .addResourceLocations("/public/") + .resourceChain(true) + .addResolver(VersionResourceResolver().addContentVersionStrategy("/**")) + } + +} +``` + +You can use `ResourceUrlProvider` to rewrite URLs and apply the full chain of resolvers and +transformers (for example, to insert versions). The WebFlux configuration provides a `ResourceUrlProvider`so that it can be injected into others. + +Unlike Spring MVC, at present, in WebFlux, there is no way to transparently rewrite static +resource URLs, since there are no view technologies that can make use of a non-blocking chain +of resolvers and transformers. When serving only local resources, the workaround is to use`ResourceUrlProvider` directly (for example, through a custom element) and block. 
+ +Note that, when using both `EncodedResourceResolver` (for example, Gzip, Brotli encoded) and`VersionedResourceResolver`, they must be registered in that order, to ensure content-based +versions are always computed reliably based on the unencoded file. + +[WebJars](https://www.webjars.org/documentation) are also supported through the`WebJarsResourceResolver` which is automatically registered when the`org.webjars:webjars-locator-core` library is present on the classpath. The resolver can +re-write URLs to include the version of the jar and can also match against incoming URLs +without versions — for example, from `/jquery/jquery.min.js` to`/jquery/1.2.0/jquery.min.js`. + +| |The Java configuration based on `ResourceHandlerRegistry` provides further options<br/>for fine-grained control, e.g. last-modified behavior and optimized resource resolution.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.11.9. Path Matching + +[Web MVC](web.html#mvc-config-path-matching) + +You can customize options related to path matching. For details on the individual options, see the[`PathMatchConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/reactive/config/PathMatchConfigurer.html) javadoc. 
+The following example shows how to use `PathMatchConfigurer`: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public void configurePathMatch(PathMatchConfigurer configurer) { + configurer + .setUseCaseSensitiveMatch(true) + .setUseTrailingSlashMatch(false) + .addPathPrefix("/api", + HandlerTypePredicate.forAnnotation(RestController.class)); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + @Override + fun configurePathMatch(configurer: PathMatchConfigurer) { + configurer + .setUseCaseSensitiveMatch(true) + .setUseTrailingSlashMatch(false) + .addPathPrefix("/api", + HandlerTypePredicate.forAnnotation(RestController::class.java)) + } +} +``` + +| |Spring WebFlux relies on a parsed representation of the request path called`RequestPath` for access to decoded path segment values, with semicolon content removed<br/>(that is, path or matrix variables). That means, unlike in Spring MVC, you need not indicate<br/>whether to decode the request path nor whether to remove semicolon content for<br/>path matching purposes.<br/><br/>Spring WebFlux also does not support suffix pattern matching, unlike in Spring MVC, where we<br/>are also [recommend](web.html#mvc-ann-requestmapping-suffix-pattern-match) moving away from<br/>reliance on it.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.11.10. 
WebSocketService + +The WebFlux Java config declares a `WebSocketHandlerAdapter` bean which provides +support for the invocation of WebSocket handlers. That means all that remains to do in +order to handle a WebSocket handshake request is to map a `WebSocketHandler` to a URL +via `SimpleUrlHandlerMapping`. + +In some cases it may be necessary to create the `WebSocketHandlerAdapter` bean with a +provided `WebSocketService` service which allows configuring WebSocket server properties. +For example: + +Java + +``` +@Configuration +@EnableWebFlux +public class WebConfig implements WebFluxConfigurer { + + @Override + public WebSocketService getWebSocketService() { + TomcatRequestUpgradeStrategy strategy = new TomcatRequestUpgradeStrategy(); + strategy.setMaxSessionIdleTimeout(0L); + return new HandshakeWebSocketService(strategy); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebFlux +class WebConfig : WebFluxConfigurer { + + override fun webSocketService(): WebSocketService { + val strategy = TomcatRequestUpgradeStrategy().apply { + setMaxSessionIdleTimeout(0L) + } + return HandshakeWebSocketService(strategy) + } +} +``` + +#### 1.11.11. Advanced Configuration Mode + +[Web MVC](web.html#mvc-config-advanced-java) + +`@EnableWebFlux` imports `DelegatingWebFluxConfiguration` that: + +* Provides default Spring configuration for WebFlux applications + +* detects and delegates to `WebFluxConfigurer` implementations to customize that configuration. + +For advanced mode, you can remove `@EnableWebFlux` and extend directly from`DelegatingWebFluxConfiguration` instead of implementing `WebFluxConfigurer`, +as the following example shows: + +Java + +``` +@Configuration +public class WebConfig extends DelegatingWebFluxConfiguration { + + // ... +} +``` + +Kotlin + +``` +@Configuration +class WebConfig : DelegatingWebFluxConfiguration() { + + // ... 
+} +``` + +You can keep existing methods in `WebConfig`, but you can now also override bean declarations +from the base class and still have any number of other `WebMvcConfigurer` implementations on +the classpath. + +### 1.12. HTTP/2 + +[Web MVC](web.html#mvc-http2) + +HTTP/2 is supported with Reactor Netty, Tomcat, Jetty, and Undertow. However, there are +considerations related to server configuration. For more details, see the[HTTP/2 wiki page](https://github.com/spring-projects/spring-framework/wiki/HTTP-2-support). + +## 2. WebClient + +Spring WebFlux includes a client to perform HTTP requests with. `WebClient` has a +functional, fluent API based on Reactor, see [Reactive Libraries](#webflux-reactive-libraries), +which enables declarative composition of asynchronous logic without the need to deal with +threads or concurrency. It is fully non-blocking, it supports streaming, and relies on +the same [codecs](#webflux-codecs) that are also used to encode and +decode request and response content on the server side. + +`WebClient` needs an HTTP client library to perform requests with. There is built-in +support for the following: + +* [Reactor Netty](https://github.com/reactor/reactor-netty) + +* [Jetty Reactive HttpClient](https://github.com/jetty-project/jetty-reactive-httpclient) + +* [Apache HttpComponents](https://hc.apache.org/index.html) + +* Others can be plugged via `ClientHttpConnector`. + +### 2.1. Configuration + +The simplest way to create a `WebClient` is through one of the static factory methods: + +* `WebClient.create()` + +* `WebClient.create(String baseUrl)` + +You can also use `WebClient.builder()` with further options: + +* `uriBuilderFactory`: Customized `UriBuilderFactory` to use as a base URL. + +* `defaultUriVariables`: default values to use when expanding URI templates. + +* `defaultHeader`: Headers for every request. + +* `defaultCookie`: Cookies for every request. + +* `defaultRequest`: `Consumer` to customize every request. 
+ +* `filter`: Client filter for every request. + +* `exchangeStrategies`: HTTP message reader/writer customizations. + +* `clientConnector`: HTTP client library settings. + +For example: + +Java + +``` +WebClient client = WebClient.builder() + .codecs(configurer -> ... ) + .build(); +``` + +Kotlin + +``` +val webClient = WebClient.builder() + .codecs { configurer -> ... } + .build() +``` + +Once built, a `WebClient` is immutable. However, you can clone it and build a +modified copy as follows: + +Java + +``` +WebClient client1 = WebClient.builder() + .filter(filterA).filter(filterB).build(); + +WebClient client2 = client1.mutate() + .filter(filterC).filter(filterD).build(); + +// client1 has filterA, filterB + +// client2 has filterA, filterB, filterC, filterD +``` + +Kotlin + +``` +val client1 = WebClient.builder() + .filter(filterA).filter(filterB).build() + +val client2 = client1.mutate() + .filter(filterC).filter(filterD).build() + +// client1 has filterA, filterB + +// client2 has filterA, filterB, filterC, filterD +``` + +#### 2.1.1. MaxInMemorySize + +Codecs have [limits](#webflux-codecs-limits) for buffering data in +memory to avoid application memory issues. By default those are set to 256KB. +If that’s not enough you’ll get the following error: + +``` +org.springframework.core.io.buffer.DataBufferLimitException: Exceeded limit on max bytes to buffer +``` + +To change the limit for default codecs, use the following: + +Java + +``` +WebClient webClient = WebClient.builder() + .codecs(configurer -> configurer.defaultCodecs().maxInMemorySize(2 * 1024 * 1024)) + .build(); +``` + +Kotlin + +``` +val webClient = WebClient.builder() + .codecs { configurer -> configurer.defaultCodecs().maxInMemorySize(2 * 1024 * 1024) } + .build() +``` + +#### 2.1.2. 
Reactor Netty + +To customize Reactor Netty settings, provide a pre-configured `HttpClient`: + +Java + +``` +HttpClient httpClient = HttpClient.create().secure(sslSpec -> ...); + +WebClient webClient = WebClient.builder() + .clientConnector(new ReactorClientHttpConnector(httpClient)) + .build(); +``` + +Kotlin + +``` +val httpClient = HttpClient.create().secure { ... } + +val webClient = WebClient.builder() + .clientConnector(ReactorClientHttpConnector(httpClient)) + .build() +``` + +##### Resources + +By default, `HttpClient` participates in the global Reactor Netty resources held in`reactor.netty.http.HttpResources`, including event loop threads and a connection pool. +This is the recommended mode, since fixed, shared resources are preferred for event loop +concurrency. In this mode global resources remain active until the process exits. + +If the server is timed with the process, there is typically no need for an explicit +shutdown. However, if the server can start or stop in-process (for example, a Spring MVC +application deployed as a WAR), you can declare a Spring-managed bean of type`ReactorResourceFactory` with `globalResources=true` (the default) to ensure that the Reactor +Netty global resources are shut down when the Spring `ApplicationContext` is closed, +as the following example shows: + +Java + +``` +@Bean +public ReactorResourceFactory reactorResourceFactory() { + return new ReactorResourceFactory(); +} +``` + +Kotlin + +``` +@Bean +fun reactorResourceFactory() = ReactorResourceFactory() +``` + +You can also choose not to participate in the global Reactor Netty resources. 
However, +in this mode, the burden is on you to ensure that all Reactor Netty client and server +instances use shared resources, as the following example shows: + +Java + +``` +@Bean +public ReactorResourceFactory resourceFactory() { + ReactorResourceFactory factory = new ReactorResourceFactory(); + factory.setUseGlobalResources(false); (1) + return factory; +} + +@Bean +public WebClient webClient() { + + Function<HttpClient, HttpClient> mapper = client -> { + // Further customizations... + }; + + ClientHttpConnector connector = + new ReactorClientHttpConnector(resourceFactory(), mapper); (2) + + return WebClient.builder().clientConnector(connector).build(); (3) +} +``` + +|**1**| Create resources independent of global ones. | +|-----|-----------------------------------------------------------------------| +|**2**|Use the `ReactorClientHttpConnector` constructor with resource factory.| +|**3**| Plug the connector into the `WebClient.Builder`. | + +Kotlin + +``` +@Bean +fun resourceFactory() = ReactorResourceFactory().apply { + isUseGlobalResources = false (1) +} + +@Bean +fun webClient(): WebClient { + + val mapper: (HttpClient) -> HttpClient = { + // Further customizations... + } + + val connector = ReactorClientHttpConnector(resourceFactory(), mapper) (2) + + return WebClient.builder().clientConnector(connector).build() (3) +} +``` + +|**1**| Create resources independent of global ones. | +|-----|-----------------------------------------------------------------------| +|**2**|Use the `ReactorClientHttpConnector` constructor with resource factory.| +|**3**| Plug the connector into the `WebClient.Builder`. 
| + +##### Timeouts + +To configure a connection timeout: + +Java + +``` +import io.netty.channel.ChannelOption; + +HttpClient httpClient = HttpClient.create() + .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000); + +WebClient webClient = WebClient.builder() + .clientConnector(new ReactorClientHttpConnector(httpClient)) + .build(); +``` + +Kotlin + +``` +import io.netty.channel.ChannelOption + +val httpClient = HttpClient.create() + .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000); + +val webClient = WebClient.builder() + .clientConnector(ReactorClientHttpConnector(httpClient)) + .build(); +``` + +To configure a read or write timeout: + +Java + +``` +import io.netty.handler.timeout.ReadTimeoutHandler; +import io.netty.handler.timeout.WriteTimeoutHandler; + +HttpClient httpClient = HttpClient.create() + .doOnConnected(conn -> conn + .addHandlerLast(new ReadTimeoutHandler(10)) + .addHandlerLast(new WriteTimeoutHandler(10))); + +// Create WebClient... +``` + +Kotlin + +``` +import io.netty.handler.timeout.ReadTimeoutHandler +import io.netty.handler.timeout.WriteTimeoutHandler + +val httpClient = HttpClient.create() + .doOnConnected { conn -> conn + .addHandlerLast(ReadTimeoutHandler(10)) + .addHandlerLast(WriteTimeoutHandler(10)) + } + +// Create WebClient... +``` + +To configure a response timeout for all requests: + +Java + +``` +HttpClient httpClient = HttpClient.create() + .responseTimeout(Duration.ofSeconds(2)); + +// Create WebClient... +``` + +Kotlin + +``` +val httpClient = HttpClient.create() + .responseTimeout(Duration.ofSeconds(2)); + +// Create WebClient... 
+``` + +To configure a response timeout for a specific request: + +Java + +``` +WebClient.create().get() + .uri("https://example.org/path") + .httpRequest(httpRequest -> { + HttpClientRequest reactorRequest = httpRequest.getNativeRequest(); + reactorRequest.responseTimeout(Duration.ofSeconds(2)); + }) + .retrieve() + .bodyToMono(String.class); +``` + +Kotlin + +``` +WebClient.create().get() + .uri("https://example.org/path") + .httpRequest { httpRequest: ClientHttpRequest -> + val reactorRequest = httpRequest.getNativeRequest<HttpClientRequest>() + reactorRequest.responseTimeout(Duration.ofSeconds(2)) + } + .retrieve() + .bodyToMono(String::class.java) +``` + +#### 2.1.3. Jetty + +The following example shows how to customize Jetty `HttpClient` settings: + +Java + +``` +HttpClient httpClient = new HttpClient(); +httpClient.setCookieStore(...); + +WebClient webClient = WebClient.builder() + .clientConnector(new JettyClientHttpConnector(httpClient)) + .build(); +``` + +Kotlin + +``` +val httpClient = HttpClient() +httpClient.cookieStore = ... + +val webClient = WebClient.builder() + .clientConnector(JettyClientHttpConnector(httpClient)) + .build(); +``` + +By default, `HttpClient` creates its own resources (`Executor`, `ByteBufferPool`, `Scheduler`), +which remain active until the process exits or `stop()` is called. + +You can share resources between multiple instances of the Jetty client (and server) and +ensure that the resources are shut down when the Spring `ApplicationContext` is closed by +declaring a Spring-managed bean of type `JettyResourceFactory`, as the following example +shows: + +Java + +``` +@Bean +public JettyResourceFactory resourceFactory() { + return new JettyResourceFactory(); +} + +@Bean +public WebClient webClient() { + + HttpClient httpClient = new HttpClient(); + // Further customizations... 
+ + ClientHttpConnector connector = + new JettyClientHttpConnector(httpClient, resourceFactory()); (1) + + return WebClient.builder().clientConnector(connector).build(); (2) +} +``` + +|**1**|Use the `JettyClientHttpConnector` constructor with resource factory.| +|-----|---------------------------------------------------------------------| +|**2**| Plug the connector into the `WebClient.Builder`. | + +Kotlin + +``` +@Bean +fun resourceFactory() = JettyResourceFactory() + +@Bean +fun webClient(): WebClient { + + val httpClient = HttpClient() + // Further customizations... + + val connector = JettyClientHttpConnector(httpClient, resourceFactory()) (1) + + return WebClient.builder().clientConnector(connector).build() (2) +} +``` + +|**1**|Use the `JettyClientHttpConnector` constructor with resource factory.| +|-----|---------------------------------------------------------------------| +|**2**| Plug the connector into the `WebClient.Builder`. | + +#### 2.1.4. HttpComponents + +The following example shows how to customize Apache HttpComponents `HttpClient` settings: + +Java + +``` +HttpAsyncClientBuilder clientBuilder = HttpAsyncClients.custom(); +clientBuilder.setDefaultRequestConfig(...); +CloseableHttpAsyncClient client = clientBuilder.build(); +ClientHttpConnector connector = new HttpComponentsClientHttpConnector(client); + +WebClient webClient = WebClient.builder().clientConnector(connector).build(); +``` + +Kotlin + +``` +val client = HttpAsyncClients.custom().apply { + setDefaultRequestConfig(...) +}.build() +val connector = HttpComponentsClientHttpConnector(client) +val webClient = WebClient.builder().clientConnector(connector).build() +``` + +### 2.2. `retrieve()` + +The `retrieve()` method can be used to declare how to extract the response. 
For example: + +Java + +``` +WebClient client = WebClient.create("https://example.org"); + +Mono<ResponseEntity<Person>> result = client.get() + .uri("/persons/{id}", id).accept(MediaType.APPLICATION_JSON) + .retrieve() + .toEntity(Person.class); +``` + +Kotlin + +``` +val client = WebClient.create("https://example.org") + +val result = client.get() + .uri("/persons/{id}", id).accept(MediaType.APPLICATION_JSON) + .retrieve() + .toEntity<Person>().awaitSingle() +``` + +Or to get only the body: + +Java + +``` +WebClient client = WebClient.create("https://example.org"); + +Mono<Person> result = client.get() + .uri("/persons/{id}", id).accept(MediaType.APPLICATION_JSON) + .retrieve() + .bodyToMono(Person.class); +``` + +Kotlin + +``` +val client = WebClient.create("https://example.org") + +val result = client.get() + .uri("/persons/{id}", id).accept(MediaType.APPLICATION_JSON) + .retrieve() + .awaitBody<Person>() +``` + +To get a stream of decoded objects: + +Java + +``` +Flux<Quote> result = client.get() + .uri("/quotes").accept(MediaType.TEXT_EVENT_STREAM) + .retrieve() + .bodyToFlux(Quote.class); +``` + +Kotlin + +``` +val result = client.get() + .uri("/quotes").accept(MediaType.TEXT_EVENT_STREAM) + .retrieve() + .bodyToFlow<Quote>() +``` + +By default, 4xx or 5xx responses result in an `WebClientResponseException`, including +sub-classes for specific HTTP status codes. To customize the handling of error +responses, use `onStatus` handlers as follows: + +Java + +``` +Mono<Person> result = client.get() + .uri("/persons/{id}", id).accept(MediaType.APPLICATION_JSON) + .retrieve() + .onStatus(HttpStatus::is4xxClientError, response -> ...) + .onStatus(HttpStatus::is5xxServerError, response -> ...) + .bodyToMono(Person.class); +``` + +Kotlin + +``` +val result = client.get() + .uri("/persons/{id}", id).accept(MediaType.APPLICATION_JSON) + .retrieve() + .onStatus(HttpStatus::is4xxClientError) { ... } + .onStatus(HttpStatus::is5xxServerError) { ... 
} + .awaitBody<Person>() +``` + +### 2.3. Exchange + +The `exchangeToMono()` and `exchangeToFlux()` methods (or `awaitExchange { }` and `exchangeToFlow { }` in Kotlin) +are useful for more advanced cases that require more control, such as to decode the response differently +depending on the response status: + +Java + +``` +Mono<Person> entityMono = client.get() + .uri("/persons/1") + .accept(MediaType.APPLICATION_JSON) + .exchangeToMono(response -> { + if (response.statusCode().equals(HttpStatus.OK)) { + return response.bodyToMono(Person.class); + } + else { + // Turn to error + return response.createException().flatMap(Mono::error); + } + }); +``` + +Kotlin + +``` +val entity = client.get() + .uri("/persons/1") + .accept(MediaType.APPLICATION_JSON) + .awaitExchange { + if (response.statusCode() == HttpStatus.OK) { + return response.awaitBody<Person>() + } + else { + throw response.createExceptionAndAwait() + } + } +``` + +When using the above, after the returned `Mono` or `Flux` completes, the response body +is checked and if not consumed it is released to prevent memory and connection leaks. +Therefore the response cannot be decoded further downstream. It is up to the provided +function to declare how to decode the response if needed. + +### 2.4. Request Body + +The request body can be encoded from any asynchronous type handled by `ReactiveAdapterRegistry`, +like `Mono` or Kotlin Coroutines `Deferred` as the following example shows: + +Java + +``` +Mono<Person> personMono = ... ; + +Mono<Void> result = client.post() + .uri("/persons/{id}", id) + .contentType(MediaType.APPLICATION_JSON) + .body(personMono, Person.class) + .retrieve() + .bodyToMono(Void.class); +``` + +Kotlin + +``` +val personDeferred: Deferred<Person> = ... 
+ +client.post() + .uri("/persons/{id}", id) + .contentType(MediaType.APPLICATION_JSON) + .body<Person>(personDeferred) + .retrieve() + .awaitBody<Unit>() +``` + +You can also have a stream of objects be encoded, as the following example shows: + +Java + +``` +Flux<Person> personFlux = ... ; + +Mono<Void> result = client.post() + .uri("/persons/{id}", id) + .contentType(MediaType.APPLICATION_STREAM_JSON) + .body(personFlux, Person.class) + .retrieve() + .bodyToMono(Void.class); +``` + +Kotlin + +``` +val people: Flow<Person> = ... + +client.post() + .uri("/persons/{id}", id) + .contentType(MediaType.APPLICATION_JSON) + .body(people) + .retrieve() + .awaitBody<Unit>() +``` + +Alternatively, if you have the actual value, you can use the `bodyValue` shortcut method, +as the following example shows: + +Java + +``` +Person person = ... ; + +Mono<Void> result = client.post() + .uri("/persons/{id}", id) + .contentType(MediaType.APPLICATION_JSON) + .bodyValue(person) + .retrieve() + .bodyToMono(Void.class); +``` + +Kotlin + +``` +val person: Person = ... + +client.post() + .uri("/persons/{id}", id) + .contentType(MediaType.APPLICATION_JSON) + .bodyValue(person) + .retrieve() + .awaitBody<Unit>() +``` + +#### 2.4.1. Form Data + +To send form data, you can provide a `MultiValueMap<String, String>` as the body. Note that the +content is automatically set to `application/x-www-form-urlencoded` by the`FormHttpMessageWriter`. The following example shows how to use `MultiValueMap<String, String>`: + +Java + +``` +MultiValueMap<String, String> formData = ... ; + +Mono<Void> result = client.post() + .uri("/path", id) + .bodyValue(formData) + .retrieve() + .bodyToMono(Void.class); +``` + +Kotlin + +``` +val formData: MultiValueMap<String, String> = ... 
+
+client.post()
+    .uri("/path", id)
+    .bodyValue(formData)
+    .retrieve()
+    .awaitBody<Unit>()
+```
+
+You can also supply form data in-line by using `BodyInserters`, as the following example shows:
+
+Java
+
+```
+import static org.springframework.web.reactive.function.BodyInserters.*;
+
+Mono<Void> result = client.post()
+    .uri("/path", id)
+    .body(fromFormData("k1", "v1").with("k2", "v2"))
+    .retrieve()
+    .bodyToMono(Void.class);
+```
+
+Kotlin
+
+```
+import org.springframework.web.reactive.function.BodyInserters.*
+
+client.post()
+    .uri("/path", id)
+    .body(fromFormData("k1", "v1").with("k2", "v2"))
+    .retrieve()
+    .awaitBody<Unit>()
+```
+
+#### 2.4.2. Multipart Data
+
+To send multipart data, you need to provide a `MultiValueMap<String, ?>` whose values are
+either `Object` instances that represent part content or `HttpEntity` instances that represent the content and
+headers for a part. `MultipartBodyBuilder` provides a convenient API to prepare a
+multipart request. The following example shows how to create a `MultiValueMap<String, ?>`:
+
+Java
+
+```
+MultipartBodyBuilder builder = new MultipartBodyBuilder();
+builder.part("fieldPart", "fieldValue");
+builder.part("filePart1", new FileSystemResource("...logo.png"));
+builder.part("jsonPart", new Person("Jason"));
+builder.part("myPart", part); // Part from a server request
+
+MultiValueMap<String, HttpEntity<?>> parts = builder.build();
+```
+
+Kotlin
+
+```
+val builder = MultipartBodyBuilder().apply {
+    part("fieldPart", "fieldValue")
+    part("filePart1", FileSystemResource("...logo.png"))
+    part("jsonPart", Person("Jason"))
+    part("myPart", part) // Part from a server request
+}
+
+val parts = builder.build()
+```
+
+In most cases, you do not have to specify the `Content-Type` for each part. The content
+type is determined automatically based on the `HttpMessageWriter` chosen to serialize it
+or, in the case of a `Resource`, based on the file extension. 
If necessary, you can +explicitly provide the `MediaType` to use for each part through one of the overloaded +builder `part` methods. + +Once a `MultiValueMap` is prepared, the easiest way to pass it to the `WebClient` is +through the `body` method, as the following example shows: + +Java + +``` +MultipartBodyBuilder builder = ...; + +Mono<Void> result = client.post() + .uri("/path", id) + .body(builder.build()) + .retrieve() + .bodyToMono(Void.class); +``` + +Kotlin + +``` +val builder: MultipartBodyBuilder = ... + +client.post() + .uri("/path", id) + .body(builder.build()) + .retrieve() + .awaitBody<Unit>() +``` + +If the `MultiValueMap` contains at least one non-`String` value, which could also +represent regular form data (that is, `application/x-www-form-urlencoded`), you need not +set the `Content-Type` to `multipart/form-data`. This is always the case when using`MultipartBodyBuilder`, which ensures an `HttpEntity` wrapper. + +As an alternative to `MultipartBodyBuilder`, you can also provide multipart content, +inline-style, through the built-in `BodyInserters`, as the following example shows: + +Java + +``` +import static org.springframework.web.reactive.function.BodyInserters.*; + +Mono<Void> result = client.post() + .uri("/path", id) + .body(fromMultipartData("fieldPart", "value").with("filePart", resource)) + .retrieve() + .bodyToMono(Void.class); +``` + +Kotlin + +``` +import org.springframework.web.reactive.function.BodyInserters.* + +client.post() + .uri("/path", id) + .body(fromMultipartData("fieldPart", "value").with("filePart", resource)) + .retrieve() + .awaitBody<Unit>() +``` + +### 2.5. 
Filters + +You can register a client filter (`ExchangeFilterFunction`) through the `WebClient.Builder`in order to intercept and modify requests, as the following example shows: + +Java + +``` +WebClient client = WebClient.builder() + .filter((request, next) -> { + + ClientRequest filtered = ClientRequest.from(request) + .header("foo", "bar") + .build(); + + return next.exchange(filtered); + }) + .build(); +``` + +Kotlin + +``` +val client = WebClient.builder() + .filter { request, next -> + + val filtered = ClientRequest.from(request) + .header("foo", "bar") + .build() + + next.exchange(filtered) + } + .build() +``` + +This can be used for cross-cutting concerns, such as authentication. The following example uses +a filter for basic authentication through a static factory method: + +Java + +``` +import static org.springframework.web.reactive.function.client.ExchangeFilterFunctions.basicAuthentication; + +WebClient client = WebClient.builder() + .filter(basicAuthentication("user", "password")) + .build(); +``` + +Kotlin + +``` +import org.springframework.web.reactive.function.client.ExchangeFilterFunctions.basicAuthentication + +val client = WebClient.builder() + .filter(basicAuthentication("user", "password")) + .build() +``` + +Filters can be added or removed by mutating an existing `WebClient` instance, resulting +in a new `WebClient` instance that does not affect the original one. For example: + +Java + +``` +import static org.springframework.web.reactive.function.client.ExchangeFilterFunctions.basicAuthentication; + +WebClient client = webClient.mutate() + .filters(filterList -> { + filterList.add(0, basicAuthentication("user", "password")); + }) + .build(); +``` + +Kotlin + +``` +val client = webClient.mutate() + .filters { it.add(0, basicAuthentication("user", "password")) } + .build() +``` + +`WebClient` is a thin facade around the chain of filters followed by an`ExchangeFunction`. 
It provides a workflow to make requests, to encode to and from higher
+level objects, and it helps to ensure that response content is always consumed.
+When filters handle the response in some way, extra care must be taken to always consume
+its content or to otherwise propagate it downstream to the `WebClient` which will ensure
+the same. Below is a filter that handles the `UNAUTHORIZED` status code but ensures that
+any response content, whether expected or not, is released:
+
+Java
+
+```
+public ExchangeFilterFunction renewTokenFilter() {
+    return (request, next) -> next.exchange(request).flatMap(response -> {
+        if (response.statusCode().value() == HttpStatus.UNAUTHORIZED.value()) {
+            return response.releaseBody()
+                    .then(renewToken())
+                    .flatMap(token -> {
+                        ClientRequest newRequest = ClientRequest.from(request).build();
+                        return next.exchange(newRequest);
+                    });
+        } else {
+            return Mono.just(response);
+        }
+    });
+}
+```
+
+Kotlin
+
+```
+fun renewTokenFilter(): ExchangeFilterFunction? {
+    return ExchangeFilterFunction { request: ClientRequest?, next: ExchangeFunction ->
+        next.exchange(request!!).flatMap { response: ClientResponse ->
+            if (response.statusCode().value() == HttpStatus.UNAUTHORIZED.value()) {
+                return@flatMap response.releaseBody()
+                        .then(renewToken())
+                        .flatMap { token: String? ->
+                            val newRequest = ClientRequest.from(request).build()
+                            next.exchange(newRequest)
+                        }
+            } else {
+                return@flatMap Mono.just(response)
+            }
+        }
+    }
+}
+```
+
+### 2.6. Attributes
+
+You can add attributes to a request. This is convenient if you want to pass information
+through the filter chain and influence the behavior of filters for a given request.
+For example:
+
+Java
+
+```
+WebClient client = WebClient.builder()
+    .filter((request, next) -> {
+        Optional<Object> usr = request.attribute("myAttribute");
+        // ... 
+
+    })
+    .build();
+
+client.get().uri("https://example.org/")
+    .attribute("myAttribute", "...")
+    .retrieve()
+    .bodyToMono(Void.class);
+```
+
+Kotlin
+
+```
+val client = WebClient.builder()
+    .filter { request, _ ->
+        val usr = request.attributes()["myAttribute"];
+        // ...
+    }
+    .build()
+
+client.get().uri("https://example.org/")
+    .attribute("myAttribute", "...")
+    .retrieve()
+    .awaitBody<Unit>()
+```
+
+Note that you can configure a `defaultRequest` callback globally at the `WebClient.Builder` level which lets you insert attributes into all requests,
+which could be used for example in a Spring MVC application to populate
+request attributes based on `ThreadLocal` data.
+
+### 2.7. Context
+
+[Attributes](#webflux-client-attributes) provide a convenient way to pass information to the filter
+chain but they only influence the current request. If you want to pass information that
+propagates to additional requests that are nested, e.g. via `flatMap`, or executed after,
+e.g. via `concatMap`, then you’ll need to use the Reactor `Context`.
+
+The Reactor `Context` needs to be populated at the end of a reactive chain in order to
+apply to all operations. For example:
+
+Java
+
+```
+WebClient client = WebClient.builder()
+    .filter((request, next) ->
+        Mono.deferContextual(contextView -> {
+            String value = contextView.get("foo");
+            // ...
+        }))
+    .build();
+
+client.get().uri("https://example.org/")
+    .retrieve()
+    .bodyToMono(String.class)
+    .flatMap(body -> {
+        // perform nested request (context propagates automatically)...
+    })
+    .contextWrite(context -> context.put("foo", ...));
+```
+
+### 2.8. 
Synchronous Use + +`WebClient` can be used in synchronous style by blocking at the end for the result: + +Java + +``` +Person person = client.get().uri("/person/{id}", i).retrieve() + .bodyToMono(Person.class) + .block(); + +List<Person> persons = client.get().uri("/persons").retrieve() + .bodyToFlux(Person.class) + .collectList() + .block(); +``` + +Kotlin + +``` +val person = runBlocking { + client.get().uri("/person/{id}", i).retrieve() + .awaitBody<Person>() +} + +val persons = runBlocking { + client.get().uri("/persons").retrieve() + .bodyToFlow<Person>() + .toList() +} +``` + +However if multiple calls need to be made, it’s more efficient to avoid blocking on each +response individually, and instead wait for the combined result: + +Java + +``` +Mono<Person> personMono = client.get().uri("/person/{id}", personId) + .retrieve().bodyToMono(Person.class); + +Mono<List<Hobby>> hobbiesMono = client.get().uri("/person/{id}/hobbies", personId) + .retrieve().bodyToFlux(Hobby.class).collectList(); + +Map<String, Object> data = Mono.zip(personMono, hobbiesMono, (person, hobbies) -> { + Map<String, String> map = new LinkedHashMap<>(); + map.put("person", person); + map.put("hobbies", hobbies); + return map; + }) + .block(); +``` + +Kotlin + +``` +val data = runBlocking { + val personDeferred = async { + client.get().uri("/person/{id}", personId) + .retrieve().awaitBody<Person>() + } + + val hobbiesDeferred = async { + client.get().uri("/person/{id}/hobbies", personId) + .retrieve().bodyToFlow<Hobby>().toList() + } + + mapOf("person" to personDeferred.await(), "hobbies" to hobbiesDeferred.await()) + } +``` + +The above is merely one example. There are lots of other patterns and operators for putting +together a reactive pipeline that makes many remote calls, potentially some nested, +inter-dependent, without ever blocking until the end. 
+ +| |With `Flux` or `Mono`, you should never have to block in a Spring MVC or Spring WebFlux controller.<br/>Simply return the resulting reactive type from the controller method. The same principle apply to<br/>Kotlin Coroutines and Spring WebFlux, just use suspending function or return `Flow` in your<br/>controller method .| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 2.9. Testing + +To test code that uses the `WebClient`, you can use a mock web server, such as the[OkHttp MockWebServer](https://github.com/square/okhttp#mockwebserver). To see an example +of its use, check out[`WebClientIntegrationTests`](https://github.com/spring-projects/spring-framework/tree/main/spring-webflux/src/test/java/org/springframework/web/reactive/function/client/WebClientIntegrationTests.java)in the Spring Framework test suite or the[`static-server`](https://github.com/square/okhttp/tree/master/samples/static-server)sample in the OkHttp repository. + +## 3. WebSockets + +[Same as in the Servlet stack](web.html#websocket) + +This part of the reference documentation covers support for reactive-stack WebSocket +messaging. + +### 3.1. Introduction to WebSocket + +The WebSocket protocol, [RFC 6455](https://tools.ietf.org/html/rfc6455), provides a standardized +way to establish a full-duplex, two-way communication channel between client and server +over a single TCP connection. It is a different TCP protocol from HTTP but is designed to +work over HTTP, using ports 80 and 443 and allowing re-use of existing firewall rules. + +A WebSocket interaction begins with an HTTP request that uses the HTTP `Upgrade` header +to upgrade or, in this case, to switch to the WebSocket protocol. 
The following example +shows such an interaction: + +``` +GET /spring-websocket-portfolio/portfolio HTTP/1.1 +Host: localhost:8080 +Upgrade: websocket (1) +Connection: Upgrade (2) +Sec-WebSocket-Key: Uc9l9TMkWGbHFD2qnFHltg== +Sec-WebSocket-Protocol: v10.stomp, v11.stomp +Sec-WebSocket-Version: 13 +Origin: http://localhost:8080 +``` + +|**1**| The `Upgrade` header. | +|-----|-------------------------------| +|**2**|Using the `Upgrade` connection.| + +Instead of the usual 200 status code, a server with WebSocket support returns output +similar to the following: + +``` +HTTP/1.1 101 Switching Protocols (1) +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: 1qVdfYHU9hPOl4JYYNXF623Gzn0= +Sec-WebSocket-Protocol: v10.stomp +``` + +|**1**|Protocol switch| +|-----|---------------| + +After a successful handshake, the TCP socket underlying the HTTP upgrade request remains +open for both the client and the server to continue to send and receive messages. + +A complete introduction of how WebSockets work is beyond the scope of this document. +See RFC 6455, the WebSocket chapter of HTML5, or any of the many introductions and +tutorials on the Web. + +Note that, if a WebSocket server is running behind a web server (e.g. nginx), you +likely need to configure it to pass WebSocket upgrade requests on to the WebSocket +server. Likewise, if the application runs in a cloud environment, check the +instructions of the cloud provider related to WebSocket support. + +#### 3.1.1. HTTP Versus WebSocket + +Even though WebSocket is designed to be HTTP-compatible and starts with an HTTP request, +it is important to understand that the two protocols lead to very different +architectures and application programming models. + +In HTTP and REST, an application is modeled as many URLs. To interact with the application, +clients access those URLs, request-response style. Servers route requests to the +appropriate handler based on the HTTP URL, method, and headers. 
+ +By contrast, in WebSockets, there is usually only one URL for the initial connect. +Subsequently, all application messages flow on that same TCP connection. This points to +an entirely different asynchronous, event-driven, messaging architecture. + +WebSocket is also a low-level transport protocol, which, unlike HTTP, does not prescribe +any semantics to the content of messages. That means that there is no way to route or process +a message unless the client and the server agree on message semantics. + +WebSocket clients and servers can negotiate the use of a higher-level, messaging protocol +(for example, STOMP), through the `Sec-WebSocket-Protocol` header on the HTTP handshake request. +In the absence of that, they need to come up with their own conventions. + +#### 3.1.2. When to Use WebSockets + +WebSockets can make a web page be dynamic and interactive. However, in many cases, +a combination of Ajax and HTTP streaming or long polling can provide a simple and +effective solution. + +For example, news, mail, and social feeds need to update dynamically, but it may be +perfectly okay to do so every few minutes. Collaboration, games, and financial apps, on +the other hand, need to be much closer to real-time. + +Latency alone is not a deciding factor. If the volume of messages is relatively low (for example, +monitoring network failures) HTTP streaming or polling can provide an effective solution. +It is the combination of low latency, high frequency, and high volume that make the best +case for the use of WebSocket. + +Keep in mind also that over the Internet, restrictive proxies that are outside of your control +may preclude WebSocket interactions, either because they are not configured to pass on the`Upgrade` header or because they close long-lived connections that appear idle. This +means that the use of WebSocket for internal applications within the firewall is a more +straightforward decision than it is for public facing applications. + +### 3.2. 
WebSocket API + +[Same as in the Servlet stack](web.html#websocket-server) + +The Spring Framework provides a WebSocket API that you can use to write client- and +server-side applications that handle WebSocket messages. + +#### 3.2.1. Server + +[Same as in the Servlet stack](web.html#websocket-server-handler) + +To create a WebSocket server, you can first create a `WebSocketHandler`. +The following example shows how to do so: + +Java + +``` +import org.springframework.web.reactive.socket.WebSocketHandler; +import org.springframework.web.reactive.socket.WebSocketSession; + +public class MyWebSocketHandler implements WebSocketHandler { + + @Override + public Mono<Void> handle(WebSocketSession session) { + // ... + } +} +``` + +Kotlin + +``` +import org.springframework.web.reactive.socket.WebSocketHandler +import org.springframework.web.reactive.socket.WebSocketSession + +class MyWebSocketHandler : WebSocketHandler { + + override fun handle(session: WebSocketSession): Mono<Void> { + // ... + } +} +``` + +Then you can map it to a URL: + +Java + +``` +@Configuration +class WebConfig { + + @Bean + public HandlerMapping handlerMapping() { + Map<String, WebSocketHandler> map = new HashMap<>(); + map.put("/path", new MyWebSocketHandler()); + int order = -1; // before annotated controllers + + return new SimpleUrlHandlerMapping(map, order); + } +} +``` + +Kotlin + +``` +@Configuration +class WebConfig { + + @Bean + fun handlerMapping(): HandlerMapping { + val map = mapOf("/path" to MyWebSocketHandler()) + val order = -1 // before annotated controllers + + return SimpleUrlHandlerMapping(map, order) + } +} +``` + +If using the [WebFlux Config](#webflux-config) there is nothing +further to do, or otherwise if not using the WebFlux config you’ll need to declare a`WebSocketHandlerAdapter` as shown below: + +Java + +``` +@Configuration +class WebConfig { + + // ... 
+ + @Bean + public WebSocketHandlerAdapter handlerAdapter() { + return new WebSocketHandlerAdapter(); + } +} +``` + +Kotlin + +``` +@Configuration +class WebConfig { + + // ... + + @Bean + fun handlerAdapter() = WebSocketHandlerAdapter() +} +``` + +#### 3.2.2. `WebSocketHandler` + +The `handle` method of `WebSocketHandler` takes `WebSocketSession` and returns `Mono<Void>`to indicate when application handling of the session is complete. The session is handled +through two streams, one for inbound and one for outbound messages. The following table +describes the two methods that handle the streams: + +| `WebSocketSession` method | Description | +|----------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Flux<WebSocketMessage> receive()` | Provides access to the inbound message stream and completes when the connection is closed. | +|`Mono<Void> send(Publisher<WebSocketMessage>)`|Takes a source for outgoing messages, writes the messages, and returns a `Mono<Void>` that<br/>completes when the source completes and writing is done.| + +A `WebSocketHandler` must compose the inbound and outbound streams into a unified flow and +return a `Mono<Void>` that reflects the completion of that flow. Depending on application +requirements, the unified flow completes when: + +* Either the inbound or the outbound message stream completes. + +* The inbound stream completes (that is, the connection closed), while the outbound stream is infinite. + +* At a chosen point, through the `close` method of `WebSocketSession`. + +When inbound and outbound message streams are composed together, there is no need to +check if the connection is open, since Reactive Streams signals end activity. +The inbound stream receives a completion or error signal, and the outbound stream +receives a cancellation signal. 
+ +The most basic implementation of a handler is one that handles the inbound stream. The +following example shows such an implementation: + +Java + +``` +class ExampleHandler implements WebSocketHandler { + + @Override + public Mono<Void> handle(WebSocketSession session) { + return session.receive() (1) + .doOnNext(message -> { + // ... (2) + }) + .concatMap(message -> { + // ... (3) + }) + .then(); (4) + } +} +``` + +|**1**| Access the stream of inbound messages. | +|-----|--------------------------------------------------------------------| +|**2**| Do something with each message. | +|**3**|Perform nested asynchronous operations that use the message content.| +|**4**| Return a `Mono<Void>` that completes when receiving completes. | + +Kotlin + +``` +class ExampleHandler : WebSocketHandler { + + override fun handle(session: WebSocketSession): Mono<Void> { + return session.receive() (1) + .doOnNext { + // ... (2) + } + .concatMap { + // ... (3) + } + .then() (4) + } +} +``` + +|**1**| Access the stream of inbound messages. | +|-----|--------------------------------------------------------------------| +|**2**| Do something with each message. | +|**3**|Perform nested asynchronous operations that use the message content.| +|**4**| Return a `Mono<Void>` that completes when receiving completes. | + +| |For nested, asynchronous operations, you may need to call `message.retain()` on underlying<br/>servers that use pooled data buffers (for example, Netty). Otherwise, the data buffer may be<br/>released before you have had a chance to read the data. 
For more background, see[Data Buffers and Codecs](core.html#databuffers).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following implementation combines the inbound and outbound streams: + +Java + +``` +class ExampleHandler implements WebSocketHandler { + + @Override + public Mono<Void> handle(WebSocketSession session) { + + Flux<WebSocketMessage> output = session.receive() (1) + .doOnNext(message -> { + // ... + }) + .concatMap(message -> { + // ... + }) + .map(value -> session.textMessage("Echo " + value)); (2) + + return session.send(output); (3) + } +} +``` + +|**1**| Handle the inbound message stream. | +|-----|--------------------------------------------------------------------------| +|**2**| Create the outbound message, producing a combined flow. | +|**3**|Return a `Mono<Void>` that does not complete while we continue to receive.| + +Kotlin + +``` +class ExampleHandler : WebSocketHandler { + + override fun handle(session: WebSocketSession): Mono<Void> { + + val output = session.receive() (1) + .doOnNext { + // ... + } + .concatMap { + // ... + } + .map { session.textMessage("Echo $it") } (2) + + return session.send(output) (3) + } +} +``` + +|**1**| Handle the inbound message stream. | +|-----|--------------------------------------------------------------------------| +|**2**| Create the outbound message, producing a combined flow. 
| +|**3**|Return a `Mono<Void>` that does not complete while we continue to receive.| + +Inbound and outbound streams can be independent and be joined only for completion, +as the following example shows: + +Java + +``` +class ExampleHandler implements WebSocketHandler { + + @Override + public Mono<Void> handle(WebSocketSession session) { + + Mono<Void> input = session.receive() (1) + .doOnNext(message -> { + // ... + }) + .concatMap(message -> { + // ... + }) + .then(); + + Flux<String> source = ... ; + Mono<Void> output = session.send(source.map(session::textMessage)); (2) + + return Mono.zip(input, output).then(); (3) + } +} +``` + +|**1**| Handle inbound message stream. | +|-----|----------------------------------------------------------------------------------| +|**2**| Send outgoing messages. | +|**3**|Join the streams and return a `Mono<Void>` that completes when either stream ends.| + +Kotlin + +``` +class ExampleHandler : WebSocketHandler { + + override fun handle(session: WebSocketSession): Mono<Void> { + + val input = session.receive() (1) + .doOnNext { + // ... + } + .concatMap { + // ... + } + .then() + + val source: Flux<String> = ... + val output = session.send(source.map(session::textMessage)) (2) + + return Mono.zip(input, output).then() (3) + } +} +``` + +|**1**| Handle inbound message stream. | +|-----|----------------------------------------------------------------------------------| +|**2**| Send outgoing messages. | +|**3**|Join the streams and return a `Mono<Void>` that completes when either stream ends.| + +#### 3.2.3. `DataBuffer` + +`DataBuffer` is the representation for a byte buffer in WebFlux. The Spring Core part of +the reference has more on that in the section on[Data Buffers and Codecs](core.html#databuffers). The key point to understand is that on some +servers like Netty, byte buffers are pooled and reference counted, and must be released +when consumed to avoid memory leaks. 
+
+When running on Netty, applications must use `DataBufferUtils.retain(dataBuffer)` if they
+wish to hold on input data buffers in order to ensure they are not released, and
+subsequently use `DataBufferUtils.release(dataBuffer)` when the buffers are consumed.
+
+#### 3.2.4. Handshake
+
+[Same as in the Servlet stack](web.html#websocket-server-handshake)
+
+`WebSocketHandlerAdapter` delegates to a `WebSocketService`. By default, that is an instance
+of `HandshakeWebSocketService`, which performs basic checks on the WebSocket request and
+then uses `RequestUpgradeStrategy` for the server in use. Currently, there is built-in
+support for Reactor Netty, Tomcat, Jetty, and Undertow.
+
+`HandshakeWebSocketService` exposes a `sessionAttributePredicate` property that allows
+setting a `Predicate<String>` to extract attributes from the `WebSession` and insert them
+into the attributes of the `WebSocketSession`.
+
+#### 3.2.5. Server Configuration
+
+[Same as in the Servlet stack](web.html#websocket-server-runtime-configuration)
+
+The `RequestUpgradeStrategy` for each server exposes configuration specific to the
+underlying WebSocket server engine. 
When using the WebFlux Java config you can customize +such properties as shown in the corresponding section of the[WebFlux Config](#webflux-config-websocket-service), or otherwise if +not using the WebFlux config, use the below: + +Java + +``` +@Configuration +class WebConfig { + + @Bean + public WebSocketHandlerAdapter handlerAdapter() { + return new WebSocketHandlerAdapter(webSocketService()); + } + + @Bean + public WebSocketService webSocketService() { + TomcatRequestUpgradeStrategy strategy = new TomcatRequestUpgradeStrategy(); + strategy.setMaxSessionIdleTimeout(0L); + return new HandshakeWebSocketService(strategy); + } +} +``` + +Kotlin + +``` +@Configuration +class WebConfig { + + @Bean + fun handlerAdapter() = + WebSocketHandlerAdapter(webSocketService()) + + @Bean + fun webSocketService(): WebSocketService { + val strategy = TomcatRequestUpgradeStrategy().apply { + setMaxSessionIdleTimeout(0L) + } + return HandshakeWebSocketService(strategy) + } +} +``` + +Check the upgrade strategy for your server to see what options are available. Currently, +only Tomcat and Jetty expose such options. + +#### 3.2.6. CORS + +[Same as in the Servlet stack](web.html#websocket-server-allowed-origins) + +The easiest way to configure CORS and restrict access to a WebSocket endpoint is to +have your `WebSocketHandler` implement `CorsConfigurationSource` and return a`CorsConfiguration` with allowed origins, headers, and other details. If you cannot do +that, you can also set the `corsConfigurations` property on the `SimpleUrlHandler` to +specify CORS settings by URL pattern. If both are specified, they are combined by using the`combine` method on `CorsConfiguration`. + +#### 3.2.7. Client + +Spring WebFlux provides a `WebSocketClient` abstraction with implementations for +Reactor Netty, Tomcat, Jetty, Undertow, and standard Java (that is, JSR-356). 
+ +| |The Tomcat client is effectively an extension of the standard Java one with some extra<br/>functionality in the `WebSocketSession` handling to take advantage of the Tomcat-specific<br/>API to suspend receiving messages for back pressure.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To start a WebSocket session, you can create an instance of the client and use its `execute`methods: + +Java + +``` +WebSocketClient client = new ReactorNettyWebSocketClient(); + +URI url = new URI("ws://localhost:8080/path"); +client.execute(url, session -> + session.receive() + .doOnNext(System.out::println) + .then()); +``` + +Kotlin + +``` +val client = ReactorNettyWebSocketClient() + + val url = URI("ws://localhost:8080/path") + client.execute(url) { session -> + session.receive() + .doOnNext(::println) + .then() + } +``` + +Some clients, such as Jetty, implement `Lifecycle` and need to be stopped and started +before you can use them. All clients have constructor options related to configuration +of the underlying WebSocket client. + +## 4. Testing + +[Same in Spring MVC](web.html#testing) + +The `spring-test` module provides mock implementations of `ServerHttpRequest`,`ServerHttpResponse`, and `ServerWebExchange`. +See [Spring Web Reactive](testing.html#mock-objects-web-reactive) for a +discussion of mock objects. + +[`WebTestClient`](testing.html#webtestclient) builds on these mock request and +response objects to provide support for testing WebFlux applications without an HTTP +server. You can use the `WebTestClient` for end-to-end integration tests, too. + +## 5. RSocket + +This section describes Spring Framework’s support for the RSocket protocol. + +### 5.1. 
Overview
+
+RSocket is an application protocol for multiplexed, duplex communication over TCP,
+WebSocket, and other byte stream transports, using one of the following interaction
+models:
+
+* `Request-Response` — send one message and receive one back.
+
+* `Request-Stream` — send one message and receive a stream of messages back.
+
+* `Channel` — send streams of messages in both directions.
+
+* `Fire-and-Forget` — send a one-way message.
+
+Once the initial connection is made, the "client" vs "server" distinction is lost as
+both sides become symmetrical and each side can initiate one of the above interactions.
+This is why the protocol calls the participating sides "requester" and "responder"
+while the above interactions are called "request streams" or simply "requests".
+
+These are the key features and benefits of the RSocket protocol:
+
+* [Reactive Streams](https://www.reactive-streams.org/) semantics across network boundary — for streaming requests such as `Request-Stream` and `Channel`, back pressure signals
+  travel between requester and responder, allowing a requester to slow down a responder at
+  the source, hence reducing reliance on network layer congestion control, and the need
+  for buffering at the network level or at any level.
+
+* Request throttling — this feature is named "Leasing" after the `LEASE` frame that
+  can be sent from each end to limit the total number of requests allowed by the other end
+  for a given time. Leases are renewed periodically.
+
+* Session resumption — this is designed for loss of connectivity and requires some state
+  to be maintained. The state management is transparent for applications, and works well
+  in combination with back pressure which can stop a producer when possible and reduce
+  the amount of state required.
+
+* Fragmentation and re-assembly of large messages.
+
+* Keepalive (heartbeats).
+
+RSocket has [implementations](https://github.com/rsocket) in multiple languages. 
The[Java library](https://github.com/rsocket/rsocket-java) is built on [Project Reactor](https://projectreactor.io/), +and [Reactor Netty](https://github.com/reactor/reactor-netty) for the transport. That means +signals from Reactive Streams Publishers in your application propagate transparently +through RSocket across the network. + +#### 5.1.1. The Protocol + +One of the benefits of RSocket is that it has well defined behavior on the wire and an +easy to read [specification](https://rsocket.io/docs/Protocol) along with some protocol[extensions](https://github.com/rsocket/rsocket/tree/master/Extensions). Therefore it is +a good idea to read the spec, independent of language implementations and higher level +framework APIs. This section provides a succinct overview to establish some context. + +**Connecting** + +Initially a client connects to a server via some low level streaming transport such +as TCP or WebSocket and sends a `SETUP` frame to the server to set parameters for the +connection. + +The server may reject the `SETUP` frame, but generally after it is sent (for the client) +and received (for the server), both sides can begin to make requests, unless `SETUP`indicates use of leasing semantics to limit the number of requests, in which case +both sides must wait for a `LEASE` frame from the other end to permit making requests. + +**Making Requests** + +Once a connection is established, both sides may initiate a request through one of the +frames `REQUEST_RESPONSE`, `REQUEST_STREAM`, `REQUEST_CHANNEL`, or `REQUEST_FNF`. Each of +those frames carries one message from the requester to the responder. + +The responder may then return `PAYLOAD` frames with response messages, and in the case +of `REQUEST_CHANNEL` the requester may also send `PAYLOAD` frames with more request +messages. + +When a request involves a stream of messages such as `Request-Stream` and `Channel`, +the responder must respect demand signals from the requester. 
Demand is expressed as a +number of messages. Initial demand is specified in `REQUEST_STREAM` and`REQUEST_CHANNEL` frames. Subsequent demand is signaled via `REQUEST_N` frames. + +Each side may also send metadata notifications, via the `METADATA_PUSH` frame, that do not +pertain to any individual request but rather to the connection as a whole. + +**Message Format** + +RSocket messages contain data and metadata. Metadata can be used to send a route, a +security token, etc. Data and metadata can be formatted differently. Mime types for each +are declared in the `SETUP` frame and apply to all requests on a given connection. + +While all messages can have metadata, typically metadata such as a route are per-request +and therefore only included in the first message on a request, i.e. with one of the frames`REQUEST_RESPONSE`, `REQUEST_STREAM`, `REQUEST_CHANNEL`, or `REQUEST_FNF`. + +Protocol extensions define common metadata formats for use in applications: + +* [Composite Metadata](https://github.com/rsocket/rsocket/blob/master/Extensions/CompositeMetadata.md)-- multiple, + independently formatted metadata entries. + +* [Routing](https://github.com/rsocket/rsocket/blob/master/Extensions/Routing.md) — the route for a request. + +#### 5.1.2. Java Implementation + +The [Java implementation](https://github.com/rsocket/rsocket-java) for RSocket is built on[Project Reactor](https://projectreactor.io/). The transports for TCP and WebSocket are +built on [Reactor Netty](https://github.com/reactor/reactor-netty). As a Reactive Streams +library, Reactor simplifies the job of implementing the protocol. For applications it is +a natural fit to use `Flux` and `Mono` with declarative operators and transparent back +pressure support. + +The API in RSocket Java is intentionally minimal and basic. It focuses on protocol +features and leaves the application programming model (e.g. RPC codegen vs other) as a +higher level, independent concern. 
+ +The main contract[io.rsocket.RSocket](https://github.com/rsocket/rsocket-java/blob/master/rsocket-core/src/main/java/io/rsocket/RSocket.java)models the four request interaction types with `Mono` representing a promise for a +single message, `Flux` a stream of messages, and `io.rsocket.Payload` the actual +message with access to data and metadata as byte buffers. The `RSocket` contract is used +symmetrically. For requesting, the application is given an `RSocket` to perform +requests with. For responding, the application implements `RSocket` to handle requests. + +This is not meant to be a thorough introduction. For the most part, Spring applications +will not have to use its API directly. However it may be important to see or experiment +with RSocket independent of Spring. The RSocket Java repository contains a number of[sample apps](https://github.com/rsocket/rsocket-java/tree/master/rsocket-examples) that +demonstrate its API and protocol features. + +#### 5.1.3. Spring Support + +The `spring-messaging` module contains the following: + +* [RSocketRequester](#rsocket-requester) — fluent API to make requests through an `io.rsocket.RSocket`with data and metadata encoding/decoding. + +* [Annotated Responders](#rsocket-annot-responders) — `@MessageMapping` annotated handler methods for + responding. + +The `spring-web` module contains `Encoder` and `Decoder` implementations such as Jackson +CBOR/JSON, and Protobuf that RSocket applications will likely need. It also contains the`PathPatternParser` that can be plugged in for efficient route matching. + +Spring Boot 2.2 supports standing up an RSocket server over TCP or WebSocket, including +the option to expose RSocket over WebSocket in a WebFlux server. There is also client +support and auto-configuration for an `RSocketRequester.Builder` and `RSocketStrategies`. 
+
+See the [RSocket section](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-rsocket) in the Spring Boot reference for more details.
+
+Spring Security 5.2 provides RSocket support.
+
+Spring Integration 5.2 provides inbound and outbound gateways to interact with RSocket
+clients and servers. See the Spring Integration Reference Manual for more details.
+
+Spring Cloud Gateway supports RSocket connections.
+
+### 5.2. RSocketRequester
+
+`RSocketRequester` provides a fluent API to perform RSocket requests, accepting and
+returning objects for data and metadata instead of low level data buffers. It can be used
+symmetrically, to make requests from clients and to make requests from servers.
+
+#### 5.2.1. Client Requester
+
+To obtain an `RSocketRequester` on the client side is to connect to a server which involves
+sending an RSocket `SETUP` frame with connection settings. `RSocketRequester` provides a
+builder that helps to prepare an `io.rsocket.core.RSocketConnector` including connection
+settings for the `SETUP` frame.
+
+This is the most basic way to connect with default settings:
+
+Java
+
+```
+RSocketRequester requester = RSocketRequester.builder().tcp("localhost", 7000);
+
+URI url = URI.create("https://example.org:8080/rsocket");
+RSocketRequester requester = RSocketRequester.builder().webSocket(url);
+```
+
+Kotlin
+
+```
+val requester = RSocketRequester.builder().tcp("localhost", 7000)
+
+val url = URI.create("https://example.org:8080/rsocket")
+val requester = RSocketRequester.builder().webSocket(url)
+```
+
+The above does not connect immediately. When requests are made, a shared connection is
+established transparently and used.
+
+##### Connection Setup
+
+`RSocketRequester.Builder` provides the following to customize the initial `SETUP` frame:
+
+* `dataMimeType(MimeType)` — set the mime type for data on the connection.
+
+* `metadataMimeType(MimeType)` — set the mime type for metadata on the connection. 
+ +* `setupData(Object)` — data to include in the `SETUP`. + +* `setupRoute(String, Object…​)` — route in the metadata to include in the `SETUP`. + +* `setupMetadata(Object, MimeType)` — other metadata to include in the `SETUP`. + +For data, the default mime type is derived from the first configured `Decoder`. For +metadata, the default mime type is[composite metadata](https://github.com/rsocket/rsocket/blob/master/Extensions/CompositeMetadata.md) which allows multiple +metadata value and mime type pairs per request. Typically both don’t need to be changed. + +Data and metadata in the `SETUP` frame is optional. On the server side,[@ConnectMapping](#rsocket-annot-connectmapping) methods can be used to handle the start of a +connection and the content of the `SETUP` frame. Metadata may be used for connection +level security. + +##### Strategies + +`RSocketRequester.Builder` accepts `RSocketStrategies` to configure the requester. +You’ll need to use this to provide encoders and decoders for (de)-serialization of data and +metadata values. By default only the basic codecs from `spring-core` for `String`,`byte[]`, and `ByteBuffer` are registered. Adding `spring-web` provides access to more that +can be registered as follows: + +Java + +``` +RSocketStrategies strategies = RSocketStrategies.builder() + .encoders(encoders -> encoders.add(new Jackson2CborEncoder())) + .decoders(decoders -> decoders.add(new Jackson2CborDecoder())) + .build(); + +RSocketRequester requester = RSocketRequester.builder() + .rsocketStrategies(strategies) + .tcp("localhost", 7000); +``` + +Kotlin + +``` +val strategies = RSocketStrategies.builder() + .encoders { it.add(Jackson2CborEncoder()) } + .decoders { it.add(Jackson2CborDecoder()) } + .build() + +val requester = RSocketRequester.builder() + .rsocketStrategies(strategies) + .tcp("localhost", 7000) +``` + +`RSocketStrategies` is designed for re-use. In some scenarios, e.g. 
client and server in
+the same application, it may be preferable to declare it in Spring configuration.
+
+##### Client Responders
+
+`RSocketRequester.Builder` can be used to configure responders to requests from the
+server.
+
+You can use annotated handlers for client-side responding based on the same
+infrastructure that’s used on a server, but registered programmatically as follows:
+
+Java
+
+```
+RSocketStrategies strategies = RSocketStrategies.builder()
+        .routeMatcher(new PathPatternRouteMatcher()) (1)
+        .build();
+
+SocketAcceptor responder =
+        RSocketMessageHandler.responder(strategies, new ClientHandler()); (2)
+
+RSocketRequester requester = RSocketRequester.builder()
+        .rsocketConnector(connector -> connector.acceptor(responder)) (3)
+        .tcp("localhost", 7000);
+```
+
+|**1**|Use `PathPatternRouteMatcher`, if `spring-web` is present, for efficient<br/>route matching.|
+|-----|--------------------------------------------------------------------------------------------|
+|**2**| Create a responder from a class with `@MessageMapping` and/or `@ConnectMapping` methods. |
+|**3**| Register the responder. |
+
+Kotlin
+
+```
+val strategies = RSocketStrategies.builder()
+        .routeMatcher(PathPatternRouteMatcher()) (1)
+        .build()
+
+val responder =
+        RSocketMessageHandler.responder(strategies, ClientHandler()) (2)
+
+val requester = RSocketRequester.builder()
+        .rsocketConnector { it.acceptor(responder) } (3)
+        .tcp("localhost", 7000)
+```
+
+|**1**|Use `PathPatternRouteMatcher`, if `spring-web` is present, for efficient<br/>route matching.|
+|-----|--------------------------------------------------------------------------------------------|
+|**2**| Create a responder from a class with `@MessageMapping` and/or `@ConnectMapping` methods. |
+|**3**| Register the responder. |
+
+Note the above is only a shortcut designed for programmatic registration of client
+responders. 
For alternative scenarios, where client responders are in Spring configuration, +you can still declare `RSocketMessageHandler` as a Spring bean and then apply as follows: + +Java + +``` +ApplicationContext context = ... ; +RSocketMessageHandler handler = context.getBean(RSocketMessageHandler.class); + +RSocketRequester requester = RSocketRequester.builder() + .rsocketConnector(connector -> connector.acceptor(handler.responder())) + .tcp("localhost", 7000); +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +val context: ApplicationContext = ... +val handler = context.getBean<RSocketMessageHandler>() + +val requester = RSocketRequester.builder() + .rsocketConnector { it.acceptor(handler.responder()) } + .tcp("localhost", 7000) +``` + +For the above you may also need to use `setHandlerPredicate` in `RSocketMessageHandler` to +switch to a different strategy for detecting client responders, e.g. based on a custom +annotation such as `@RSocketClientResponder` vs the default `@Controller`. This +is necessary in scenarios with client and server, or multiple clients in the same +application. + +See also [Annotated Responders](#rsocket-annot-responders), for more on the programming model. + +##### Advanced + +`RSocketRequesterBuilder` provides a callback to expose the underlying`io.rsocket.core.RSocketConnector` for further configuration options for keepalive +intervals, session resumption, interceptors, and more. You can configure options +at that level as follows: + +Java + +``` +RSocketRequester requester = RSocketRequester.builder() + .rsocketConnector(connector -> { + // ... + }) + .tcp("localhost", 7000); +``` + +Kotlin + +``` +val requester = RSocketRequester.builder() + .rsocketConnector { + //... + } + .tcp("localhost", 7000) +``` + +#### 5.2.2. Server Requester + +To make requests from a server to connected clients is a matter of obtaining the +requester for the connected client from the server. 
+ +In [Annotated Responders](#rsocket-annot-responders), `@ConnectMapping` and `@MessageMapping` methods support an`RSocketRequester` argument. Use it to access the requester for the connection. Keep in +mind that `@ConnectMapping` methods are essentially handlers of the `SETUP` frame which +must be handled before requests can begin. Therefore, requests at the very start must be +decoupled from handling. For example: + +Java + +``` +@ConnectMapping +Mono<Void> handle(RSocketRequester requester) { + requester.route("status").data("5") + .retrieveFlux(StatusReport.class) + .subscribe(bar -> { (1) + // ... + }); + return ... (2) +} +``` + +|**1**|Start the request asynchronously, independent from handling.| +|-----|------------------------------------------------------------| +|**2**| Perform handling and return completion `Mono<Void>`. | + +Kotlin + +``` +@ConnectMapping +suspend fun handle(requester: RSocketRequester) { + GlobalScope.launch { + requester.route("status").data("5").retrieveFlow<StatusReport>().collect { (1) + // ... + } + } + /// ... (2) +} +``` + +|**1**|Start the request asynchronously, independent from handling.| +|-----|------------------------------------------------------------| +|**2**| Perform handling in the suspending function. | + +#### 5.2.3. Requests + +Once you have a [client](#rsocket-requester-client) or[server](#rsocket-requester-server) requester, you can make requests as follows: + +Java + +``` +ViewBox viewBox = ... ; + +Flux<AirportLocation> locations = requester.route("locate.radars.within") (1) + .data(viewBox) (2) + .retrieveFlux(AirportLocation.class); (3) +``` + +|**1**|Specify a route to include in the metadata of the request message.| +|-----|------------------------------------------------------------------| +|**2**| Provide data for the request message. | +|**3**| Declare the expected response. | + +Kotlin + +``` +val viewBox: ViewBox = ... 
+
+val locations = requester.route("locate.radars.within") (1)
+    .data(viewBox) (2)
+    .retrieveFlow<AirportLocation>() (3)
+```
+
+|**1**|Specify a route to include in the metadata of the request message.|
+|-----|------------------------------------------------------------------|
+|**2**| Provide data for the request message. |
+|**3**| Declare the expected response. |
+
+The interaction type is determined implicitly from the cardinality of the input and
+output. The above example is a `Request-Stream` because one value is sent and a stream
+of values is received. For the most part you don’t need to think about this as long as the
+choice of input and output matches an RSocket interaction type and the types of input and
+output expected by the responder. The only example of an invalid combination is many-to-one.
+
+The `data(Object)` method also accepts any Reactive Streams `Publisher`, including `Flux` and `Mono`, as well as any other producer of value(s) that is registered in the `ReactiveAdapterRegistry`. For a multi-value `Publisher` such as `Flux` which produces the
+same types of values, consider using one of the overloaded `data` methods to avoid having
+type checks and `Encoder` lookup on every element:
+
+```
+data(Object producer, Class<?> elementClass);
+data(Object producer, ParameterizedTypeReference<?> elementTypeRef);
+```
+
+The `data(Object)` step is optional. Skip it for requests that don’t send data:
+
+Java
+
+```
+Mono<AirportLocation> location = requester.route("find.radar.EWR")
+        .retrieveMono(AirportLocation.class);
+```
+
+Kotlin
+
+```
+import org.springframework.messaging.rsocket.retrieveAndAwait
+
+val location = requester.route("find.radar.EWR")
+        .retrieveAndAwait<AirportLocation>()
+```
+
+Extra metadata values can be added if using [composite metadata](https://github.com/rsocket/rsocket/blob/master/Extensions/CompositeMetadata.md) (the default) and if the
+values are supported by a registered `Encoder`. 
For example: + +Java + +``` +String securityToken = ... ; +ViewBox viewBox = ... ; +MimeType mimeType = MimeType.valueOf("message/x.rsocket.authentication.bearer.v0"); + +Flux<AirportLocation> locations = requester.route("locate.radars.within") + .metadata(securityToken, mimeType) + .data(viewBox) + .retrieveFlux(AirportLocation.class); +``` + +Kotlin + +``` +import org.springframework.messaging.rsocket.retrieveFlow + +val requester: RSocketRequester = ... + +val securityToken: String = ... +val viewBox: ViewBox = ... +val mimeType = MimeType.valueOf("message/x.rsocket.authentication.bearer.v0") + +val locations = requester.route("locate.radars.within") + .metadata(securityToken, mimeType) + .data(viewBox) + .retrieveFlow<AirportLocation>() +``` + +For `Fire-and-Forget` use the `send()` method that returns `Mono<Void>`. Note that the `Mono`indicates only that the message was successfully sent, and not that it was handled. + +For `Metadata-Push` use the `sendMetadata()` method with a `Mono<Void>` return value. + +### 5.3. Annotated Responders + +RSocket responders can be implemented as `@MessageMapping` and `@ConnectMapping` methods.`@MessageMapping` methods handle individual requests while `@ConnectMapping` methods handle +connection-level events (setup and metadata push). Annotated responders are supported +symmetrically, for responding from the server side and for responding from the client side. + +#### 5.3.1. 
Server Responders + +To use annotated responders on the server side, add `RSocketMessageHandler` to your Spring +configuration to detect `@Controller` beans with `@MessageMapping` and `@ConnectMapping`methods: + +Java + +``` +@Configuration +static class ServerConfig { + + @Bean + public RSocketMessageHandler rsocketMessageHandler() { + RSocketMessageHandler handler = new RSocketMessageHandler(); + handler.routeMatcher(new PathPatternRouteMatcher()); + return handler; + } +} +``` + +Kotlin + +``` +@Configuration +class ServerConfig { + + @Bean + fun rsocketMessageHandler() = RSocketMessageHandler().apply { + routeMatcher = PathPatternRouteMatcher() + } +} +``` + +Then start an RSocket server through the Java RSocket API and plug the`RSocketMessageHandler` for the responder as follows: + +Java + +``` +ApplicationContext context = ... ; +RSocketMessageHandler handler = context.getBean(RSocketMessageHandler.class); + +CloseableChannel server = + RSocketServer.create(handler.responder()) + .bind(TcpServerTransport.create("localhost", 7000)) + .block(); +``` + +Kotlin + +``` +import org.springframework.beans.factory.getBean + +val context: ApplicationContext = ... +val handler = context.getBean<RSocketMessageHandler>() + +val server = RSocketServer.create(handler.responder()) + .bind(TcpServerTransport.create("localhost", 7000)) + .awaitSingle() +``` + +`RSocketMessageHandler` supports[composite](https://github.com/rsocket/rsocket/blob/master/Extensions/CompositeMetadata.md) and[routing](https://github.com/rsocket/rsocket/blob/master/Extensions/Routing.md) metadata by default. You can set its[MetadataExtractor](#rsocket-metadata-extractor) if you need to switch to a +different mime type or register additional metadata mime types. + +You’ll need to set the `Encoder` and `Decoder` instances required for metadata and data +formats to support. You’ll likely need the `spring-web` module for codec implementations. 
+ +By default `SimpleRouteMatcher` is used for matching routes via `AntPathMatcher`. +We recommend plugging in the `PathPatternRouteMatcher` from `spring-web` for +efficient route matching. RSocket routes can be hierarchical but are not URL paths. +Both route matchers are configured to use "." as separator by default and there is no URL +decoding as with HTTP URLs. + +`RSocketMessageHandler` can be configured via `RSocketStrategies` which may be useful if +you need to share configuration between a client and a server in the same process: + +Java + +``` +@Configuration +static class ServerConfig { + + @Bean + public RSocketMessageHandler rsocketMessageHandler() { + RSocketMessageHandler handler = new RSocketMessageHandler(); + handler.setRSocketStrategies(rsocketStrategies()); + return handler; + } + + @Bean + public RSocketStrategies rsocketStrategies() { + return RSocketStrategies.builder() + .encoders(encoders -> encoders.add(new Jackson2CborEncoder())) + .decoders(decoders -> decoders.add(new Jackson2CborDecoder())) + .routeMatcher(new PathPatternRouteMatcher()) + .build(); + } +} +``` + +Kotlin + +``` +@Configuration +class ServerConfig { + + @Bean + fun rsocketMessageHandler() = RSocketMessageHandler().apply { + rSocketStrategies = rsocketStrategies() + } + + @Bean + fun rsocketStrategies() = RSocketStrategies.builder() + .encoders { it.add(Jackson2CborEncoder()) } + .decoders { it.add(Jackson2CborDecoder()) } + .routeMatcher(PathPatternRouteMatcher()) + .build() +} +``` + +#### 5.3.2. Client Responders + +Annotated responders on the client side need to be configured in the`RSocketRequester.Builder`. For details, see[Client Responders](#rsocket-requester-client-responder). + +#### 5.3.3. 
@MessageMapping + +Once [server](#rsocket-annot-responders-server) or[client](#rsocket-annot-responders-client) responder configuration is in place,`@MessageMapping` methods can be used as follows: + +Java + +``` +@Controller +public class RadarsController { + + @MessageMapping("locate.radars.within") + public Flux<AirportLocation> radars(MapRequest request) { + // ... + } +} +``` + +Kotlin + +``` +@Controller +class RadarsController { + + @MessageMapping("locate.radars.within") + fun radars(request: MapRequest): Flow<AirportLocation> { + // ... + } +} +``` + +The above `@MessageMapping` method responds to a Request-Stream interaction having the +route "locate.radars.within". It supports a flexible method signature with the option to +use the following method arguments: + +| Method Argument | Description | +|------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `@Payload` |The payload of the request. This can be a concrete value of asynchronous types like`Mono` or `Flux`.<br/><br/>**Note:** Use of the annotation is optional. A method argument that is not a simple type<br/>and is not any of the other supported arguments, is assumed to be the expected payload.| +| `RSocketRequester` | Requester for making requests to the remote end. | +| `@DestinationVariable` | Value extracted from the route based on variables in the mapping pattern, e.g.`@MessageMapping("find.radar.{id}")`. | +| `@Header` | Metadata value registered for extraction as described in [MetadataExtractor](#rsocket-metadata-extractor). | +|`@Headers Map<String, Object>`| All metadata values registered for extraction as described in [MetadataExtractor](#rsocket-metadata-extractor). 
| + +The return value is expected to be one or more Objects to be serialized as response +payloads. That can be asynchronous types like `Mono` or `Flux`, a concrete value, or +either `void` or a no-value asynchronous type such as `Mono<Void>`. + +The RSocket interaction type that an `@MessageMapping` method supports is determined from +the cardinality of the input (i.e. `@Payload` argument) and of the output, where +cardinality means the following: + +|Cardinality| Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 1 | Either an explicit value, or a single-value asynchronous type such as `Mono<T>`. | +| Many | A multi-value asynchronous type such as `Flux<T>`. | +| 0 |For input this means the method does not have an `@Payload` argument.<br/><br/> For output this is `void` or a no-value asynchronous type such as `Mono<Void>`.| + +The table below shows all input and output cardinality combinations and the corresponding +interaction type(s): + +|Input Cardinality|Output Cardinality| Interaction Types | +|-----------------|------------------|---------------------------------| +| 0, 1 | 0 |Fire-and-Forget, Request-Response| +| 0, 1 | 1 | Request-Response | +| 0, 1 | Many | Request-Stream | +| Many | 0, 1, Many | Request-Channel | + +#### 5.3.4. @ConnectMapping + +`@ConnectMapping` handles the `SETUP` frame at the start of an RSocket connection, and +any subsequent metadata push notifications through the `METADATA_PUSH` frame, i.e.`metadataPush(Payload)` in `io.rsocket.RSocket`. + +`@ConnectMapping` methods support the same arguments as[@MessageMapping](#rsocket-annot-messagemapping) but based on metadata and data from the `SETUP` and`METADATA_PUSH` frames. `@ConnectMapping` can have a pattern to narrow handling to +specific connections that have a route in the metadata, or if no patterns are declared +then all connections match. 
+ +`@ConnectMapping` methods cannot return data and must be declared with `void` or`Mono<Void>` as the return value. If handling returns an error for a new +connection then the connection is rejected. Handling must not be held up to make +requests to the `RSocketRequester` for the connection. See[Server Requester](#rsocket-requester-server) for details. + +### 5.4. MetadataExtractor + +Responders must interpret metadata.[Composite metadata](https://github.com/rsocket/rsocket/blob/master/Extensions/CompositeMetadata.md) allows independently +formatted metadata values (e.g. for routing, security, tracing) each with its own mime +type. Applications need a way to configure metadata mime types to support, and a way +to access extracted values. + +`MetadataExtractor` is a contract to take serialized metadata and return decoded +name-value pairs that can then be accessed like headers by name, for example via `@Header`in annotated handler methods. + +`DefaultMetadataExtractor` can be given `Decoder` instances to decode metadata. Out of +the box it has built-in support for["message/x.rsocket.routing.v0"](https://github.com/rsocket/rsocket/blob/master/Extensions/Routing.md) which it decodes to`String` and saves under the "route" key. For any other mime type you’ll need to provide +a `Decoder` and register the mime type as follows: + +Java + +``` +DefaultMetadataExtractor extractor = new DefaultMetadataExtractor(metadataDecoders); +extractor.metadataToExtract(fooMimeType, Foo.class, "foo"); +``` + +Kotlin + +``` +import org.springframework.messaging.rsocket.metadataToExtract + +val extractor = DefaultMetadataExtractor(metadataDecoders) +extractor.metadataToExtract<Foo>(fooMimeType, "foo") +``` + +Composite metadata works well to combine independent metadata values. However the +requester might not support composite metadata, or may choose not to use it. For this,`DefaultMetadataExtractor` may need custom logic to map the decoded value to the output +map. 
Here is an example where JSON is used for metadata: + +Java + +``` +DefaultMetadataExtractor extractor = new DefaultMetadataExtractor(metadataDecoders); +extractor.metadataToExtract( + MimeType.valueOf("application/vnd.myapp.metadata+json"), + new ParameterizedTypeReference<Map<String,String>>() {}, + (jsonMap, outputMap) -> { + outputMap.putAll(jsonMap); + }); +``` + +Kotlin + +``` +import org.springframework.messaging.rsocket.metadataToExtract + +val extractor = DefaultMetadataExtractor(metadataDecoders) +extractor.metadataToExtract<Map<String, String>>(MimeType.valueOf("application/vnd.myapp.metadata+json")) { jsonMap, outputMap -> + outputMap.putAll(jsonMap) +} +``` + +When configuring `MetadataExtractor` through `RSocketStrategies`, you can let`RSocketStrategies.Builder` create the extractor with the configured decoders, and +simply use a callback to customize registrations as follows: + +Java + +``` +RSocketStrategies strategies = RSocketStrategies.builder() + .metadataExtractorRegistry(registry -> { + registry.metadataToExtract(fooMimeType, Foo.class, "foo"); + // ... + }) + .build(); +``` + +Kotlin + +``` +import org.springframework.messaging.rsocket.metadataToExtract + +val strategies = RSocketStrategies.builder() + .metadataExtractorRegistry { registry: MetadataExtractorRegistry -> + registry.metadataToExtract<Foo>(fooMimeType, "foo") + // ... + } + .build() +``` + +## 6. Reactive Libraries + +`spring-webflux` depends on `reactor-core` and uses it internally to compose asynchronous +logic and to provide Reactive Streams support. Generally, WebFlux APIs return `Flux` or`Mono` (since those are used internally) and leniently accept any Reactive Streams`Publisher` implementation as input. The use of `Flux` versus `Mono` is important, because +it helps to express cardinality — for example, whether a single or multiple asynchronous +values are expected, and that can be essential for making decisions (for example, when +encoding or decoding HTTP messages). 
+ +For annotated controllers, WebFlux transparently adapts to the reactive library chosen +by the application. This is done with the help of the[`ReactiveAdapterRegistry`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/ReactiveAdapterRegistry.html)which provides pluggable support for reactive library and other asynchronous types. +The registry has built-in support for RxJava 3, Kotlin coroutines and SmallRye Mutiny, +but you can register other third-party adapters as well. + +| |As of Spring Framework 5.3.11, support for RxJava 1 and 2 is deprecated, following<br/>RxJava’s own EOL advice and the upgrade recommendation towards RxJava 3.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For functional APIs (such as [Functional Endpoints](#webflux-fn), the `WebClient`, and others), the general +rules for WebFlux APIs apply — `Flux` and `Mono` as return values and a Reactive Streams`Publisher` as input. When a `Publisher`, whether custom or from another reactive library, +is provided, it can be treated only as a stream with unknown semantics (0..N). If, however, +the semantics are known, you can wrap it with `Flux` or `Mono.from(Publisher)` instead +of passing the raw `Publisher`. + +For example, given a `Publisher` that is not a `Mono`, the Jackson JSON message writer +expects multiple values. If the media type implies an infinite stream (for example,`application/json+stream`), values are written and flushed individually. Otherwise, +values are buffered into a list and rendered as a JSON array. 
diff --git a/docs/en/spring-framework/web-servlet.md b/docs/en/spring-framework/web-servlet.md new file mode 100644 index 0000000000000000000000000000000000000000..af5bb2de949eed2decb691624a37ebdcfb1a9f9d --- /dev/null +++ b/docs/en/spring-framework/web-servlet.md @@ -0,0 +1,11379 @@ +# Web on Servlet Stack + +This part of the documentation covers support for Servlet-stack web applications built on the +Servlet API and deployed to Servlet containers. Individual chapters include [Spring MVC](#mvc),[View Technologies](#mvc-view), [CORS Support](#mvc-cors), and [WebSocket Support](#websocket). +For reactive-stack web applications, see [Web on Reactive Stack](web-reactive.html#spring-web-reactive). + +## 1. Spring Web MVC + +Spring Web MVC is the original web framework built on the Servlet API and has been included +in the Spring Framework from the very beginning. The formal name, “Spring Web MVC,” +comes from the name of its source module +([`spring-webmvc`](https://github.com/spring-projects/spring-framework/tree/main/spring-webmvc)), +but it is more commonly known as “Spring MVC”. + +Parallel to Spring Web MVC, Spring Framework 5.0 introduced a reactive-stack web framework +whose name, “Spring WebFlux,” is also based on its source module +([`spring-webflux`](https://github.com/spring-projects/spring-framework/tree/main/spring-webflux)). +This section covers Spring Web MVC. The [next section](web-reactive.html#spring-web-reactive)covers Spring WebFlux. + +For baseline information and compatibility with Servlet container and Java EE version +ranges, see the Spring Framework[Wiki](https://github.com/spring-projects/spring-framework/wiki/Spring-Framework-Versions). + +### 1.1. 
DispatcherServlet + +[WebFlux](web-reactive.html#webflux-dispatcher-handler) + +Spring MVC, as many other web frameworks, is designed around the front controller +pattern where a central `Servlet`, the `DispatcherServlet`, provides a shared algorithm +for request processing, while actual work is performed by configurable delegate components. +This model is flexible and supports diverse workflows. + +The `DispatcherServlet`, as any `Servlet`, needs to be declared and mapped according +to the Servlet specification by using Java configuration or in `web.xml`. +In turn, the `DispatcherServlet` uses Spring configuration to discover +the delegate components it needs for request mapping, view resolution, exception +handling, [and more](#mvc-servlet-special-bean-types). + +The following example of the Java configuration registers and initializes +the `DispatcherServlet`, which is auto-detected by the Servlet container +(see [Servlet Config](#mvc-container-config)): + +Java + +``` +public class MyWebApplicationInitializer implements WebApplicationInitializer { + + @Override + public void onStartup(ServletContext servletContext) { + + // Load Spring web application configuration + AnnotationConfigWebApplicationContext context = new AnnotationConfigWebApplicationContext(); + context.register(AppConfig.class); + + // Create and register the DispatcherServlet + DispatcherServlet servlet = new DispatcherServlet(context); + ServletRegistration.Dynamic registration = servletContext.addServlet("app", servlet); + registration.setLoadOnStartup(1); + registration.addMapping("/app/*"); + } +} +``` + +Kotlin + +``` +class MyWebApplicationInitializer : WebApplicationInitializer { + + override fun onStartup(servletContext: ServletContext) { + + // Load Spring web application configuration + val context = AnnotationConfigWebApplicationContext() + context.register(AppConfig::class.java) + + // Create and register the DispatcherServlet + val servlet = DispatcherServlet(context) + val 
registration = servletContext.addServlet("app", servlet) + registration.setLoadOnStartup(1) + registration.addMapping("/app/*") + } +} +``` + +| |In addition to using the ServletContext API directly, you can also extend`AbstractAnnotationConfigDispatcherServletInitializer` and override specific methods<br/>(see the example under [Context Hierarchy](#mvc-servlet-context-hierarchy)).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |For programmatic use cases, a `GenericWebApplicationContext` can be used as an<br/>alternative to `AnnotationConfigWebApplicationContext`. See the[`GenericWebApplicationContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/context/support/GenericWebApplicationContext.html)javadoc for details.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example of `web.xml` configuration registers and initializes the `DispatcherServlet`: + +``` +<web-app> + + <listener> + <listener-class>org.springframework.web.context.ContextLoaderListener</listener-class> + </listener> + + <context-param> + <param-name>contextConfigLocation</param-name> + <param-value>/WEB-INF/app-context.xml</param-value> + </context-param> + + <servlet> + <servlet-name>app</servlet-name> + <servlet-class>org.springframework.web.servlet.DispatcherServlet</servlet-class> + <init-param> + <param-name>contextConfigLocation</param-name> + <param-value></param-value> + </init-param> + <load-on-startup>1</load-on-startup> + </servlet> + 
+ <servlet-mapping> + <servlet-name>app</servlet-name> + <url-pattern>/app/*</url-pattern> + </servlet-mapping> + +</web-app> +``` + +| |Spring Boot follows a different initialization sequence. Rather than hooking into<br/>the lifecycle of the Servlet container, Spring Boot uses Spring configuration to<br/>bootstrap itself and the embedded Servlet container. `Filter` and `Servlet` declarations<br/>are detected in Spring configuration and registered with the Servlet container.<br/>For more details, see the[Spring Boot documentation](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-embedded-container).| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.1. Context Hierarchy + +`DispatcherServlet` expects a `WebApplicationContext` (an extension of a plain`ApplicationContext`) for its own configuration. `WebApplicationContext` has a link to the`ServletContext` and the `Servlet` with which it is associated. It is also bound to the `ServletContext`such that applications can use static methods on `RequestContextUtils` to look up the`WebApplicationContext` if they need access to it. + +For many applications, having a single `WebApplicationContext` is simple and suffices. +It is also possible to have a context hierarchy where one root `WebApplicationContext`is shared across multiple `DispatcherServlet` (or other `Servlet`) instances, each with +its own child `WebApplicationContext` configuration. 
+See [Additional Capabilities of the `ApplicationContext`](core.html#context-introduction)for more on the context hierarchy feature. + +The root `WebApplicationContext` typically contains infrastructure beans, such as data repositories and +business services that need to be shared across multiple `Servlet` instances. Those beans +are effectively inherited and can be overridden (that is, re-declared) in the Servlet-specific +child `WebApplicationContext`, which typically contains beans local to the given `Servlet`. +The following image shows this relationship: + +![mvc context hierarchy](images/mvc-context-hierarchy.png) + +The following example configures a `WebApplicationContext` hierarchy: + +Java + +``` +public class MyWebAppInitializer extends AbstractAnnotationConfigDispatcherServletInitializer { + + @Override + protected Class<?>[] getRootConfigClasses() { + return new Class<?>[] { RootConfig.class }; + } + + @Override + protected Class<?>[] getServletConfigClasses() { + return new Class<?>[] { App1Config.class }; + } + + @Override + protected String[] getServletMappings() { + return new String[] { "/app1/*" }; + } +} +``` + +Kotlin + +``` +class MyWebAppInitializer : AbstractAnnotationConfigDispatcherServletInitializer() { + + override fun getRootConfigClasses(): Array<Class<*>> { + return arrayOf(RootConfig::class.java) + } + + override fun getServletConfigClasses(): Array<Class<*>> { + return arrayOf(App1Config::class.java) + } + + override fun getServletMappings(): Array<String> { + return arrayOf("/app1/*") + } +} +``` + +| |If an application context hierarchy is not required, applications can return all<br/>configuration through `getRootConfigClasses()` and `null` from `getServletConfigClasses()`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows the `web.xml` equivalent: + +``` +<web-app> 
+ + <listener> + <listener-class>org.springframework.web.context.ContextLoaderListener</listener-class> + </listener> + + <context-param> + <param-name>contextConfigLocation</param-name> + <param-value>/WEB-INF/root-context.xml</param-value> + </context-param> + + <servlet> + <servlet-name>app1</servlet-name> + <servlet-class>org.springframework.web.servlet.DispatcherServlet</servlet-class> + <init-param> + <param-name>contextConfigLocation</param-name> + <param-value>/WEB-INF/app1-context.xml</param-value> + </init-param> + <load-on-startup>1</load-on-startup> + </servlet> + + <servlet-mapping> + <servlet-name>app1</servlet-name> + <url-pattern>/app1/*</url-pattern> + </servlet-mapping> + +</web-app> +``` + +| |If an application context hierarchy is not required, applications may configure a<br/>“root” context only and leave the `contextConfigLocation` Servlet parameter empty.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.2. Special Bean Types + +[WebFlux](web-reactive.html#webflux-special-bean-types) + +The `DispatcherServlet` delegates to special beans to process requests and render the +appropriate responses. By “special beans” we mean Spring-managed `Object` instances that +implement framework contracts. Those usually come with built-in contracts, but +you can customize their properties and extend or replace them. 
+ +The following table lists the special beans detected by the `DispatcherServlet`: + +| Bean type | Explanation | +|-------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `HandlerMapping` |Map a request to a handler along with a list of[interceptors](#mvc-handlermapping-interceptor) for pre- and post-processing.<br/>The mapping is based on some criteria, the details of which vary by `HandlerMapping`implementation.<br/><br/> The two main `HandlerMapping` implementations are `RequestMappingHandlerMapping`(which supports `@RequestMapping` annotated methods) and `SimpleUrlHandlerMapping`(which maintains explicit registrations of URI path patterns to handlers).| +| `HandlerAdapter` | Help the `DispatcherServlet` to invoke a handler mapped to a request, regardless of<br/>how the handler is actually invoked. For example, invoking an annotated controller<br/>requires resolving annotations. The main purpose of a `HandlerAdapter` is<br/>to shield the `DispatcherServlet` from such details. | +| [`HandlerExceptionResolver`](#mvc-exceptionhandlers) | Strategy to resolve exceptions, possibly mapping them to handlers, to HTML error<br/>views, or other targets. See [Exceptions](#mvc-exceptionhandlers). | +| [`ViewResolver`](#mvc-viewresolver) | Resolve logical `String`-based view names returned from a handler to an actual `View`with which to render to the response. See [View Resolution](#mvc-viewresolver) and [View Technologies](#mvc-view). 
| +|[`LocaleResolver`](#mvc-localeresolver), [LocaleContextResolver](#mvc-timezone)| Resolve the `Locale` a client is using and possibly their time zone, in order to be able<br/>to offer internationalized views. See [Locale](#mvc-localeresolver). | +| [`ThemeResolver`](#mvc-themeresolver) | Resolve themes your web application can use — for example, to offer personalized layouts.<br/>See [Themes](#mvc-themeresolver). | +| [`MultipartResolver`](#mvc-multipart) | Abstraction for parsing a multi-part request (for example, browser form file upload) with<br/>the help of some multipart parsing library. See [Multipart Resolver](#mvc-multipart). | +| [`FlashMapManager`](#mvc-flash-attributes) | Store and retrieve the “input” and the “output” `FlashMap` that can be used to pass<br/>attributes from one request to another, usually across a redirect.<br/>See [Flash Attributes](#mvc-flash-attributes). | + +#### 1.1.3. Web MVC Config + +[WebFlux](web-reactive.html#webflux-framework-config) + +Applications can declare the infrastructure beans listed in [Special Bean Types](#mvc-servlet-special-bean-types)that are required to process requests. The `DispatcherServlet` checks the`WebApplicationContext` for each special bean. If there are no matching bean types, +it falls back on the default types listed in[`DispatcherServlet.properties`](https://github.com/spring-projects/spring-framework/tree/main/spring-webmvc/src/main/resources/org/springframework/web/servlet/DispatcherServlet.properties). + +In most cases, the [MVC Config](#mvc-config) is the best starting point. It declares the required +beans in either Java or XML and provides a higher-level configuration callback API to +customize it. + +| |Spring Boot relies on the MVC Java configuration to configure Spring MVC and<br/>provides many extra convenient options.| +|---|------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.4. 
Servlet Config + +In a Servlet 3.0+ environment, you have the option of configuring the Servlet container +programmatically as an alternative or in combination with a `web.xml` file. The following +example registers a `DispatcherServlet`: + +Java + +``` +import org.springframework.web.WebApplicationInitializer; + +public class MyWebApplicationInitializer implements WebApplicationInitializer { + + @Override + public void onStartup(ServletContext container) { + XmlWebApplicationContext appContext = new XmlWebApplicationContext(); + appContext.setConfigLocation("/WEB-INF/spring/dispatcher-config.xml"); + + ServletRegistration.Dynamic registration = container.addServlet("dispatcher", new DispatcherServlet(appContext)); + registration.setLoadOnStartup(1); + registration.addMapping("/"); + } +} +``` + +Kotlin + +``` +import org.springframework.web.WebApplicationInitializer + +class MyWebApplicationInitializer : WebApplicationInitializer { + + override fun onStartup(container: ServletContext) { + val appContext = XmlWebApplicationContext() + appContext.setConfigLocation("/WEB-INF/spring/dispatcher-config.xml") + + val registration = container.addServlet("dispatcher", DispatcherServlet(appContext)) + registration.setLoadOnStartup(1) + registration.addMapping("/") + } +} +``` + +`WebApplicationInitializer` is an interface provided by Spring MVC that ensures your +implementation is detected and automatically used to initialize any Servlet 3 container. +An abstract base class implementation of `WebApplicationInitializer` named`AbstractDispatcherServletInitializer` makes it even easier to register the`DispatcherServlet` by overriding methods to specify the servlet mapping and the +location of the `DispatcherServlet` configuration. 
+ +This is recommended for applications that use Java-based Spring configuration, as the +following example shows: + +Java + +``` +public class MyWebAppInitializer extends AbstractAnnotationConfigDispatcherServletInitializer { + + @Override + protected Class<?>[] getRootConfigClasses() { + return null; + } + + @Override + protected Class<?>[] getServletConfigClasses() { + return new Class<?>[] { MyWebConfig.class }; + } + + @Override + protected String[] getServletMappings() { + return new String[] { "/" }; + } +} +``` + +Kotlin + +``` +class MyWebAppInitializer : AbstractAnnotationConfigDispatcherServletInitializer() { + + override fun getRootConfigClasses(): Array<Class<*>>? { + return null + } + + override fun getServletConfigClasses(): Array<Class<*>>? { + return arrayOf(MyWebConfig::class.java) + } + + override fun getServletMappings(): Array<String> { + return arrayOf("/") + } +} +``` + +If you use XML-based Spring configuration, you should extend directly from`AbstractDispatcherServletInitializer`, as the following example shows: + +Java + +``` +public class MyWebAppInitializer extends AbstractDispatcherServletInitializer { + + @Override + protected WebApplicationContext createRootApplicationContext() { + return null; + } + + @Override + protected WebApplicationContext createServletApplicationContext() { + XmlWebApplicationContext cxt = new XmlWebApplicationContext(); + cxt.setConfigLocation("/WEB-INF/spring/dispatcher-config.xml"); + return cxt; + } + + @Override + protected String[] getServletMappings() { + return new String[] { "/" }; + } +} +``` + +Kotlin + +``` +class MyWebAppInitializer : AbstractDispatcherServletInitializer() { + + override fun createRootApplicationContext(): WebApplicationContext? 
{ + return null + } + + override fun createServletApplicationContext(): WebApplicationContext { + return XmlWebApplicationContext().apply { + setConfigLocation("/WEB-INF/spring/dispatcher-config.xml") + } + } + + override fun getServletMappings(): Array<String> { + return arrayOf("/") + } +} +``` + +`AbstractDispatcherServletInitializer` also provides a convenient way to add `Filter`instances and have them be automatically mapped to the `DispatcherServlet`, as the +following example shows: + +Java + +``` +public class MyWebAppInitializer extends AbstractDispatcherServletInitializer { + + // ... + + @Override + protected Filter[] getServletFilters() { + return new Filter[] { + new HiddenHttpMethodFilter(), new CharacterEncodingFilter() }; + } +} +``` + +Kotlin + +``` +class MyWebAppInitializer : AbstractDispatcherServletInitializer() { + + // ... + + override fun getServletFilters(): Array<Filter> { + return arrayOf(HiddenHttpMethodFilter(), CharacterEncodingFilter()) + } +} +``` + +Each filter is added with a default name based on its concrete type and automatically +mapped to the `DispatcherServlet`. + +The `isAsyncSupported` protected method of `AbstractDispatcherServletInitializer`provides a single place to enable async support on the `DispatcherServlet` and all +filters mapped to it. By default, this flag is set to `true`. + +Finally, if you need to further customize the `DispatcherServlet` itself, you can +override the `createDispatcherServlet` method. + +#### 1.1.5. Processing + +[WebFlux](web-reactive.html#webflux-dispatcher-handler-sequence) + +The `DispatcherServlet` processes requests as follows: + +* The `WebApplicationContext` is searched for and bound in the request as an attribute + that the controller and other elements in the process can use. It is bound by default + under the `DispatcherServlet.WEB_APPLICATION_CONTEXT_ATTRIBUTE` key. 
+ +* The locale resolver is bound to the request to let elements in the process + resolve the locale to use when processing the request (rendering the view, preparing + data, and so on). If you do not need locale resolving, you do not need the locale resolver. + +* The theme resolver is bound to the request to let elements such as views determine + which theme to use. If you do not use themes, you can ignore it. + +* If you specify a multipart file resolver, the request is inspected for multiparts. If + multiparts are found, the request is wrapped in a `MultipartHttpServletRequest` for + further processing by other elements in the process. See [Multipart Resolver](#mvc-multipart) for further + information about multipart handling. + +* An appropriate handler is searched for. If a handler is found, the execution chain + associated with the handler (preprocessors, postprocessors, and controllers) is + run to prepare a model for rendering. Alternatively, for annotated + controllers, the response can be rendered (within the `HandlerAdapter`) instead of + returning a view. + +* If a model is returned, the view is rendered. If no model is returned (maybe due to + a preprocessor or postprocessor intercepting the request, perhaps for security + reasons), no view is rendered, because the request could already have been fulfilled. + +The `HandlerExceptionResolver` beans declared in the `WebApplicationContext` are used to +resolve exceptions thrown during request processing. Those exception resolvers allow +customizing the logic to address exceptions. See [Exceptions](#mvc-exceptionhandlers) for more details. + +For HTTP caching support, handlers can use the `checkNotModified` methods of `WebRequest`, +along with further options for annotated controllers as described in[HTTP Caching for Controllers](#mvc-caching-etag-lastmodified). 
+ +You can customize individual `DispatcherServlet` instances by adding Servlet +initialization parameters (`init-param` elements) to the Servlet declaration in the`web.xml` file. The following table lists the supported parameters: + +| Parameter | Explanation | +|--------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `contextClass` | Class that implements `ConfigurableWebApplicationContext`, to be instantiated and<br/>locally configured by this Servlet. By default, `XmlWebApplicationContext` is used. | +| `contextConfigLocation` | String that is passed to the context instance (specified by `contextClass`) to<br/>indicate where contexts can be found. The string consists potentially of multiple<br/>strings (using a comma as a delimiter) to support multiple contexts. In the case of<br/>multiple context locations with beans that are defined twice, the latest location<br/>takes precedence. | +| `namespace` | Namespace of the `WebApplicationContext`. Defaults to `[servlet-name]-servlet`. 
| +|`throwExceptionIfNoHandlerFound`|Whether to throw a `NoHandlerFoundException` when no handler was found for a request.<br/>The exception can then be caught with a `HandlerExceptionResolver` (for example, by using an`@ExceptionHandler` controller method) and handled as any others.<br/><br/> By default, this is set to `false`, in which case the `DispatcherServlet` sets the<br/>response status to 404 (NOT\_FOUND) without raising an exception.<br/><br/> Note that, if [default servlet handling](#mvc-default-servlet-handler) is<br/>also configured, unresolved requests are always forwarded to the default servlet<br/>and a 404 is never raised.| + +#### 1.1.6. Path Matching + +The Servlet API exposes the full request path as `requestURI` and further sub-divides it +into `contextPath`, `servletPath`, and `pathInfo` whose values vary depending on how a +Servlet is mapped. From these inputs, Spring MVC needs to determine the lookup path to +use for handler mapping, which is the path within the mapping of the `DispatcherServlet`itself, excluding the `contextPath` and any `servletMapping` prefix, if present. + +The `servletPath` and `pathInfo` are decoded and that makes them impossible to compare +directly to the full `requestURI` in order to derive the lookupPath and that makes it +necessary to decode the `requestURI`. However this introduces its own issues because the +path may contain encoded reserved characters such as `"/"` or `";"` that can in turn +alter the structure of the path after they are decoded which can also lead to security +issues. In addition, Servlet containers may normalize the `servletPath` to varying +degrees which makes it further impossible to perform `startsWith` comparisons against +the `requestURI`. + +This is why it is best to avoid reliance on the `servletPath` which comes with the +prefix-based `servletPath` mapping type. 
If the `DispatcherServlet` is mapped as the +default Servlet with `"/"` or otherwise without a prefix with `"/*"` and the Servlet +container is 4.0+ then Spring MVC is able to detect the Servlet mapping type and avoid +use of the `servletPath` and `pathInfo` altogether. On a 3.1 Servlet container, +assuming the same Servlet mapping types, the equivalent can be achieved by providing +a `UrlPathHelper` with `alwaysUseFullPath=true` via [Path Matching](#mvc-config-path-matching) in +the MVC config. + +Fortunately the default Servlet mapping `"/"` is a good choice. However, there is still +an issue in that the `requestURI` needs to be decoded to make it possible to compare to +controller mappings. This is again undesirable because of the potential to decode +reserved characters that alter the path structure. If such characters are not expected, +then you can reject them (like the Spring Security HTTP firewall), or you can configure`UrlPathHelper` with `urlDecode=false` but controller mappings will need to match to the +encoded path which may not always work well. Furthermore, sometimes the`DispatcherServlet` needs to share the URL space with another Servlet and may need to +be mapped by prefix. + +The above issues can be addressed more comprehensively by switching from `PathMatcher` to +the parsed `PathPattern` available in 5.3 or higher, see[Pattern Comparison](#mvc-ann-requestmapping-pattern-comparison). Unlike `AntPathMatcher` which needs +either the lookup path decoded or the controller mapping encoded, a parsed `PathPattern`matches to a parsed representation of the path called `RequestPath`, one path segment +at a time. This allows decoding and sanitizing path segment values individually without +the risk of altering the structure of the path. Parsed `PathPattern` also supports +the use of `servletPath` prefix mapping as long as the prefix is kept simple and does +not have any characters that need to be encoded. + +#### 1.1.7. 
Interception + +All `HandlerMapping` implementations support handler interceptors that are useful when +you want to apply specific functionality to certain requests — for example, checking for +a principal. Interceptors must implement `HandlerInterceptor` from the `org.springframework.web.servlet` package with three methods that should provide enough +flexibility to do all kinds of pre-processing and post-processing: + +* `preHandle(..)`: Before the actual handler is run + +* `postHandle(..)`: After the handler is run + +* `afterCompletion(..)`: After the complete request has finished + +The `preHandle(..)` method returns a boolean value. You can use this method to break or +continue the processing of the execution chain. When this method returns `true`, the +handler execution chain continues. When it returns `false`, the `DispatcherServlet` assumes the interceptor itself has taken care of requests (and, for example, rendered an +appropriate view) and does not continue executing the other interceptors and the actual +handler in the execution chain. + +See [Interceptors](#mvc-config-interceptors) in the section on MVC configuration for examples of how to +configure interceptors. You can also register them directly by using setters on individual `HandlerMapping` implementations. + +Note that `postHandle` is less useful with `@ResponseBody` and `ResponseEntity` methods for +which the response is written and committed within the `HandlerAdapter` and before `postHandle`. That means it is too late to make any changes to the response, such as adding +an extra header. For such scenarios, you can implement `ResponseBodyAdvice` and either +declare it as a [Controller Advice](#mvc-ann-controller-advice) bean or configure it directly on `RequestMappingHandlerAdapter`. + +#### 1.1.8. 
Exceptions + +[WebFlux](web-reactive.html#webflux-dispatcher-exceptions) + +If an exception occurs during request mapping or is thrown from a request handler (such as +a `@Controller`), the `DispatcherServlet` delegates to a chain of `HandlerExceptionResolver`beans to resolve the exception and provide alternative handling, which is typically an +error response. + +The following table lists the available `HandlerExceptionResolver` implementations: + +| `HandlerExceptionResolver` | Description | +|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `SimpleMappingExceptionResolver` | A mapping between exception class names and error view names. Useful for rendering<br/>error pages in a browser application. | +|[`DefaultHandlerExceptionResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/mvc/support/DefaultHandlerExceptionResolver.html)|Resolves exceptions raised by Spring MVC and maps them to HTTP status codes.<br/>See also alternative `ResponseEntityExceptionHandler` and [REST API exceptions](#mvc-ann-rest-exceptions).| +| `ResponseStatusExceptionResolver` | Resolves exceptions with the `@ResponseStatus` annotation and maps them to HTTP status<br/>codes based on the value in the annotation. | +| `ExceptionHandlerExceptionResolver` | Resolves exceptions by invoking an `@ExceptionHandler` method in a `@Controller` or a`@ControllerAdvice` class. See [@ExceptionHandler methods](#mvc-ann-exceptionhandler). 
| + +##### Chain of Resolvers + +You can form an exception resolver chain by declaring multiple `HandlerExceptionResolver`beans in your Spring configuration and setting their `order` properties as needed. +The higher the order property, the later the exception resolver is positioned. + +The contract of `HandlerExceptionResolver` specifies that it can return: + +* a `ModelAndView` that points to an error view. + +* An empty `ModelAndView` if the exception was handled within the resolver. + +* `null` if the exception remains unresolved, for subsequent resolvers to try, and, if the + exception remains at the end, it is allowed to bubble up to the Servlet container. + +The [MVC Config](#mvc-config) automatically declares built-in resolvers for default Spring MVC +exceptions, for `@ResponseStatus` annotated exceptions, and for support of`@ExceptionHandler` methods. You can customize that list or replace it. + +##### Container Error Page + +If an exception remains unresolved by any `HandlerExceptionResolver` and is, therefore, +left to propagate or if the response status is set to an error status (that is, 4xx, 5xx), +Servlet containers can render a default error page in HTML. To customize the default +error page of the container, you can declare an error page mapping in `web.xml`. +The following example shows how to do so: + +``` +<error-page> + <location>/error</location> +</error-page> +``` + +Given the preceding example, when an exception bubbles up or the response has an error status, the +Servlet container makes an ERROR dispatch within the container to the configured URL +(for example, `/error`). 
This is then processed by the `DispatcherServlet`, possibly mapping it +to a `@Controller`, which could be implemented to return an error view name with a model +or to render a JSON response, as the following example shows: + +Java + +``` +@RestController +public class ErrorController { + + @RequestMapping(path = "/error") + public Map<String, Object> handle(HttpServletRequest request) { + Map<String, Object> map = new HashMap<String, Object>(); + map.put("status", request.getAttribute("javax.servlet.error.status_code")); + map.put("reason", request.getAttribute("javax.servlet.error.message")); + return map; + } +} +``` + +Kotlin + +``` +@RestController +class ErrorController { + + @RequestMapping(path = ["/error"]) + fun handle(request: HttpServletRequest): Map<String, Any> { + val map = HashMap<String, Any>() + map["status"] = request.getAttribute("javax.servlet.error.status_code") + map["reason"] = request.getAttribute("javax.servlet.error.message") + return map + } +} +``` + +| |The Servlet API does not provide a way to create error page mappings in Java. You can,<br/>however, use both a `WebApplicationInitializer` and a minimal `web.xml`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.9. View Resolution + +[WebFlux](web-reactive.html#webflux-viewresolution) + +Spring MVC defines the `ViewResolver` and `View` interfaces that let you render +models in a browser without tying you to a specific view technology. `ViewResolver`provides a mapping between view names and actual views. `View` addresses the preparation +of data before handing over to a specific view technology. 
+ +The following table provides more details on the `ViewResolver` hierarchy: + +| ViewResolver | Description | +|--------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `AbstractCachingViewResolver` | Subclasses of `AbstractCachingViewResolver` cache view instances that they resolve.<br/>Caching improves performance of certain view technologies. You can turn off the<br/>cache by setting the `cache` property to `false`. Furthermore, if you must refresh<br/>a certain view at runtime (for example, when a FreeMarker template is modified),<br/>you can use the `removeFromCache(String viewName, Locale loc)` method. | +| `UrlBasedViewResolver` | Simple implementation of the `ViewResolver` interface that effects the direct<br/>resolution of logical view names to URLs without an explicit mapping definition.<br/>This is appropriate if your logical names match the names of your view resources<br/>in a straightforward manner, without the need for arbitrary mappings. | +| `InternalResourceViewResolver` |Convenient subclass of `UrlBasedViewResolver` that supports `InternalResourceView` (in<br/>effect, Servlets and JSPs) and subclasses such as `JstlView` and `TilesView`. 
You can<br/>specify the view class for all views generated by this resolver by using `setViewClass(..)`.<br/>See the [`UrlBasedViewResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/reactive/result/view/UrlBasedViewResolver.html)javadoc for details.| +| `FreeMarkerViewResolver` | Convenient subclass of `UrlBasedViewResolver` that supports `FreeMarkerView` and<br/>custom subclasses of them. | +|`ContentNegotiatingViewResolver`| Implementation of the `ViewResolver` interface that resolves a view based on the<br/>request file name or `Accept` header. See [Content Negotiation](#mvc-multiple-representations). | +| `BeanNameViewResolver` | Implementation of the `ViewResolver` interface that interprets a view name as a<br/>bean name in the current application context. This is a very flexible variant which<br/>allows for mixing and matching different view types based on distinct view names.<br/>Each such `View` can be defined as a bean e.g. in XML or in configuration classes. | + +##### Handling + +[WebFlux](web-reactive.html#webflux-viewresolution-handling) + +You can chain view resolvers by declaring more than one resolver bean and, if necessary, by +setting the `order` property to specify ordering. Remember, the higher the order property, +the later the view resolver is positioned in the chain. + +The contract of a `ViewResolver` specifies that it can return null to indicate that the +view could not be found. However, in the case of JSPs and `InternalResourceViewResolver`, +the only way to figure out if a JSP exists is to perform a dispatch through`RequestDispatcher`. Therefore, you must always configure an `InternalResourceViewResolver`to be last in the overall order of view resolvers. + +Configuring view resolution is as simple as adding `ViewResolver` beans to your Spring +configuration. 
The [MVC Config](#mvc-config) provides a dedicated configuration API for[View Resolvers](#mvc-config-view-resolvers) and for adding logic-less[View Controllers](#mvc-config-view-controller) which are useful for HTML template +rendering without controller logic. + +##### Redirecting + +[WebFlux](web-reactive.html#webflux-redirecting-redirect-prefix) + +The special `redirect:` prefix in a view name lets you perform a redirect. The`UrlBasedViewResolver` (and its subclasses) recognize this as an instruction that a +redirect is needed. The rest of the view name is the redirect URL. + +The net effect is the same as if the controller had returned a `RedirectView`, but now +the controller itself can operate in terms of logical view names. A logical view +name (such as `redirect:/myapp/some/resource`) redirects relative to the current +Servlet context, while a name such as `redirect:https://myhost.com/some/arbitrary/path`redirects to an absolute URL. + +Note that, if a controller method is annotated with the `@ResponseStatus`, the annotation +value takes precedence over the response status set by `RedirectView`. + +##### Forwarding + +You can also use a special `forward:` prefix for view names that are +ultimately resolved by `UrlBasedViewResolver` and subclasses. This creates an`InternalResourceView`, which does a `RequestDispatcher.forward()`. +Therefore, this prefix is not useful with `InternalResourceViewResolver` and`InternalResourceView` (for JSPs), but it can be helpful if you use another view +technology but still want to force a forward of a resource to be handled by the +Servlet/JSP engine. Note that you may also chain multiple view resolvers, instead. 
+ +##### Content Negotiation + +[WebFlux](web-reactive.html#webflux-multiple-representations) + +[`ContentNegotiatingViewResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/view/ContentNegotiatingViewResolver.html)does not resolve views itself but rather delegates +to other view resolvers and selects the view that resembles the representation requested +by the client. The representation can be determined from the `Accept` header or from a +query parameter (for example, `"/path?format=pdf"`). + +The `ContentNegotiatingViewResolver` selects an appropriate `View` to handle the request +by comparing the request media types with the media type (also known as`Content-Type`) supported by the `View` associated with each of its `ViewResolvers`. The +first `View` in the list that has a compatible `Content-Type` returns the representation +to the client. If a compatible view cannot be supplied by the `ViewResolver` chain, +the list of views specified through the `DefaultViews` property is consulted. This +latter option is appropriate for singleton `Views` that can render an appropriate +representation of the current resource regardless of the logical view name. The `Accept`header can include wildcards (for example `text/*`), in which case a `View` whose`Content-Type` is `text/xml` is a compatible match. + +See [View Resolvers](#mvc-config-view-resolvers) under [MVC Config](#mvc-config) for configuration details. + +#### 1.1.10. Locale + +Most parts of Spring’s architecture support internationalization, as the Spring web +MVC framework does. `DispatcherServlet` lets you automatically resolve messages +by using the client’s locale. This is done with `LocaleResolver` objects. + +When a request comes in, the `DispatcherServlet` looks for a locale resolver and, if it +finds one, it tries to use it to set the locale. 
By using the `RequestContext.getLocale()`method, you can always retrieve the locale that was resolved by the locale resolver. + +In addition to automatic locale resolution, you can also attach an interceptor to the +handler mapping (see [Interception](#mvc-handlermapping-interceptor) for more information on handler +mapping interceptors) to change the locale under specific circumstances (for example, +based on a parameter in the request). + +Locale resolvers and interceptors are defined in the`org.springframework.web.servlet.i18n` package and are configured in your application +context in the normal way. The following selection of locale resolvers is included in +Spring. + +* [Time Zone](#mvc-timezone) + +* [Header Resolver](#mvc-localeresolver-acceptheader) + +* [Cookie Resolver](#mvc-localeresolver-cookie) + +* [Session Resolver](#mvc-localeresolver-session) + +* [Locale Interceptor](#mvc-localeresolver-interceptor) + +##### Time Zone + +In addition to obtaining the client’s locale, it is often useful to know its time zone. +The `LocaleContextResolver` interface offers an extension to `LocaleResolver` that lets +resolvers provide a richer `LocaleContext`, which may include time zone information. + +When available, the user’s `TimeZone` can be obtained by using the`RequestContext.getTimeZone()` method. Time zone information is automatically used +by any Date/Time `Converter` and `Formatter` objects that are registered with Spring’s`ConversionService`. + +##### Header Resolver + +This locale resolver inspects the `accept-language` header in the request that was sent +by the client (for example, a web browser). Usually, this header field contains the locale of +the client’s operating system. Note that this resolver does not support time zone +information. + +##### Cookie Resolver + +This locale resolver inspects a `Cookie` that might exist on the client to see if a`Locale` or `TimeZone` is specified. If so, it uses the specified details. 
By using the +properties of this locale resolver, you can specify the name of the cookie as well as the +maximum age. The following example defines a `CookieLocaleResolver`: + +``` +<bean id="localeResolver" class="org.springframework.web.servlet.i18n.CookieLocaleResolver"> + + <property name="cookieName" value="clientlanguage"/> + + <!-- in seconds. If set to -1, the cookie is not persisted (deleted when browser shuts down) --> + <property name="cookieMaxAge" value="100000"/> + +</bean> +``` + +The following table describes the properties `CookieLocaleResolver`: + +| Property | Default | Description | +|--------------|-------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `cookieName` | classname + LOCALE | The name of the cookie | +|`cookieMaxAge`|Servlet container default|The maximum time a cookie persists on the client. If `-1` is specified, the<br/>cookie will not be persisted. It is available only until the client shuts down<br/>the browser.| +| `cookiePath` | / | Limits the visibility of the cookie to a certain part of your site. When `cookiePath` is<br/>specified, the cookie is visible only to that path and the paths below it. | + +##### Session Resolver + +The `SessionLocaleResolver` lets you retrieve `Locale` and `TimeZone` from the +session that might be associated with the user’s request. In contrast to`CookieLocaleResolver`, this strategy stores locally chosen locale settings in the +Servlet container’s `HttpSession`. As a consequence, those settings are temporary +for each session and are, therefore, lost when each session ends. + +Note that there is no direct relationship with external session management mechanisms, +such as the Spring Session project. This `SessionLocaleResolver` evaluates and +modifies the corresponding `HttpSession` attributes against the current `HttpServletRequest`. 
+ +##### Locale Interceptor + +You can enable changing of locales by adding the `LocaleChangeInterceptor` to one of the`HandlerMapping` definitions. It detects a parameter in the request and changes the locale +accordingly, calling the `setLocale` method on the `LocaleResolver` in the dispatcher’s +application context. The next example shows that calls to all `*.view` resources +that contain a parameter named `siteLanguage` now changes the locale. So, for example, +a request for the URL, `[https://www.sf.net/home.view?siteLanguage=nl](https://www.sf.net/home.view?siteLanguage=nl)`, changes the site +language to Dutch. The following example shows how to intercept the locale: + +``` +<bean id="localeChangeInterceptor" + class="org.springframework.web.servlet.i18n.LocaleChangeInterceptor"> + <property name="paramName" value="siteLanguage"/> +</bean> + +<bean id="localeResolver" + class="org.springframework.web.servlet.i18n.CookieLocaleResolver"/> + +<bean id="urlMapping" + class="org.springframework.web.servlet.handler.SimpleUrlHandlerMapping"> + <property name="interceptors"> + <list> + <ref bean="localeChangeInterceptor"/> + </list> + </property> + <property name="mappings"> + <value>/**/*.view=someController</value> + </property> +</bean> +``` + +#### 1.1.11. Themes + +You can apply Spring Web MVC framework themes to set the overall look-and-feel of your +application, thereby enhancing user experience. A theme is a collection of static +resources, typically style sheets and images, that affect the visual style of the +application. + +##### Defining a theme + +To use themes in your web application, you must set up an implementation of the`org.springframework.ui.context.ThemeSource` interface. The `WebApplicationContext`interface extends `ThemeSource` but delegates its responsibilities to a dedicated +implementation. 
By default, the delegate is an`org.springframework.ui.context.support.ResourceBundleThemeSource` implementation that +loads properties files from the root of the classpath. To use a custom `ThemeSource`implementation or to configure the base name prefix of the `ResourceBundleThemeSource`, +you can register a bean in the application context with the reserved name, `themeSource`. +The web application context automatically detects a bean with that name and uses it. + +When you use the `ResourceBundleThemeSource`, a theme is defined in a simple properties +file. The properties file lists the resources that make up the theme, as the following example shows: + +``` +styleSheet=/themes/cool/style.css +background=/themes/cool/img/coolBg.jpg +``` + +The keys of the properties are the names that refer to the themed elements from view +code. For a JSP, you typically do this using the `spring:theme` custom tag, which is +very similar to the `spring:message` tag. The following JSP fragment uses the theme +defined in the previous example to customize the look and feel: + +``` +<%@ taglib prefix="spring" uri="http://www.springframework.org/tags"%> +<html> + <head> + <link rel="stylesheet" href="<spring:theme code='styleSheet'/>" type="text/css"/> + </head> + <body style="background=<spring:theme code='background'/>"> + ... + </body> +</html> +``` + +By default, the `ResourceBundleThemeSource` uses an empty base name prefix. As a result, +the properties files are loaded from the root of the classpath. Thus, you would put the`cool.properties` theme definition in a directory at the root of the classpath (for +example, in `/WEB-INF/classes`). The `ResourceBundleThemeSource` uses the standard Java +resource bundle loading mechanism, allowing for full internationalization of themes. For +example, we could have a `/WEB-INF/classes/cool_nl.properties` that references a special +background image with Dutch text on it. 
+ +##### Resolving Themes + +After you define themes, as described in the [preceding section](#mvc-themeresolver-defining), +you decide which theme to use. The `DispatcherServlet` looks for a bean named `themeResolver`to find out which `ThemeResolver` implementation to use. A theme resolver works in much the same +way as a `LocaleResolver`. It detects the theme to use for a particular request and can also +alter the request’s theme. The following table describes the theme resolvers provided by Spring: + +| Class | Description | +|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| `FixedThemeResolver` | Selects a fixed theme, set by using the `defaultThemeName` property. | +|`SessionThemeResolver`|The theme is maintained in the user’s HTTP session. It needs to be set only once for<br/>each session but is not persisted between sessions.| +|`CookieThemeResolver` | The selected theme is stored in a cookie on the client. | + +Spring also provides a `ThemeChangeInterceptor` that lets theme changes on every +request with a simple request parameter. + +#### 1.1.12. Multipart Resolver + +[WebFlux](web-reactive.html#webflux-multipart) + +`MultipartResolver` from the `org.springframework.web.multipart` package is a strategy +for parsing multipart requests including file uploads. There is one implementation +based on [Commons FileUpload](https://commons.apache.org/proper/commons-fileupload) and +another based on Servlet 3.0 multipart request parsing. + +To enable multipart handling, you need to declare a `MultipartResolver` bean in your`DispatcherServlet` Spring configuration with a name of `multipartResolver`. +The `DispatcherServlet` detects it and applies it to the incoming request. 
When a POST +with a content type of `multipart/form-data` is received, the resolver parses the +content and wraps the current `HttpServletRequest` as a `MultipartHttpServletRequest` to +provide access to resolved files in addition to exposing parts as request parameters. + +##### Apache Commons `FileUpload` + +To use Apache Commons `FileUpload`, you can configure a bean of type `CommonsMultipartResolver` with a name of `multipartResolver`. You also need to have +the `commons-fileupload` jar as a dependency on your classpath. + +This resolver variant delegates to a local library within the application, providing +maximum portability across Servlet containers. As an alternative, consider standard +Servlet multipart resolution through the container’s own parser as discussed below. + +| |Commons FileUpload traditionally applies to POST requests only but accepts any `multipart/` content type. See the [`CommonsMultipartResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/multipart/commons/CommonsMultipartResolver.html) javadoc for details and configuration options.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Servlet 3.0 + +Servlet 3.0 multipart parsing needs to be enabled through Servlet container configuration. +To do so: + +* In Java, set a `MultipartConfigElement` on the Servlet registration. + +* In `web.xml`, add a `"<multipart-config>"` section to the servlet declaration. + +The following example shows how to set a `MultipartConfigElement` on the Servlet registration: + +Java + +``` +public class AppInitializer extends AbstractAnnotationConfigDispatcherServletInitializer { + + // ... 
+ + @Override + protected void customizeRegistration(ServletRegistration.Dynamic registration) { + + // Optionally also set maxFileSize, maxRequestSize, fileSizeThreshold + registration.setMultipartConfig(new MultipartConfigElement("/tmp")); + } + +} +``` + +Kotlin + +``` +class AppInitializer : AbstractAnnotationConfigDispatcherServletInitializer() { + + // ... + + override fun customizeRegistration(registration: ServletRegistration.Dynamic) { + + // Optionally also set maxFileSize, maxRequestSize, fileSizeThreshold + registration.setMultipartConfig(MultipartConfigElement("/tmp")) + } + +} +``` + +Once the Servlet 3.0 configuration is in place, you can add a bean of type`StandardServletMultipartResolver` with a name of `multipartResolver`. + +| |This resolver variant uses your Servlet container’s multipart parser as-is,<br/>potentially exposing the application to container implementation differences.<br/>By default, it will try to parse any `multipart/` content type with any HTTP<br/>method but this may not be supported across all Servlet containers. See the[`StandardServletMultipartResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/multipart/support/StandardServletMultipartResolver.html)javadoc for details and configuration options.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.1.13. 
Logging + +[WebFlux](web-reactive.html#webflux-logging) + +DEBUG-level logging in Spring MVC is designed to be compact, minimal, and +human-friendly. It focuses on high-value bits of information that are useful over and +over again versus others that are useful only when debugging a specific issue. + +TRACE-level logging generally follows the same principles as DEBUG (and, for example, also +should not be a fire hose) but can be used for debugging any issue. In addition, some log +messages may show a different level of detail at TRACE versus DEBUG. + +Good logging comes from the experience of using the logs. If you spot anything that does +not meet the stated goals, please let us know. + +##### Sensitive Data + +[WebFlux](web-reactive.html#webflux-logging-sensitive-data) + +DEBUG and TRACE logging may log sensitive information. This is why request parameters and +headers are masked by default and their logging in full must be enabled explicitly +through the `enableLoggingRequestDetails` property on `DispatcherServlet`. + +The following example shows how to do so by using Java configuration: + +Java + +``` +public class MyInitializer + extends AbstractAnnotationConfigDispatcherServletInitializer { + + @Override + protected Class<?>[] getRootConfigClasses() { + return ... ; + } + + @Override + protected Class<?>[] getServletConfigClasses() { + return ... ; + } + + @Override + protected String[] getServletMappings() { + return ... ; + } + + @Override + protected void customizeRegistration(ServletRegistration.Dynamic registration) { + registration.setInitParameter("enableLoggingRequestDetails", "true"); + } + +} +``` + +Kotlin + +``` +class MyInitializer : AbstractAnnotationConfigDispatcherServletInitializer() { + + override fun getRootConfigClasses(): Array<Class<*>>? { + return ... + } + + override fun getServletConfigClasses(): Array<Class<*>>? { + return ... + } + + override fun getServletMappings(): Array<String> { + return ... 
+ } + + override fun customizeRegistration(registration: ServletRegistration.Dynamic) { + registration.setInitParameter("enableLoggingRequestDetails", "true") + } +} +``` + +### 1.2. Filters + +[WebFlux](web-reactive.html#webflux-filters) + +The `spring-web` module provides some useful filters: + +* [Form Data](#filters-http-put) + +* [Forwarded Headers](#filters-forwarded-headers) + +* [Shallow ETag](#filters-shallow-etag) + +* [CORS](#filters-cors) + +#### 1.2.1. Form Data + +Browsers can submit form data only through HTTP GET or HTTP POST but non-browser clients can also +use HTTP PUT, PATCH, and DELETE. The Servlet API requires `ServletRequest.getParameter*()`methods to support form field access only for HTTP POST. + +The `spring-web` module provides `FormContentFilter` to intercept HTTP PUT, PATCH, and DELETE +requests with a content type of `application/x-www-form-urlencoded`, read the form data from +the body of the request, and wrap the `ServletRequest` to make the form data +available through the `ServletRequest.getParameter*()` family of methods. + +#### 1.2.2. Forwarded Headers + +[WebFlux](web-reactive.html#webflux-forwarded-headers) + +As a request goes through proxies (such as load balancers) the host, port, and +scheme may change, and that makes it a challenge to create links that point to the correct +host, port, and scheme from a client perspective. + +[RFC 7239](https://tools.ietf.org/html/rfc7239) defines the `Forwarded` HTTP header +that proxies can use to provide information about the original request. There are other +non-standard headers, too, including `X-Forwarded-Host`, `X-Forwarded-Port`,`X-Forwarded-Proto`, `X-Forwarded-Ssl`, and `X-Forwarded-Prefix`. + +`ForwardedHeaderFilter` is a Servlet filter that modifies the request in order to +a) change the host, port, and scheme based on `Forwarded` headers, and b) to remove those +headers to eliminate further impact. 
The filter relies on wrapping the request, and +therefore it must be ordered ahead of other filters, such as `RequestContextFilter`, that +should work with the modified and not the original request. + +There are security considerations for forwarded headers since an application cannot know +if the headers were added by a proxy, as intended, or by a malicious client. This is why +a proxy at the boundary of trust should be configured to remove untrusted `Forwarded`headers that come from the outside. You can also configure the `ForwardedHeaderFilter`with `removeOnly=true`, in which case it removes but does not use the headers. + +In order to support [asynchronous requests](#mvc-ann-async) and error dispatches this +filter should be mapped with `DispatcherType.ASYNC` and also `DispatcherType.ERROR`. +If using Spring Framework’s `AbstractAnnotationConfigDispatcherServletInitializer`(see [Servlet Config](#mvc-container-config)) all filters are automatically registered for all dispatch +types. However if registering the filter via `web.xml` or in Spring Boot via a`FilterRegistrationBean` be sure to include `DispatcherType.ASYNC` and`DispatcherType.ERROR` in addition to `DispatcherType.REQUEST`. + +#### 1.2.3. Shallow ETag + +The `ShallowEtagHeaderFilter` filter creates a “shallow” ETag by caching the content +written to the response and computing an MD5 hash from it. The next time a client sends, +it does the same, but it also compares the computed value against the `If-None-Match`request header and, if the two are equal, returns a 304 (NOT\_MODIFIED). + +This strategy saves network bandwidth but not CPU, as the full response must be computed +for each request. Other strategies at the controller level, described earlier, can avoid +the computation. See [HTTP Caching](#mvc-caching). 
+ +This filter has a `writeWeakETag` parameter that configures the filter to write weak ETags +similar to the following: `W/"02a2d595e6ed9a0b24f027f2b63b134d6"` (as defined in[RFC 7232 Section 2.3](https://tools.ietf.org/html/rfc7232#section-2.3)). + +In order to support [asynchronous requests](#mvc-ann-async) this filter must be mapped +with `DispatcherType.ASYNC` so that the filter can delay and successfully generate an +ETag to the end of the last async dispatch. If using Spring Framework’s`AbstractAnnotationConfigDispatcherServletInitializer` (see [Servlet Config](#mvc-container-config)) +all filters are automatically registered for all dispatch types. However if registering +the filter via `web.xml` or in Spring Boot via a `FilterRegistrationBean` be sure to include`DispatcherType.ASYNC`. + +#### 1.2.4. CORS + +[WebFlux](web-reactive.html#webflux-filters-cors) + +Spring MVC provides fine-grained support for CORS configuration through annotations on +controllers. However, when used with Spring Security, we advise relying on the built-in`CorsFilter` that must be ordered ahead of Spring Security’s chain of filters. + +See the sections on [CORS](#mvc-cors) and the [CORS Filter](#mvc-cors-filter) for more details. + +### 1.3. Annotated Controllers + +[WebFlux](web-reactive.html#webflux-controller) + +Spring MVC provides an annotation-based programming model where `@Controller` and`@RestController` components use annotations to express request mappings, request input, +exception handling, and more. Annotated controllers have flexible method signatures and +do not have to extend base classes nor implement specific interfaces. 
+The following example shows a controller defined by annotations: + +Java + +``` +@Controller +public class HelloController { + + @GetMapping("/hello") + public String handle(Model model) { + model.addAttribute("message", "Hello World!"); + return "index"; + } +} +``` + +Kotlin + +``` +import org.springframework.ui.set + +@Controller +class HelloController { + + @GetMapping("/hello") + fun handle(model: Model): String { + model["message"] = "Hello World!" + return "index" + } +} +``` + +In the preceding example, the method accepts a `Model` and returns a view name as a `String`, +but many other options exist and are explained later in this chapter. + +| |Guides and tutorials on [spring.io](https://spring.io/guides) use the annotation-based<br/>programming model described in this section.| +|---|---------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.3.1. Declaration + +[WebFlux](web-reactive.html#webflux-ann-controller) + +You can define controller beans by using a standard Spring bean definition in the +Servlet’s `WebApplicationContext`. The `@Controller` stereotype allows for auto-detection, +aligned with Spring general support for detecting `@Component` classes in the classpath +and auto-registering bean definitions for them. It also acts as a stereotype for the +annotated class, indicating its role as a web component. + +To enable auto-detection of such `@Controller` beans, you can add component scanning to +your Java configuration, as the following example shows: + +Java + +``` +@Configuration +@ComponentScan("org.example.web") +public class WebConfig { + + // ... +} +``` + +Kotlin + +``` +@Configuration +@ComponentScan("org.example.web") +class WebConfig { + + // ... 
+} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` +<?xml version="1.0" encoding="UTF-8"?> +<beans xmlns="http://www.springframework.org/schema/beans" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:p="http://www.springframework.org/schema/p" + xmlns:context="http://www.springframework.org/schema/context" + xsi:schemaLocation=" + http://www.springframework.org/schema/beans + https://www.springframework.org/schema/beans/spring-beans.xsd + http://www.springframework.org/schema/context + https://www.springframework.org/schema/context/spring-context.xsd"> + + <context:component-scan base-package="org.example.web"/> + + <!-- ... --> + +</beans> +``` + +`@RestController` is a [composed annotation](core.html#beans-meta-annotations) that is +itself meta-annotated with `@Controller` and `@ResponseBody` to indicate a controller whose +every method inherits the type-level `@ResponseBody` annotation and, therefore, writes +directly to the response body versus view resolution and rendering with an HTML template. + +##### AOP Proxies + +In some cases, you may need to decorate a controller with an AOP proxy at runtime. +One example is if you choose to have `@Transactional` annotations directly on the +controller. When this is the case, for controllers specifically, we recommend +using class-based proxying. This is typically the default choice with controllers. +However, if a controller must implement an interface that is not a Spring Context +callback (such as `InitializingBean`, `*Aware`, and others), you may need to explicitly +configure class-based proxying. For example, with `<tx:annotation-driven/>` you can +change to `<tx:annotation-driven proxy-target-class="true"/>`, and with`@EnableTransactionManagement` you can change to`@EnableTransactionManagement(proxyTargetClass = true)`. + +#### 1.3.2. 
Request Mapping + +[WebFlux](web-reactive.html#webflux-ann-requestmapping) + +You can use the `@RequestMapping` annotation to map requests to controllers methods. It has +various attributes to match by URL, HTTP method, request parameters, headers, and media +types. You can use it at the class level to express shared mappings or at the method level +to narrow down to a specific endpoint mapping. + +There are also HTTP method specific shortcut variants of `@RequestMapping`: + +* `@GetMapping` + +* `@PostMapping` + +* `@PutMapping` + +* `@DeleteMapping` + +* `@PatchMapping` + +The shortcuts are [Custom Annotations](#mvc-ann-requestmapping-composed) that are provided because, +arguably, most controller methods should be mapped to a specific HTTP method versus +using `@RequestMapping`, which, by default, matches to all HTTP methods. +A `@RequestMapping` is still needed at the class level to express shared mappings. + +The following example has type and method level mappings: + +Java + +``` +@RestController +@RequestMapping("/persons") +class PersonController { + + @GetMapping("/{id}") + public Person getPerson(@PathVariable Long id) { + // ... + } + + @PostMapping + @ResponseStatus(HttpStatus.CREATED) + public void add(@RequestBody Person person) { + // ... + } +} +``` + +Kotlin + +``` +@RestController +@RequestMapping("/persons") +class PersonController { + + @GetMapping("/{id}") + fun getPerson(@PathVariable id: Long): Person { + // ... + } + + @PostMapping + @ResponseStatus(HttpStatus.CREATED) + fun add(@RequestBody person: Person) { + // ... + } +} +``` + +##### URI patterns + +[WebFlux](web-reactive.html#webflux-ann-requestmapping-uri-templates) + +`@RequestMapping` methods can be mapped using URL patterns. There are two alternatives: + +* `PathPattern` — a pre-parsed pattern matched against the URL path also pre-parsed as`PathContainer`. Designed for web use, this solution deals effectively with encoding and + path parameters, and matches efficiently. 
+ +* `AntPathMatcher` — match String patterns against a String path. This is the original + solution also used in Spring configuration to select resources on the classpath, on the + filesystem, and other locations. It is less efficient and the String path input is a + challenge for dealing effectively with encoding and other issues with URLs. + +`PathPattern` is the recommended solution for web applications and it is the only choice in +Spring WebFlux. Prior to version 5.3, `AntPathMatcher` was the only choice in Spring MVC +and continues to be the default. However `PathPattern` can be enabled in the[MVC config](#mvc-config-path-matching). + +`PathPattern` supports the same pattern syntax as `AntPathMatcher`. In addition it also +supports the capturing pattern, e.g. `{*spring}`, for matching 0 or more path segments +at the end of a path. `PathPattern` also restricts the use of `**` for matching multiple +path segments such that it’s only allowed at the end of a pattern. This eliminates many +cases of ambiguity when choosing the best matching pattern for a given request. +For full pattern syntax please refer to[PathPattern](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/util/pattern/PathPattern.html) and[AntPathMatcher](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/util/AntPathMatcher.html). + +Some example patterns: + +* `"/resources/ima?e.png"` - match one character in a path segment + +* `"/resources/*.png"` - match zero or more characters in a path segment + +* `"/resources/**"` - match multiple path segments + +* `"/projects/{project}/versions"` - match a path segment and capture it as a variable + +* `"/projects/{project:[a-z]+}/versions"` - match and capture a variable with a regex + +Captured URI variables can be accessed with `@PathVariable`. 
For example: + +Java + +``` +@GetMapping("/owners/{ownerId}/pets/{petId}") +public Pet findPet(@PathVariable Long ownerId, @PathVariable Long petId) { + // ... +} +``` + +Kotlin + +``` +@GetMapping("/owners/{ownerId}/pets/{petId}") +fun findPet(@PathVariable ownerId: Long, @PathVariable petId: Long): Pet { + // ... +} +``` + +You can declare URI variables at the class and method levels, as the following example shows: + +Java + +``` +@Controller +@RequestMapping("/owners/{ownerId}") +public class OwnerController { + + @GetMapping("/pets/{petId}") + public Pet findPet(@PathVariable Long ownerId, @PathVariable Long petId) { + // ... + } +} +``` + +Kotlin + +``` +@Controller +@RequestMapping("/owners/{ownerId}") +class OwnerController { + + @GetMapping("/pets/{petId}") + fun findPet(@PathVariable ownerId: Long, @PathVariable petId: Long): Pet { + // ... + } +} +``` + +URI variables are automatically converted to the appropriate type, or `TypeMismatchException`is raised. Simple types (`int`, `long`, `Date`, and so on) are supported by default and you can +register support for any other data type. +See [Type Conversion](#mvc-ann-typeconversion) and [`DataBinder`](#mvc-ann-initbinder). + +You can explicitly name URI variables (for example, `@PathVariable("customId")`), but you can +leave that detail out if the names are the same and your code is compiled with debugging +information or with the `-parameters` compiler flag on Java 8. + +The syntax `{varName:regex}` declares a URI variable with a regular expression that has +syntax of `{varName:regex}`. For example, given URL `"/spring-web-3.0.5.jar"`, the following method +extracts the name, version, and file extension: + +Java + +``` +@GetMapping("/{name:[a-z-]+}-{version:\\d\\.\\d\\.\\d}{ext:\\.[a-z]+}") +public void handle(@PathVariable String name, @PathVariable String version, @PathVariable String ext) { + // ... 
+} +``` + +Kotlin + +``` +@GetMapping("/{name:[a-z-]+}-{version:\\d\\.\\d\\.\\d}{ext:\\.[a-z]+}") +fun handle(@PathVariable name: String, @PathVariable version: String, @PathVariable ext: String) { + // ... +} +``` + +URI path patterns can also have embedded `${…​}` placeholders that are resolved on startup +by using `PropertyPlaceHolderConfigurer` against local, system, environment, and other property +sources. You can use this, for example, to parameterize a base URL based on some external +configuration. + +##### Pattern Comparison + +[WebFlux](web-reactive.html#webflux-ann-requestmapping-pattern-comparison) + +When multiple patterns match a URL, the best match must be selected. This is done with +one of the following depending on whether use of parsed `PathPattern` is enabled for use or not: + +* [`PathPattern.SPECIFICITY_COMPARATOR`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/util/pattern/PathPattern.html#SPECIFICITY_COMPARATOR) + +* [`AntPathMatcher.getPatternComparator(String path)`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/util/AntPathMatcher.html#getPatternComparator-java.lang.String-) + +Both help to sort patterns with more specific ones on top. A pattern is less specific if +it has a lower count of URI variables (counted as 1), single wildcards (counted as 1), +and double wildcards (counted as 2). Given an equal score, the longer pattern is chosen. +Given the same score and length, the pattern with more URI variables than wildcards is +chosen. + +The default mapping pattern (`/**`) is excluded from scoring and always +sorted last. Also, prefix patterns (such as `/public/**`) are considered less +specific than other pattern that do not have double wildcards. + +For the full details, follow the above links to the pattern Comparators. 
+ +##### Suffix Match + +Starting in 5.3, by default Spring MVC no longer performs `.*` suffix pattern +matching where a controller mapped to `/person` is also implicitly mapped to`/person.*`. As a consequence path extensions are no longer used to interpret +the requested content type for the response — for example, `/person.pdf`, `/person.xml`, +and so on. + +Using file extensions in this way was necessary when browsers used to send `Accept` headers +that were hard to interpret consistently. At present, that is no longer a necessity and +using the `Accept` header should be the preferred choice. + +Over time, the use of file name extensions has proven problematic in a variety of ways. +It can cause ambiguity when overlain with the use of URI variables, path parameters, and +URI encoding. Reasoning about URL-based authorization +and security (see next section for more details) also becomes more difficult. + +To completely disable the use of path extensions in versions prior to 5.3, set the following: + +* `useSuffixPatternMatching(false)`, see [PathMatchConfigurer](#mvc-config-path-matching) + +* `favorPathExtension(false)`, see [ContentNegotiationConfigurer](#mvc-config-content-negotiation) + +Having a way to request content types other than through the `"Accept"` header can still +be useful, e.g. when typing a URL in a browser. A safe alternative to path extensions is +to use the query parameter strategy. If you must use file extensions, consider restricting +them to a list of explicitly registered extensions through the `mediaTypes` property of[ContentNegotiationConfigurer](#mvc-config-content-negotiation). + +##### Suffix Match and RFD + +A reflected file download (RFD) attack is similar to XSS in that it relies on request input +(for example, a query parameter and a URI variable) being reflected in the response. 
However, instead of +inserting JavaScript into HTML, an RFD attack relies on the browser switching to perform a +download and treating the response as an executable script when double-clicked later. + +In Spring MVC, `@ResponseBody` and `ResponseEntity` methods are at risk, because +they can render different content types, which clients can request through URL path extensions. +Disabling suffix pattern matching and using path extensions for content negotiation +lower the risk but are not sufficient to prevent RFD attacks. + +To prevent RFD attacks, prior to rendering the response body, Spring MVC adds a`Content-Disposition:inline;filename=f.txt` header to suggest a fixed and safe download +file. This is done only if the URL path contains a file extension that is neither +allowed as safe nor explicitly registered for content negotiation. However, it can +potentially have side effects when URLs are typed directly into a browser. + +Many common path extensions are allowed as safe by default. Applications with custom`HttpMessageConverter` implementations can explicitly register file extensions for content +negotiation to avoid having a `Content-Disposition` header added for those extensions. +See [Content Types](#mvc-config-content-negotiation). + +See [CVE-2015-5211](https://pivotal.io/security/cve-2015-5211) for additional +recommendations related to RFD. + +##### Consumable Media Types + +[WebFlux](web-reactive.html#webflux-ann-requestmapping-consumes) + +You can narrow the request mapping based on the `Content-Type` of the request, +as the following example shows: + +Java + +``` +@PostMapping(path = "/pets", consumes = "application/json") (1) +public void addPet(@RequestBody Pet pet) { + // ... 
+} +``` + +|**1**|Using a `consumes` attribute to narrow the mapping by the content type.| +|-----|-----------------------------------------------------------------------| + +Kotlin + +``` +@PostMapping("/pets", consumes = ["application/json"]) (1) +fun addPet(@RequestBody pet: Pet) { + // ... +} +``` + +|**1**|Using a `consumes` attribute to narrow the mapping by the content type.| +|-----|-----------------------------------------------------------------------| + +The `consumes` attribute also supports negation expressions — for example, `!text/plain` means any +content type other than `text/plain`. + +You can declare a shared `consumes` attribute at the class level. Unlike most other +request-mapping attributes, however, when used at the class level, a method-level `consumes` attribute +overrides rather than extends the class-level declaration. + +| |`MediaType` provides constants for commonly used media types, such as`APPLICATION_JSON_VALUE` and `APPLICATION_XML_VALUE`.| +|---|--------------------------------------------------------------------------------------------------------------------------| + +##### Producible Media Types + +[WebFlux](web-reactive.html#webflux-ann-requestmapping-produces) + +You can narrow the request mapping based on the `Accept` request header and the list of +content types that a controller method produces, as the following example shows: + +Java + +``` +@GetMapping(path = "/pets/{petId}", produces = "application/json") (1) +@ResponseBody +public Pet getPet(@PathVariable String petId) { + // ... +} +``` + +|**1**|Using a `produces` attribute to narrow the mapping by the content type.| +|-----|-----------------------------------------------------------------------| + +Kotlin + +``` +@GetMapping("/pets/{petId}", produces = ["application/json"]) (1) +@ResponseBody +fun getPet(@PathVariable petId: String): Pet { + // ... 
+} +``` + +|**1**|Using a `produces` attribute to narrow the mapping by the content type.| +|-----|-----------------------------------------------------------------------| + +The media type can specify a character set. Negated expressions are supported — for example,`!text/plain` means any content type other than "text/plain". + +You can declare a shared `produces` attribute at the class level. Unlike most other +request-mapping attributes, however, when used at the class level, a method-level `produces` attribute +overrides rather than extends the class-level declaration. + +| |`MediaType` provides constants for commonly used media types, such as`APPLICATION_JSON_VALUE` and `APPLICATION_XML_VALUE`.| +|---|--------------------------------------------------------------------------------------------------------------------------| + +##### Parameters, headers + +[WebFlux](web-reactive.html#webflux-ann-requestmapping-params-and-headers) + +You can narrow request mappings based on request parameter conditions. You can test for the +presence of a request parameter (`myParam`), for the absence of one (`!myParam`), or for a +specific value (`myParam=myValue`). The following example shows how to test for a specific value: + +Java + +``` +@GetMapping(path = "/pets/{petId}", params = "myParam=myValue") (1) +public void findPet(@PathVariable String petId) { + // ... +} +``` + +|**1**|Testing whether `myParam` equals `myValue`.| +|-----|-------------------------------------------| + +Kotlin + +``` +@GetMapping("/pets/{petId}", params = ["myParam=myValue"]) (1) +fun findPet(@PathVariable petId: String) { + // ... +} +``` + +|**1**|Testing whether `myParam` equals `myValue`.| +|-----|-------------------------------------------| + +You can also use the same with request header conditions, as the following example shows: + +Java + +``` +@GetMapping(path = "/pets", headers = "myHeader=myValue") (1) +public void findPet(@PathVariable String petId) { + // ... 
+} +``` + +|**1**|Testing whether `myHeader` equals `myValue`.| +|-----|--------------------------------------------| + +Kotlin + +``` +@GetMapping("/pets", headers = ["myHeader=myValue"]) (1) +fun findPet(@PathVariable petId: String) { + // ... +} +``` + +| |You can match `Content-Type` and `Accept` with the headers condition, but it is better to use[consumes](#mvc-ann-requestmapping-consumes) and [produces](#mvc-ann-requestmapping-produces)instead.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### HTTP HEAD, OPTIONS + +[WebFlux](web-reactive.html#webflux-ann-requestmapping-head-options) + +`@GetMapping` (and `@RequestMapping(method=HttpMethod.GET)`) support HTTP HEAD +transparently for request mapping. Controller methods do not need to change. +A response wrapper, applied in `javax.servlet.http.HttpServlet`, ensures a `Content-Length`header is set to the number of bytes written (without actually writing to the response). + +`@GetMapping` (and `@RequestMapping(method=HttpMethod.GET)`) are implicitly mapped to +and support HTTP HEAD. An HTTP HEAD request is processed as if it were HTTP GET except +that, instead of writing the body, the number of bytes are counted and the `Content-Length`header is set. + +By default, HTTP OPTIONS is handled by setting the `Allow` response header to the list of HTTP +methods listed in all `@RequestMapping` methods that have matching URL patterns. + +For a `@RequestMapping` without HTTP method declarations, the `Allow` header is set to`GET,HEAD,POST,PUT,PATCH,DELETE,OPTIONS`. Controller methods should always declare the +supported HTTP methods (for example, by using the HTTP method specific variants:`@GetMapping`, `@PostMapping`, and others). + +You can explicitly map the `@RequestMapping` method to HTTP HEAD and HTTP OPTIONS, but that +is not necessary in the common case. 
+ +##### Custom Annotations + +[WebFlux](web-reactive.html#mvc-ann-requestmapping-head-options) + +Spring MVC supports the use of [composed annotations](core.html#beans-meta-annotations)for request mapping. Those are annotations that are themselves meta-annotated with`@RequestMapping` and composed to redeclare a subset (or all) of the `@RequestMapping`attributes with a narrower, more specific purpose. + +`@GetMapping`, `@PostMapping`, `@PutMapping`, `@DeleteMapping`, and `@PatchMapping` are +examples of composed annotations. They are provided because, arguably, most +controller methods should be mapped to a specific HTTP method versus using `@RequestMapping`, +which, by default, matches to all HTTP methods. If you need an example of composed +annotations, look at how those are declared. + +Spring MVC also supports custom request-mapping attributes with custom request-matching +logic. This is a more advanced option that requires subclassing`RequestMappingHandlerMapping` and overriding the `getCustomMethodCondition` method, where +you can check the custom attribute and return your own `RequestCondition`. + +##### Explicit Registrations + +[WebFlux](web-reactive.html#webflux-ann-requestmapping-registration) + +You can programmatically register handler methods, which you can use for dynamic +registrations or for advanced cases, such as different instances of the same handler +under different URLs. 
The following example registers a handler method: + +Java + +``` +@Configuration +public class MyConfig { + + @Autowired + public void setHandlerMapping(RequestMappingHandlerMapping mapping, UserHandler handler) (1) + throws NoSuchMethodException { + + RequestMappingInfo info = RequestMappingInfo + .paths("/user/{id}").methods(RequestMethod.GET).build(); (2) + + Method method = UserHandler.class.getMethod("getUser", Long.class); (3) + + mapping.registerMapping(info, handler, method); (4) + } +} +``` + +|**1**|Inject the target handler and the handler mapping for controllers.| +|-----|------------------------------------------------------------------| +|**2**| Prepare the request mapping meta data. | +|**3**| Get the handler method. | +|**4**| Add the registration. | + +Kotlin + +``` +@Configuration +class MyConfig { + + @Autowired + fun setHandlerMapping(mapping: RequestMappingHandlerMapping, handler: UserHandler) { (1) + val info = RequestMappingInfo.paths("/user/{id}").methods(RequestMethod.GET).build() (2) + val method = UserHandler::class.java.getMethod("getUser", Long::class.java) (3) + mapping.registerMapping(info, handler, method) (4) + } +} +``` + +|**1**|Inject the target handler and the handler mapping for controllers.| +|-----|------------------------------------------------------------------| +|**2**| Prepare the request mapping meta data. | +|**3**| Get the handler method. | +|**4**| Add the registration. | + +#### 1.3.3. Handler Methods + +[WebFlux](web-reactive.html#webflux-ann-methods) + +`@RequestMapping` handler methods have a flexible signature and can choose from a range of +supported controller method arguments and return values. + +##### Method Arguments + +[WebFlux](web-reactive.html#webflux-ann-arguments) + +The next table describes the supported controller method arguments. Reactive types are not supported +for any arguments. 
+ +JDK 8’s `java.util.Optional` is supported as a method argument in combination with +annotations that have a `required` attribute (for example, `@RequestParam`, `@RequestHeader`, +and others) and is equivalent to `required=false`. + +| Controller method argument | Description | +|----------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `WebRequest`, `NativeWebRequest` | Generic access to request parameters and request and session attributes, without direct<br/>use of the Servlet API. | +| `javax.servlet.ServletRequest`, `javax.servlet.ServletResponse` | Choose any specific request or response type — for example, `ServletRequest`, `HttpServletRequest`,<br/>or Spring’s `MultipartRequest`, `MultipartHttpServletRequest`. | +| `javax.servlet.http.HttpSession` | Enforces the presence of a session. As a consequence, such an argument is never `null`.<br/>Note that session access is not thread-safe. Consider setting the`RequestMappingHandlerAdapter` instance’s `synchronizeOnSession` flag to `true` if multiple<br/>requests are allowed to concurrently access a session. | +| `javax.servlet.http.PushBuilder` | Servlet 4.0 push builder API for programmatic HTTP/2 resource pushes.<br/>Note that, per the Servlet specification, the injected `PushBuilder` instance can be null if the client<br/>does not support that HTTP/2 feature. 
| +| `java.security.Principal` |Currently authenticated user — possibly a specific `Principal` implementation class if known.<br/><br/> Note that this argument is not resolved eagerly, if it is annotated in order to allow a custom resolver to resolve it<br/>before falling back on default resolution via `HttpServletRequest#getUserPrincipal`.<br/>For example, the Spring Security `Authentication` implements `Principal` and would be injected as such via`HttpServletRequest#getUserPrincipal`, unless it is also annotated with `@AuthenticationPrincipal` in which case it<br/>is resolved by a custom Spring Security resolver through `Authentication#getPrincipal`.| +| `HttpMethod` | The HTTP method of the request. | +| `java.util.Locale` | The current request locale, determined by the most specific `LocaleResolver` available (in<br/>effect, the configured `LocaleResolver` or `LocaleContextResolver`). | +| `java.util.TimeZone` + `java.time.ZoneId` | The time zone associated with the current request, as determined by a `LocaleContextResolver`. | +| `java.io.InputStream`, `java.io.Reader` | For access to the raw request body as exposed by the Servlet API. | +| `java.io.OutputStream`, `java.io.Writer` | For access to the raw response body as exposed by the Servlet API. | +| `@PathVariable` | For access to URI template variables. See [URI patterns](#mvc-ann-requestmapping-uri-templates). | +| `@MatrixVariable` | For access to name-value pairs in URI path segments. See [Matrix Variables](#mvc-ann-matrix-variables). | +| `@RequestParam` | For access to the Servlet request parameters, including multipart files. Parameter values<br/>are converted to the declared method argument type. See [`@RequestParam`](#mvc-ann-requestparam) as well<br/>as [Multipart](#mvc-multipart-forms).<br/><br/> Note that use of `@RequestParam` is optional for simple parameter values.<br/>See “Any other argument”, at the end of this table. | +| `@RequestHeader` | For access to request headers. 
Header values are converted to the declared method argument<br/>type. See [`@RequestHeader`](#mvc-ann-requestheader). | +| `@CookieValue` | For access to cookies. Cookies values are converted to the declared method argument<br/>type. See [`@CookieValue`](#mvc-ann-cookievalue). | +| `@RequestBody` | For access to the HTTP request body. Body content is converted to the declared method<br/>argument type by using `HttpMessageConverter` implementations. See [`@RequestBody`](#mvc-ann-requestbody). | +| `HttpEntity<B>` | For access to request headers and body. The body is converted with an `HttpMessageConverter`.<br/>See [HttpEntity](#mvc-ann-httpentity). | +| `@RequestPart` | For access to a part in a `multipart/form-data` request, converting the part’s body<br/>with an `HttpMessageConverter`. See [Multipart](#mvc-multipart-forms). | +|`java.util.Map`, `org.springframework.ui.Model`, `org.springframework.ui.ModelMap`| For access to the model that is used in HTML controllers and exposed to templates as<br/>part of view rendering. | +| `RedirectAttributes` | Specify attributes to use in case of a redirect (that is, to be appended to the query<br/>string) and flash attributes to be stored temporarily until the request after redirect.<br/>See [Redirect Attributes](#mvc-redirecting-passing-data) and [Flash Attributes](#mvc-flash-attributes). | +| `@ModelAttribute` | For access to an existing attribute in the model (instantiated if not present) with<br/>data binding and validation applied. See [`@ModelAttribute`](#mvc-ann-modelattrib-method-args) as well as[Model](#mvc-ann-modelattrib-methods) and [`DataBinder`](#mvc-ann-initbinder).<br/><br/> Note that use of `@ModelAttribute` is optional (for example, to set its attributes).<br/>See “Any other argument” at the end of this table. 
| +| `Errors`, `BindingResult` | For access to errors from validation and data binding for a command object<br/>(that is, a `@ModelAttribute` argument) or errors from the validation of a `@RequestBody` or `@RequestPart` argument. You must declare an `Errors` or `BindingResult` argument<br/>immediately after the validated method argument. | +| `SessionStatus` + class-level `@SessionAttributes` | For marking form processing complete, which triggers cleanup of session attributes<br/>declared through a class-level `@SessionAttributes` annotation. See [`@SessionAttributes`](#mvc-ann-sessionattributes) for more details. | +| `UriComponentsBuilder` | For preparing a URL relative to the current request’s host, port, scheme, context path, and<br/>the literal part of the servlet mapping. See [URI Links](#mvc-uri-building). | +| `@SessionAttribute` | For access to any session attribute, in contrast to model attributes stored in the session<br/>as a result of a class-level `@SessionAttributes` declaration. See [`@SessionAttribute`](#mvc-ann-sessionattribute) for more details. | +| `@RequestAttribute` | For access to request attributes. See [`@RequestAttribute`](#mvc-ann-requestattrib) for more details. | +| Any other argument | If a method argument is not matched to any of the earlier values in this table and it is<br/>a simple type (as determined by [BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-)),<br/>it is resolved as a `@RequestParam`. Otherwise, it is resolved as a `@ModelAttribute`. | + +##### Return Values + +[WebFlux](web-reactive.html#webflux-ann-return-types) + +The next table describes the supported controller method return values. Reactive types are +supported for all return values. 
+ +| Controller method return value | Description | +|-----------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `@ResponseBody` | The return value is converted through `HttpMessageConverter` implementations and written to the<br/>response. See [`@ResponseBody`](#mvc-ann-responsebody). | +| `HttpEntity<B>`, `ResponseEntity<B>` | The return value that specifies the full response (including HTTP headers and body) is to be converted<br/>through `HttpMessageConverter` implementations and written to the response.<br/>See [ResponseEntity](#mvc-ann-responseentity). | +| `HttpHeaders` | For returning a response with headers and no body. | +| `String` | A view name to be resolved with `ViewResolver` implementations and used together with the implicit<br/>model — determined through command objects and `@ModelAttribute` methods. The handler<br/>method can also programmatically enrich the model by declaring a `Model` argument<br/>(see [Explicit Registrations](#mvc-ann-requestmapping-registration)). | +| `View` | A `View` instance to use for rendering together with the implicit model — determined<br/>through command objects and `@ModelAttribute` methods. The handler method can also<br/>programmatically enrich the model by declaring a `Model` argument<br/>(see [Explicit Registrations](#mvc-ann-requestmapping-registration)). 
| +| `java.util.Map`, `org.springframework.ui.Model` | Attributes to be added to the implicit model, with the view name implicitly determined<br/>through a `RequestToViewNameTranslator`. | +| `@ModelAttribute` | An attribute to be added to the model, with the view name implicitly determined through<br/>a `RequestToViewNameTranslator`.<br/><br/> Note that `@ModelAttribute` is optional. See "Any other return value" at the end of<br/>this table. | +| `ModelAndView` object | The view and model attributes to use and, optionally, a response status. | +| `void` |A method with a `void` return type (or `null` return value) is considered to have fully<br/>handled the response if it also has a `ServletResponse`, an `OutputStream` argument, or<br/>an `@ResponseStatus` annotation. The same is also true if the controller has made a positive`ETag` or `lastModified` timestamp check (see [Controllers](#mvc-caching-etag-lastmodified) for details).<br/><br/> If none of the above is true, a `void` return type can also indicate “no response body” for<br/>REST controllers or a default view name selection for HTML controllers.| +| `DeferredResult<V>` | Produce any of the preceding return values asynchronously from any thread — for example, as a<br/>result of some event or callback. See [Asynchronous Requests](#mvc-ann-async) and [`DeferredResult`](#mvc-ann-async-deferredresult). | +| `Callable<V>` | Produce any of the above return values asynchronously in a Spring MVC-managed thread.<br/>See [Asynchronous Requests](#mvc-ann-async) and [`Callable`](#mvc-ann-async-callable). | +|`ListenableFuture<V>`,`java.util.concurrent.CompletionStage<V>`,`java.util.concurrent.CompletableFuture<V>`| Alternative to `DeferredResult`, as a convenience (for example, when an underlying service<br/>returns one of those). | +| `ResponseBodyEmitter`, `SseEmitter` | Emit a stream of objects asynchronously to be written to the response with`HttpMessageConverter` implementations. 
Also supported as the body of a `ResponseEntity`.<br/>See [Asynchronous Requests](#mvc-ann-async) and [HTTP Streaming](#mvc-ann-async-http-streaming). | +| `StreamingResponseBody` | Write to the response `OutputStream` asynchronously. Also supported as the body of a`ResponseEntity`. See [Asynchronous Requests](#mvc-ann-async) and [HTTP Streaming](#mvc-ann-async-http-streaming). | +| Reactive types — Reactor, RxJava, or others through `ReactiveAdapterRegistry` | Alternative to `DeferredResult` with multi-value streams (for example, `Flux`, `Observable`)<br/>collected to a `List`.<br/><br/> For streaming scenarios (for example, `text/event-stream`, `application/json+stream`),`SseEmitter` and `ResponseBodyEmitter` are used instead, where `ServletOutputStream`blocking I/O is performed on a Spring MVC-managed thread and back pressure is applied<br/>against the completion of each write.<br/><br/> See [Asynchronous Requests](#mvc-ann-async) and [Reactive Types](#mvc-ann-async-reactive-types). | +| Any other return value | Any return value that does not match any of the earlier values in this table and that<br/>is a `String` or `void` is treated as a view name (default view name selection through`RequestToViewNameTranslator` applies), provided it is not a simple type, as determined by[BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-).<br/>Values that are simple types remain unresolved. | + +##### Type Conversion + +[WebFlux](web-reactive.html#webflux-ann-typeconversion) + +Some annotated controller method arguments that represent `String`-based request input (such as`@RequestParam`, `@RequestHeader`, `@PathVariable`, `@MatrixVariable`, and `@CookieValue`) +can require type conversion if the argument is declared as something other than `String`. + +For such cases, type conversion is automatically applied based on the configured converters. 
+By default, simple types (`int`, `long`, `Date`, and others) are supported. You can customize +type conversion through a `WebDataBinder` (see [`DataBinder`](#mvc-ann-initbinder)) or by registering`Formatters` with the `FormattingConversionService`. +See [Spring Field Formatting](core.html#format). + +A practical issue in type conversion is the treatment of an empty String source value. +Such a value is treated as missing if it becomes `null` as a result of type conversion. +This can be the case for `Long`, `UUID`, and other target types. If you want to allow `null`to be injected, either use the `required` flag on the argument annotation, or declare the +argument as `@Nullable`. + +| |As of 5.3, non-null arguments will be enforced even after type conversion. If your handler<br/>method intends to accept a null value as well, either declare your argument as `@Nullable`or mark it as `required=false` in the corresponding `@RequestParam`, etc. annotation. This is<br/>a best practice and the recommended solution for regressions encountered in a 5.3 upgrade.<br/><br/>Alternatively, you may specifically handle e.g. the resulting `MissingPathVariableException`in the case of a required `@PathVariable`. 
A null value after conversion will be treated like<br/>an empty original value, so the corresponding `Missing…​Exception` variants will be thrown.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Matrix Variables + +[WebFlux](web-reactive.html#webflux-ann-matrix-variables) + +[RFC 3986](https://tools.ietf.org/html/rfc3986#section-3.3) discusses name-value pairs in +path segments. In Spring MVC, we refer to those as “matrix variables” based on an [“old post”](https://www.w3.org/DesignIssues/MatrixURIs.html) by Tim Berners-Lee, but they +can also be referred to as URI path parameters. + +Matrix variables can appear in any path segment, with each variable separated by a semicolon and +multiple values separated by comma (for example, `/cars;color=red,green;year=2012`). Multiple +values can also be specified through repeated variable names (for example, `color=red;color=green;color=blue`). + +If a URL is expected to contain matrix variables, the request mapping for a controller +method must use a URI variable to mask that variable content and ensure the request can +be matched successfully independent of matrix variable order and presence. 
+The following example uses a matrix variable: + +Java + +``` +// GET /pets/42;q=11;r=22 + +@GetMapping("/pets/{petId}") +public void findPet(@PathVariable String petId, @MatrixVariable int q) { + + // petId == 42 + // q == 11 +} +``` + +Kotlin + +``` +// GET /pets/42;q=11;r=22 + +@GetMapping("/pets/{petId}") +fun findPet(@PathVariable petId: String, @MatrixVariable q: Int) { + + // petId == 42 + // q == 11 +} +``` + +Given that all path segments may contain matrix variables, you may sometimes need to +disambiguate which path variable the matrix variable is expected to be in. +The following example shows how to do so: + +Java + +``` +// GET /owners/42;q=11/pets/21;q=22 + +@GetMapping("/owners/{ownerId}/pets/{petId}") +public void findPet( + @MatrixVariable(name="q", pathVar="ownerId") int q1, + @MatrixVariable(name="q", pathVar="petId") int q2) { + + // q1 == 11 + // q2 == 22 +} +``` + +Kotlin + +``` +// GET /owners/42;q=11/pets/21;q=22 + +@GetMapping("/owners/{ownerId}/pets/{petId}") +fun findPet( + @MatrixVariable(name = "q", pathVar = "ownerId") q1: Int, + @MatrixVariable(name = "q", pathVar = "petId") q2: Int) { + + // q1 == 11 + // q2 == 22 +} +``` + +A matrix variable may be defined as optional and a default value specified, as the +following example shows: + +Java + +``` +// GET /pets/42 + +@GetMapping("/pets/{petId}") +public void findPet(@MatrixVariable(required=false, defaultValue="1") int q) { + + // q == 1 +} +``` + +Kotlin + +``` +// GET /pets/42 + +@GetMapping("/pets/{petId}") +fun findPet(@MatrixVariable(required = false, defaultValue = "1") q: Int) { + + // q == 1 +} +``` + +To get all matrix variables, you can use a `MultiValueMap`, as the following example shows: + +Java + +``` +// GET /owners/42;q=11;r=12/pets/21;q=22;s=23 + +@GetMapping("/owners/{ownerId}/pets/{petId}") +public void findPet( + @MatrixVariable MultiValueMap<String, String> matrixVars, + @MatrixVariable(pathVar="petId") MultiValueMap<String, String> petMatrixVars) { + + // 
matrixVars: ["q" : [11,22], "r" : 12, "s" : 23] + // petMatrixVars: ["q" : 22, "s" : 23] +} +``` + +Kotlin + +``` +// GET /owners/42;q=11;r=12/pets/21;q=22;s=23 + +@GetMapping("/owners/{ownerId}/pets/{petId}") +fun findPet( + @MatrixVariable matrixVars: MultiValueMap<String, String>, + @MatrixVariable(pathVar="petId") petMatrixVars: MultiValueMap<String, String>) { + + // matrixVars: ["q" : [11,22], "r" : 12, "s" : 23] + // petMatrixVars: ["q" : 22, "s" : 23] +} +``` + +Note that you need to enable the use of matrix variables. In the MVC Java configuration, +you need to set a `UrlPathHelper` with `removeSemicolonContent=false` through[Path Matching](#mvc-config-path-matching). In the MVC XML namespace, you can set`<mvc:annotation-driven enable-matrix-variables="true"/>`. + +##### `@RequestParam` + +[WebFlux](web-reactive.html#webflux-ann-requestparam) + +You can use the `@RequestParam` annotation to bind Servlet request parameters (that is, +query parameters or form data) to a method argument in a controller. + +The following example shows how to do so: + +Java + +``` +@Controller +@RequestMapping("/pets") +public class EditPetForm { + + // ... + + @GetMapping + public String setupForm(@RequestParam("petId") int petId, Model model) { (1) + Pet pet = this.clinic.loadPet(petId); + model.addAttribute("pet", pet); + return "petForm"; + } + + // ... + +} +``` + +|**1**|Using `@RequestParam` to bind `petId`.| +|-----|--------------------------------------| + +Kotlin + +``` +import org.springframework.ui.set + +@Controller +@RequestMapping("/pets") +class EditPetForm { + + // ... + + @GetMapping + fun setupForm(@RequestParam("petId") petId: Int, model: Model): String { (1) + val pet = this.clinic.loadPet(petId); + model["pet"] = pet + return "petForm" + } + + // ... 
+ +} +``` + +|**1**|Using `@RequestParam` to bind `petId`.| +|-----|--------------------------------------| + +By default, method parameters that use this annotation are required, but you can specify that +a method parameter is optional by setting the `@RequestParam` annotation’s `required` flag to`false` or by declaring the argument with an `java.util.Optional` wrapper. + +Type conversion is automatically applied if the target method parameter type is not`String`. See [Type Conversion](#mvc-ann-typeconversion). + +Declaring the argument type as an array or list allows for resolving multiple parameter +values for the same parameter name. + +When an `@RequestParam` annotation is declared as a `Map<String, String>` or`MultiValueMap<String, String>`, without a parameter name specified in the annotation, +then the map is populated with the request parameter values for each given parameter name. + +Note that use of `@RequestParam` is optional (for example, to set its attributes). +By default, any argument that is a simple value type (as determined by[BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-)) +and is not resolved by any other argument resolver, is treated as if it were annotated +with `@RequestParam`. + +##### `@RequestHeader` + +[WebFlux](web-reactive.html#webflux-ann-requestheader) + +You can use the `@RequestHeader` annotation to bind a request header to a method argument in a +controller. 
+ +Consider the following request, with headers: + +``` +Host localhost:8080 +Accept text/html,application/xhtml+xml,application/xml;q=0.9 +Accept-Language fr,en-gb;q=0.7,en;q=0.3 +Accept-Encoding gzip,deflate +Accept-Charset ISO-8859-1,utf-8;q=0.7,*;q=0.7 +Keep-Alive 300 +``` + +The following example gets the value of the `Accept-Encoding` and `Keep-Alive` headers: + +Java + +``` +@GetMapping("/demo") +public void handle( + @RequestHeader("Accept-Encoding") String encoding, (1) + @RequestHeader("Keep-Alive") long keepAlive) { (2) + //... +} +``` + +|**1**|Get the value of the `Accept-Encoding` header.| +|-----|----------------------------------------------| +|**2**| Get the value of the `Keep-Alive` header. | + +Kotlin + +``` +@GetMapping("/demo") +fun handle( + @RequestHeader("Accept-Encoding") encoding: String, (1) + @RequestHeader("Keep-Alive") keepAlive: Long) { (2) + //... +} +``` + +|**1**|Get the value of the `Accept-Encoding` header.| +|-----|----------------------------------------------| +|**2**| Get the value of the `Keep-Alive` header. | + +If the target method parameter type is not`String`, type conversion is automatically applied. See [Type Conversion](#mvc-ann-typeconversion). + +When an `@RequestHeader` annotation is used on a `Map<String, String>`,`MultiValueMap<String, String>`, or `HttpHeaders` argument, the map is populated +with all header values. + +| |Built-in support is available for converting a comma-separated string into an<br/>array or collection of strings or other types known to the type conversion system. 
For<br/>example, a method parameter annotated with `@RequestHeader("Accept")` can be of type`String` but also `String[]` or `List<String>`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `@CookieValue` + +[WebFlux](web-reactive.html#webflux-ann-cookievalue) + +You can use the `@CookieValue` annotation to bind the value of an HTTP cookie to a method argument +in a controller. + +Consider a request with the following cookie: + +``` +JSESSIONID=415A4AC178C59DACE0B2C9CA727CDD84 +``` + +The following example shows how to get the cookie value: + +Java + +``` +@GetMapping("/demo") +public void handle(@CookieValue("JSESSIONID") String cookie) { (1) + //... +} +``` + +|**1**|Get the value of the `JSESSIONID` cookie.| +|-----|-----------------------------------------| + +Kotlin + +``` +@GetMapping("/demo") +fun handle(@CookieValue("JSESSIONID") cookie: String) { (1) + //... +} +``` + +|**1**|Get the value of the `JSESSIONID` cookie.| +|-----|-----------------------------------------| + +If the target method parameter type is not `String`, type conversion is applied automatically. +See [Type Conversion](#mvc-ann-typeconversion). + +##### `@ModelAttribute` + +[WebFlux](web-reactive.html#webflux-ann-modelattrib-method-args) + +You can use the `@ModelAttribute` annotation on a method argument to access an attribute from +the model or have it be instantiated if not present. The model attribute is also overlain with +values from HTTP Servlet request parameters whose names match to field names. This is referred +to as data binding, and it saves you from having to deal with parsing and converting individual +query parameters and form fields. 
The following example shows how to do so: + +Java + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +public String processSubmit(@ModelAttribute Pet pet) { + // method logic... +} +``` + +Kotlin + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +fun processSubmit(@ModelAttribute pet: Pet): String { + // method logic... +} +``` + +The `Pet` instance above is sourced in one of the following ways: + +* Retrieved from the model where it may have been added by a[@ModelAttribute method](#mvc-ann-modelattrib-methods). + +* Retrieved from the HTTP session if the model attribute was listed in + the class-level [`@SessionAttributes`](#mvc-ann-sessionattributes) annotation. + +* Obtained through a `Converter` where the model attribute name matches the name of a + request value such as a path variable or a request parameter (see next example). + +* Instantiated using its default constructor. + +* Instantiated through a “primary constructor” with arguments that match to Servlet + request parameters. Argument names are determined through JavaBeans`@ConstructorProperties` or through runtime-retained parameter names in the bytecode. + +One alternative to using a [@ModelAttribute method](#mvc-ann-modelattrib-methods) to +supply it or relying on the framework to create the model attribute, is to have a`Converter<String, T>` to provide the instance. This is applied when the model attribute +name matches to the name of a request value such as a path variable or a request +parameter, and there is a `Converter` from `String` to the model attribute type. +In the following example, the model attribute name is `account` which matches the URI +path variable `account`, and there is a registered `Converter<String, Account>` which +could load the `Account` from a data store: + +Java + +``` +@PutMapping("/accounts/{account}") +public String save(@ModelAttribute("account") Account account) { + // ... 
+} +``` + +Kotlin + +``` +@PutMapping("/accounts/{account}") +fun save(@ModelAttribute("account") account: Account): String { + // ... +} +``` + +After the model attribute instance is obtained, data binding is applied. The`WebDataBinder` class matches Servlet request parameter names (query parameters and form +fields) to field names on the target `Object`. Matching fields are populated after type +conversion is applied, where necessary. For more on data binding (and validation), see[Validation](core.html#validation). For more on customizing data binding, see[`DataBinder`](#mvc-ann-initbinder). + +Data binding can result in errors. By default, a `BindException` is raised. However, to check +for such errors in the controller method, you can add a `BindingResult` argument immediately next +to the `@ModelAttribute`, as the following example shows: + +Java + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +public String processSubmit(@ModelAttribute("pet") Pet pet, BindingResult result) { (1) + if (result.hasErrors()) { + return "petForm"; + } + // ... +} +``` + +|**1**|Adding a `BindingResult` next to the `@ModelAttribute`.| +|-----|-------------------------------------------------------| + +Kotlin + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +fun processSubmit(@ModelAttribute("pet") pet: Pet, result: BindingResult): String { (1) + if (result.hasErrors()) { + return "petForm" + } + // ... +} +``` + +|**1**|Adding a `BindingResult` next to the `@ModelAttribute`.| +|-----|-------------------------------------------------------| + +In some cases, you may want access to a model attribute without data binding. 
For such +cases, you can inject the `Model` into the controller and access it directly or, +alternatively, set `@ModelAttribute(binding=false)`, as the following example shows: + +Java + +``` +@ModelAttribute +public AccountForm setUpForm() { + return new AccountForm(); +} + +@ModelAttribute +public Account findAccount(@PathVariable String accountId) { + return accountRepository.findOne(accountId); +} + +@PostMapping("update") +public String update(@Valid AccountForm form, BindingResult result, + @ModelAttribute(binding=false) Account account) { (1) + // ... +} +``` + +|**1**|Setting `@ModelAttribute(binding=false)`.| +|-----|-----------------------------------------| + +Kotlin + +``` +@ModelAttribute +fun setUpForm(): AccountForm { + return AccountForm() +} + +@ModelAttribute +fun findAccount(@PathVariable accountId: String): Account { + return accountRepository.findOne(accountId) +} + +@PostMapping("update") +fun update(@Valid form: AccountForm, result: BindingResult, + @ModelAttribute(binding = false) account: Account): String { (1) + // ... +} +``` + +|**1**|Setting `@ModelAttribute(binding=false)`.| +|-----|-----------------------------------------| + +You can automatically apply validation after data binding by adding the`javax.validation.Valid` annotation or Spring’s `@Validated` annotation +([Bean Validation](core.html#validation-beanvalidation) and[Spring validation](core.html#validation)). The following example shows how to do so: + +Java + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +public String processSubmit(@Valid @ModelAttribute("pet") Pet pet, BindingResult result) { (1) + if (result.hasErrors()) { + return "petForm"; + } + // ... 
+} +``` + +|**1**|Validate the `Pet` instance.| +|-----|----------------------------| + +Kotlin + +``` +@PostMapping("/owners/{ownerId}/pets/{petId}/edit") +fun processSubmit(@Valid @ModelAttribute("pet") pet: Pet, result: BindingResult): String { (1) + if (result.hasErrors()) { + return "petForm" + } + // ... +} +``` + +Note that using `@ModelAttribute` is optional (for example, to set its attributes). +By default, any argument that is not a simple value type (as determined by[BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-)) +and is not resolved by any other argument resolver is treated as if it were annotated +with `@ModelAttribute`. + +##### `@SessionAttributes` + +[WebFlux](web-reactive.html#webflux-ann-sessionattributes) + +`@SessionAttributes` is used to store model attributes in the HTTP Servlet session between +requests. It is a type-level annotation that declares the session attributes used by a +specific controller. This typically lists the names of model attributes or types of +model attributes that should be transparently stored in the session for subsequent +requests to access. + +The following example uses the `@SessionAttributes` annotation: + +Java + +``` +@Controller +@SessionAttributes("pet") (1) +public class EditPetForm { + // ... +} +``` + +|**1**|Using the `@SessionAttributes` annotation.| +|-----|------------------------------------------| + +Kotlin + +``` +@Controller +@SessionAttributes("pet") (1) +class EditPetForm { + // ... +} +``` + +|**1**|Using the `@SessionAttributes` annotation.| +|-----|------------------------------------------| + +On the first request, when a model attribute with the name, `pet`, is added to the model, +it is automatically promoted to and saved in the HTTP Servlet session. 
It remains there +until another controller method uses a `SessionStatus` method argument to clear the +storage, as the following example shows: + +Java + +``` +@Controller +@SessionAttributes("pet") (1) +public class EditPetForm { + + // ... + + @PostMapping("/pets/{id}") + public String handle(Pet pet, BindingResult errors, SessionStatus status) { + if (errors.hasErrors()) { + // ... + } + status.setComplete(); (2) + // ... + } +} +``` + +|**1**| Storing the `Pet` value in the Servlet session. | +|-----|--------------------------------------------------| +|**2**|Clearing the `Pet` value from the Servlet session.| + +Kotlin + +``` +@Controller +@SessionAttributes("pet") (1) +class EditPetForm { + + // ... + + @PostMapping("/pets/{id}") + fun handle(pet: Pet, errors: BindingResult, status: SessionStatus): String { + if (errors.hasErrors()) { + // ... + } + status.setComplete() (2) + // ... + } +} +``` + +|**1**| Storing the `Pet` value in the Servlet session. | +|-----|--------------------------------------------------| +|**2**|Clearing the `Pet` value from the Servlet session.| + +##### `@SessionAttribute` + +[WebFlux](web-reactive.html#webflux-ann-sessionattribute) + +If you need access to pre-existing session attributes that are managed globally +(that is, outside the controller — for example, by a filter) and may or may not be present, +you can use the `@SessionAttribute` annotation on a method parameter, +as the following example shows: + +Java + +``` +@RequestMapping("/") +public String handle(@SessionAttribute User user) { (1) + // ... +} +``` + +|**1**|Using a `@SessionAttribute` annotation.| +|-----|---------------------------------------| + +Kotlin + +``` +@RequestMapping("/") +fun handle(@SessionAttribute user: User): String { (1) + // ... +} +``` + +For use cases that require adding or removing session attributes, consider injecting `org.springframework.web.context.request.WebRequest` or `javax.servlet.http.HttpSession` into the controller method. 
+ +For temporary storage of model attributes in the session as part of a controller +workflow, consider using `@SessionAttributes` as described in[`@SessionAttributes`](#mvc-ann-sessionattributes). + +##### `@RequestAttribute` + +[WebFlux](web-reactive.html#webflux-ann-requestattrib) + +Similar to `@SessionAttribute`, you can use the `@RequestAttribute` annotations to +access pre-existing request attributes created earlier (for example, by a Servlet `Filter`or `HandlerInterceptor`): + +Java + +``` +@GetMapping("/") +public String handle(@RequestAttribute Client client) { (1) + // ... +} +``` + +|**1**|Using the `@RequestAttribute` annotation.| +|-----|-----------------------------------------| + +Kotlin + +``` +@GetMapping("/") +fun handle(@RequestAttribute client: Client): String { (1) + // ... +} +``` + +|**1**|Using the `@RequestAttribute` annotation.| +|-----|-----------------------------------------| + +##### Redirect Attributes + +By default, all model attributes are considered to be exposed as URI template variables in +the redirect URL. Of the remaining attributes, those that are primitive types or +collections or arrays of primitive types are automatically appended as query parameters. + +Appending primitive type attributes as query parameters can be the desired result if a +model instance was prepared specifically for the redirect. However, in annotated +controllers, the model can contain additional attributes added for rendering purposes (for example, +drop-down field values). To avoid the possibility of having such attributes appear in the +URL, a `@RequestMapping` method can declare an argument of type `RedirectAttributes` and +use it to specify the exact attributes to make available to `RedirectView`. If the method +does redirect, the content of `RedirectAttributes` is used. Otherwise, the content of the +model is used. 
+ +The `RequestMappingHandlerAdapter` provides a flag called`ignoreDefaultModelOnRedirect`, which you can use to indicate that the content of the default`Model` should never be used if a controller method redirects. Instead, the controller +method should declare an attribute of type `RedirectAttributes` or, if it does not do so, +no attributes should be passed on to `RedirectView`. Both the MVC namespace and the MVC +Java configuration keep this flag set to `false`, to maintain backwards compatibility. +However, for new applications, we recommend setting it to `true`. + +Note that URI template variables from the present request are automatically made +available when expanding a redirect URL, and you don’t need to explicitly add them +through `Model` or `RedirectAttributes`. The following example shows how to define a redirect: + +Java + +``` +@PostMapping("/files/{path}") +public String upload(...) { + // ... + return "redirect:files/{path}"; +} +``` + +Kotlin + +``` +@PostMapping("/files/{path}") +fun upload(...): String { + // ... + return "redirect:files/{path}" +} +``` + +Another way of passing data to the redirect target is by using flash attributes. Unlike +other redirect attributes, flash attributes are saved in the HTTP session (and, hence, do +not appear in the URL). See [Flash Attributes](#mvc-flash-attributes) for more information. + +##### Flash Attributes + +Flash attributes provide a way for one request to store attributes that are intended for use in +another. This is most commonly needed when redirecting — for example, the +Post-Redirect-Get pattern. Flash attributes are saved temporarily before the +redirect (typically in the session) to be made available to the request after the +redirect and are removed immediately. + +Spring MVC has two main abstractions in support of flash attributes. `FlashMap` is used +to hold flash attributes, while `FlashMapManager` is used to store, retrieve, and manage`FlashMap` instances. 
+ +Flash attribute support is always “on” and does not need to be enabled explicitly. +However, if not used, it never causes HTTP session creation. On each request, there is an +“input” `FlashMap` with attributes passed from a previous request (if any) and an +“output” `FlashMap` with attributes to save for a subsequent request. Both `FlashMap`instances are accessible from anywhere in Spring MVC through static methods in`RequestContextUtils`. + +Annotated controllers typically do not need to work with `FlashMap` directly. Instead, a`@RequestMapping` method can accept an argument of type `RedirectAttributes` and use it +to add flash attributes for a redirect scenario. Flash attributes added through`RedirectAttributes` are automatically propagated to the “output” FlashMap. Similarly, +after the redirect, attributes from the “input” `FlashMap` are automatically added to the`Model` of the controller that serves the target URL. + +Matching requests to flash attributes + +The concept of flash attributes exists in many other web frameworks and has proven to sometimes +be exposed to concurrency issues. This is because, by definition, flash attributes +are to be stored until the next request. However the very “next” request may not be the +intended recipient but another asynchronous request (for example, polling or resource requests), +in which case the flash attributes are removed too early. + +To reduce the possibility of such issues, `RedirectView` automatically “stamps”`FlashMap` instances with the path and query parameters of the target redirect URL. In +turn, the default `FlashMapManager` matches that information to incoming requests when +it looks up the “input” `FlashMap`. + +This does not entirely eliminate the possibility of a concurrency issue but +reduces it greatly with information that is already available in the redirect URL. +Therefore, we recommend that you use flash attributes mainly for redirect scenarios. 
+ +##### Multipart + +[WebFlux](web-reactive.html#webflux-multipart-forms) + +After a `MultipartResolver` has been [enabled](#mvc-multipart), the content of POST +requests with `multipart/form-data` is parsed and accessible as regular request +parameters. The following example accesses one regular form field and one uploaded +file: + +Java + +``` +@Controller +public class FileUploadController { + + @PostMapping("/form") + public String handleFormUpload(@RequestParam("name") String name, + @RequestParam("file") MultipartFile file) { + + if (!file.isEmpty()) { + byte[] bytes = file.getBytes(); + // store the bytes somewhere + return "redirect:uploadSuccess"; + } + return "redirect:uploadFailure"; + } +} +``` + +Kotlin + +``` +@Controller +class FileUploadController { + + @PostMapping("/form") + fun handleFormUpload(@RequestParam("name") name: String, + @RequestParam("file") file: MultipartFile): String { + + if (!file.isEmpty) { + val bytes = file.bytes + // store the bytes somewhere + return "redirect:uploadSuccess" + } + return "redirect:uploadFailure" + } +} +``` + +Declaring the argument type as a `List<MultipartFile>` allows for resolving multiple +files for the same parameter name. + +When the `@RequestParam` annotation is declared as a `Map<String, MultipartFile>` or`MultiValueMap<String, MultipartFile>`, without a parameter name specified in the annotation, +then the map is populated with the multipart files for each given parameter name. + +| |With Servlet 3.0 multipart parsing, you may also declare `javax.servlet.http.Part`instead of Spring’s `MultipartFile`, as a method argument or collection value type.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also use multipart content as part of data binding to a[command object](#mvc-ann-modelattrib-method-args). 
For example, the form field +and file from the preceding example could be fields on a form object, +as the following example shows: + +Java + +``` +class MyForm { + + private String name; + + private MultipartFile file; + + // ... +} + +@Controller +public class FileUploadController { + + @PostMapping("/form") + public String handleFormUpload(MyForm form, BindingResult errors) { + if (!form.getFile().isEmpty()) { + byte[] bytes = form.getFile().getBytes(); + // store the bytes somewhere + return "redirect:uploadSuccess"; + } + return "redirect:uploadFailure"; + } +} +``` + +Kotlin + +``` +class MyForm(val name: String, val file: MultipartFile, ...) + +@Controller +class FileUploadController { + + @PostMapping("/form") + fun handleFormUpload(form: MyForm, errors: BindingResult): String { + if (!form.file.isEmpty) { + val bytes = form.file.bytes + // store the bytes somewhere + return "redirect:uploadSuccess" + } + return "redirect:uploadFailure" + } +} +``` + +Multipart requests can also be submitted from non-browser clients in a RESTful service +scenario. The following example shows a file with JSON: + +``` +POST /someUrl +Content-Type: multipart/mixed + +--edt7Tfrdusa7r3lNQc79vXuhIIMlatb7PQg7Vp +Content-Disposition: form-data; name="meta-data" +Content-Type: application/json; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +{ + "name": "value" +} +--edt7Tfrdusa7r3lNQc79vXuhIIMlatb7PQg7Vp +Content-Disposition: form-data; name="file-data"; filename="file.properties" +Content-Type: text/xml +Content-Transfer-Encoding: 8bit +... File Data ... +``` + +You can access the "meta-data" part with `@RequestParam` as a `String` but you’ll +probably want it deserialized from JSON (similar to `@RequestBody`). 
Use the`@RequestPart` annotation to access a multipart after converting it with an[HttpMessageConverter](integration.html#rest-message-conversion): + +Java + +``` +@PostMapping("/") +public String handle(@RequestPart("meta-data") MetaData metadata, + @RequestPart("file-data") MultipartFile file) { + // ... +} +``` + +Kotlin + +``` +@PostMapping("/") +fun handle(@RequestPart("meta-data") metadata: MetaData, + @RequestPart("file-data") file: MultipartFile): String { + // ... +} +``` + +You can use `@RequestPart` in combination with `javax.validation.Valid` or use Spring’s`@Validated` annotation, both of which cause Standard Bean Validation to be applied. +By default, validation errors cause a `MethodArgumentNotValidException`, which is turned +into a 400 (BAD\_REQUEST) response. Alternatively, you can handle validation errors locally +within the controller through an `Errors` or `BindingResult` argument, +as the following example shows: + +Java + +``` +@PostMapping("/") +public String handle(@Valid @RequestPart("meta-data") MetaData metadata, + BindingResult result) { + // ... +} +``` + +Kotlin + +``` +@PostMapping("/") +fun handle(@Valid @RequestPart("meta-data") metadata: MetaData, + result: BindingResult): String { + // ... +} +``` + +##### `@RequestBody` + +[WebFlux](web-reactive.html#webflux-ann-requestbody) + +You can use the `@RequestBody` annotation to have the request body read and deserialized into an`Object` through an [`HttpMessageConverter`](integration.html#rest-message-conversion). +The following example uses a `@RequestBody` argument: + +Java + +``` +@PostMapping("/accounts") +public void handle(@RequestBody Account account) { + // ... +} +``` + +Kotlin + +``` +@PostMapping("/accounts") +fun handle(@RequestBody account: Account) { + // ... +} +``` + +You can use the [Message Converters](#mvc-config-message-converters) option of the [MVC Config](#mvc-config) to +configure or customize message conversion. 
+ +You can use `@RequestBody` in combination with `javax.validation.Valid` or Spring’s`@Validated` annotation, both of which cause Standard Bean Validation to be applied. +By default, validation errors cause a `MethodArgumentNotValidException`, which is turned +into a 400 (BAD\_REQUEST) response. Alternatively, you can handle validation errors locally +within the controller through an `Errors` or `BindingResult` argument, +as the following example shows: + +Java + +``` +@PostMapping("/accounts") +public void handle(@Valid @RequestBody Account account, BindingResult result) { + // ... +} +``` + +Kotlin + +``` +@PostMapping("/accounts") +fun handle(@Valid @RequestBody account: Account, result: BindingResult) { + // ... +} +``` + +##### HttpEntity + +[WebFlux](web-reactive.html#webflux-ann-httpentity) + +`HttpEntity` is more or less identical to using [`@RequestBody`](#mvc-ann-requestbody) but is based on a +container object that exposes request headers and body. The following listing shows an example: + +Java + +``` +@PostMapping("/accounts") +public void handle(HttpEntity<Account> entity) { + // ... +} +``` + +Kotlin + +``` +@PostMapping("/accounts") +fun handle(entity: HttpEntity<Account>) { + // ... +} +``` + +##### `@ResponseBody` + +[WebFlux](web-reactive.html#webflux-ann-responsebody) + +You can use the `@ResponseBody` annotation on a method to have the return serialized +to the response body through an[HttpMessageConverter](integration.html#rest-message-conversion). +The following listing shows an example: + +Java + +``` +@GetMapping("/accounts/{id}") +@ResponseBody +public Account handle() { + // ... +} +``` + +Kotlin + +``` +@GetMapping("/accounts/{id}") +@ResponseBody +fun handle(): Account { + // ... +} +``` + +`@ResponseBody` is also supported at the class level, in which case it is inherited by +all controller methods. This is the effect of `@RestController`, which is nothing more +than a meta-annotation marked with `@Controller` and `@ResponseBody`. 
+ +You can use `@ResponseBody` with reactive types. +See [Asynchronous Requests](#mvc-ann-async) and [Reactive Types](#mvc-ann-async-reactive-types) for more details. + +You can use the [Message Converters](#mvc-config-message-converters) option of the [MVC Config](#mvc-config) to +configure or customize message conversion. + +You can combine `@ResponseBody` methods with JSON serialization views. +See [Jackson JSON](#mvc-ann-jackson) for details. + +##### ResponseEntity + +[WebFlux](web-reactive.html#webflux-ann-responseentity) + +`ResponseEntity` is like [`@ResponseBody`](#mvc-ann-responsebody) but with status and headers. For example: + +Java + +``` +@GetMapping("/something") +public ResponseEntity<String> handle() { + String body = ... ; + String etag = ... ; + return ResponseEntity.ok().eTag(etag).build(body); +} +``` + +Kotlin + +``` +@GetMapping("/something") +fun handle(): ResponseEntity<String> { + val body = ... + val etag = ... + return ResponseEntity.ok().eTag(etag).build(body) +} +``` + +Spring MVC supports using a single value [reactive type](#mvc-ann-async-reactive-types)to produce the `ResponseEntity` asynchronously, and/or single and multi-value reactive +types for the body. This allows the following types of async responses: + +* `ResponseEntity<Mono<T>>` or `ResponseEntity<Flux<T>>` make the response status and + headers known immediately while the body is provided asynchronously at a later point. + Use `Mono` if the body consists of 0..1 values or `Flux` if it can produce multiple values. + +* `Mono<ResponseEntity<T>>` provides all three — response status, headers, and body, + asynchronously at a later point. This allows the response status and headers to vary + depending on the outcome of asynchronous request handling. + +##### Jackson JSON + +Spring offers support for the Jackson JSON library. 
+ +###### JSON Views + +[WebFlux](web-reactive.html#webflux-ann-jsonview) + +Spring MVC provides built-in support for[Jackson’s Serialization Views](https://www.baeldung.com/jackson-json-view-annotation), +which allow rendering only a subset of all fields in an `Object`. To use it with`@ResponseBody` or `ResponseEntity` controller methods, you can use Jackson’s`@JsonView` annotation to activate a serialization view class, as the following example shows: + +Java + +``` +@RestController +public class UserController { + + @GetMapping("/user") + @JsonView(User.WithoutPasswordView.class) + public User getUser() { + return new User("eric", "7!jd#h23"); + } +} + +public class User { + + public interface WithoutPasswordView {}; + public interface WithPasswordView extends WithoutPasswordView {}; + + private String username; + private String password; + + public User() { + } + + public User(String username, String password) { + this.username = username; + this.password = password; + } + + @JsonView(WithoutPasswordView.class) + public String getUsername() { + return this.username; + } + + @JsonView(WithPasswordView.class) + public String getPassword() { + return this.password; + } +} +``` + +Kotlin + +``` +@RestController +class UserController { + + @GetMapping("/user") + @JsonView(User.WithoutPasswordView::class) + fun getUser() = User("eric", "7!jd#h23") +} + +class User( + @JsonView(WithoutPasswordView::class) val username: String, + @JsonView(WithPasswordView::class) val password: String) { + + interface WithoutPasswordView + interface WithPasswordView : WithoutPasswordView +} +``` + +| |`@JsonView` allows an array of view classes, but you can specify only one per<br/>controller method. 
If you need to activate multiple views, you can use a composite interface.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you want to do the above programmatically, instead of declaring an `@JsonView` annotation, +wrap the return value with `MappingJacksonValue` and use it to supply the serialization view: + +Java + +``` +@RestController +public class UserController { + + @GetMapping("/user") + public MappingJacksonValue getUser() { + User user = new User("eric", "7!jd#h23"); + MappingJacksonValue value = new MappingJacksonValue(user); + value.setSerializationView(User.WithoutPasswordView.class); + return value; + } +} +``` + +Kotlin + +``` +@RestController +class UserController { + + @GetMapping("/user") + fun getUser(): MappingJacksonValue { + val value = MappingJacksonValue(User("eric", "7!jd#h23")) + value.serializationView = User.WithoutPasswordView::class.java + return value + } +} +``` + +For controllers that rely on view resolution, you can add the serialization view class +to the model, as the following example shows: + +Java + +``` +@Controller +public class UserController extends AbstractController { + + @GetMapping("/user") + public String getUser(Model model) { + model.addAttribute("user", new User("eric", "7!jd#h23")); + model.addAttribute(JsonView.class.getName(), User.WithoutPasswordView.class); + return "userView"; + } +} +``` + +Kotlin + +``` +import org.springframework.ui.set + +@Controller +class UserController : AbstractController() { + + @GetMapping("/user") + fun getUser(model: Model): String { + model["user"] = User("eric", "7!jd#h23") + model[JsonView::class.qualifiedName] = User.WithoutPasswordView::class.java + return "userView" + } +} +``` + +#### 1.3.4. 
Model + +[WebFlux](web-reactive.html#webflux-ann-modelattrib-methods) + +You can use the `@ModelAttribute` annotation: + +* On a [method argument](#mvc-ann-modelattrib-method-args) in `@RequestMapping` methods + to create or access an `Object` from the model and to bind it to the request through a`WebDataBinder`. + +* As a method-level annotation in `@Controller` or `@ControllerAdvice` classes that help + to initialize the model prior to any `@RequestMapping` method invocation. + +* On a `@RequestMapping` method to mark its return value is a model attribute. + +This section discusses `@ModelAttribute` methods — the second item in the preceding list. +A controller can have any number of `@ModelAttribute` methods. All such methods are +invoked before `@RequestMapping` methods in the same controller. A `@ModelAttribute`method can also be shared across controllers through `@ControllerAdvice`. See the section on[Controller Advice](#mvc-ann-controller-advice) for more details. + +`@ModelAttribute` methods have flexible method signatures. They support many of the same +arguments as `@RequestMapping` methods, except for `@ModelAttribute` itself or anything +related to the request body. + +The following example shows a `@ModelAttribute` method: + +Java + +``` +@ModelAttribute +public void populateModel(@RequestParam String number, Model model) { + model.addAttribute(accountRepository.findAccount(number)); + // add more ... +} +``` + +Kotlin + +``` +@ModelAttribute +fun populateModel(@RequestParam number: String, model: Model) { + model.addAttribute(accountRepository.findAccount(number)) + // add more ... 
+} +``` + +The following example adds only one attribute: + +Java + +``` +@ModelAttribute +public Account addAccount(@RequestParam String number) { + return accountRepository.findAccount(number); +} +``` + +Kotlin + +``` +@ModelAttribute +fun addAccount(@RequestParam number: String): Account { + return accountRepository.findAccount(number) +} +``` + +| |When a name is not explicitly specified, a default name is chosen based on the `Object`type, as explained in the javadoc for [`Conventions`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/Conventions.html).<br/>You can always assign an explicit name by using the overloaded `addAttribute` method or<br/>through the `name` attribute on `@ModelAttribute` (for a return value).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can also use `@ModelAttribute` as a method-level annotation on `@RequestMapping` methods, +in which case the return value of the `@RequestMapping` method is interpreted as a model +attribute. This is typically not required, as it is the default behavior in HTML controllers, +unless the return value is a `String` that would otherwise be interpreted as a view name.`@ModelAttribute` can also customize the model attribute name, as the following example shows: + +Java + +``` +@GetMapping("/accounts/{id}") +@ModelAttribute("myAccount") +public Account handle() { + // ... + return account; +} +``` + +Kotlin + +``` +@GetMapping("/accounts/{id}") +@ModelAttribute("myAccount") +fun handle(): Account { + // ... + return account +} +``` + +#### 1.3.5. 
`DataBinder` + +[WebFlux](web-reactive.html#webflux-ann-initbinder) + +`@Controller` or `@ControllerAdvice` classes can have `@InitBinder` methods that +initialize instances of `WebDataBinder`, and those, in turn, can: + +* Bind request parameters (that is, form or query data) to a model object. + +* Convert String-based request values (such as request parameters, path variables, + headers, cookies, and others) to the target type of controller method arguments. + +* Format model object values as `String` values when rendering HTML forms. + +`@InitBinder` methods can register controller-specific `java.beans.PropertyEditor` or +Spring `Converter` and `Formatter` components. In addition, you can use the[MVC config](#mvc-config-conversion) to register `Converter` and `Formatter`types in a globally shared `FormattingConversionService`. + +`@InitBinder` methods support many of the same arguments that `@RequestMapping` methods +do, except for `@ModelAttribute` (command object) arguments. Typically, they are declared +with a `WebDataBinder` argument (for registrations) and a `void` return value. +The following listing shows an example: + +Java + +``` +@Controller +public class FormController { + + @InitBinder (1) + public void initBinder(WebDataBinder binder) { + SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); + dateFormat.setLenient(false); + binder.registerCustomEditor(Date.class, new CustomDateEditor(dateFormat, false)); + } + + // ... +} +``` + +|**1**|Defining an `@InitBinder` method.| +|-----|---------------------------------| + +Kotlin + +``` +@Controller +class FormController { + + @InitBinder (1) + fun initBinder(binder: WebDataBinder) { + val dateFormat = SimpleDateFormat("yyyy-MM-dd") + dateFormat.isLenient = false + binder.registerCustomEditor(Date::class.java, CustomDateEditor(dateFormat, false)) + } + + // ... 
+} +``` + +|**1**|Defining an `@InitBinder` method.| +|-----|---------------------------------| + +Alternatively, when you use a `Formatter`-based setup through a shared`FormattingConversionService`, you can re-use the same approach and register +controller-specific `Formatter` implementations, as the following example shows: + +Java + +``` +@Controller +public class FormController { + + @InitBinder (1) + protected void initBinder(WebDataBinder binder) { + binder.addCustomFormatter(new DateFormatter("yyyy-MM-dd")); + } + + // ... +} +``` + +|**1**|Defining an `@InitBinder` method on a custom formatter.| +|-----|-------------------------------------------------------| + +Kotlin + +``` +@Controller +class FormController { + + @InitBinder (1) + protected fun initBinder(binder: WebDataBinder) { + binder.addCustomFormatter(DateFormatter("yyyy-MM-dd")) + } + + // ... +} +``` + +|**1**|Defining an `@InitBinder` method on a custom formatter.| +|-----|-------------------------------------------------------| + +#### 1.3.6. Exceptions + +[WebFlux](web-reactive.html#webflux-ann-controller-exceptions) + +`@Controller` and [@ControllerAdvice](#mvc-ann-controller-advice) classes can have`@ExceptionHandler` methods to handle exceptions from controller methods, as the following example shows: + +Java + +``` +@Controller +public class SimpleController { + + // ... + + @ExceptionHandler + public ResponseEntity<String> handle(IOException ex) { + // ... + } +} +``` + +Kotlin + +``` +@Controller +class SimpleController { + + // ... + + @ExceptionHandler + fun handle(ex: IOException): ResponseEntity<String> { + // ... + } +} +``` + +The exception may match against a top-level exception being propagated (e.g. a direct`IOException` being thrown) or against a nested cause within a wrapper exception (e.g. +an `IOException` wrapped inside an `IllegalStateException`). As of 5.3, this can match +at arbitrary cause levels, whereas previously only an immediate cause was considered. 
+ +For matching exception types, preferably declare the target exception as a method argument, +as the preceding example shows. When multiple exception methods match, a root exception match is +generally preferred to a cause exception match. More specifically, the `ExceptionDepthComparator`is used to sort exceptions based on their depth from the thrown exception type. + +Alternatively, the annotation declaration may narrow the exception types to match, +as the following example shows: + +Java + +``` +@ExceptionHandler({FileSystemException.class, RemoteException.class}) +public ResponseEntity<String> handle(IOException ex) { + // ... +} +``` + +Kotlin + +``` +@ExceptionHandler(FileSystemException::class, RemoteException::class) +fun handle(ex: IOException): ResponseEntity<String> { + // ... +} +``` + +You can even use a list of specific exception types with a very generic argument signature, +as the following example shows: + +Java + +``` +@ExceptionHandler({FileSystemException.class, RemoteException.class}) +public ResponseEntity<String> handle(Exception ex) { + // ... +} +``` + +Kotlin + +``` +@ExceptionHandler(FileSystemException::class, RemoteException::class) +fun handle(ex: Exception): ResponseEntity<String> { + // ... +} +``` + +| |The distinction between root and cause exception matching can be surprising.<br/><br/>In the `IOException` variant shown earlier, the method is typically called with<br/>the actual `FileSystemException` or `RemoteException` instance as the argument,<br/>since both of them extend from `IOException`. However, if any such matching<br/>exception is propagated within a wrapper exception which is itself an `IOException`,<br/>the passed-in exception instance is that wrapper exception.<br/><br/>The behavior is even simpler in the `handle(Exception)` variant. 
This is<br/>always invoked with the wrapper exception in a wrapping scenario, with the<br/>actually matching exception to be found through `ex.getCause()` in that case.<br/>The passed-in exception is the actual `FileSystemException` or`RemoteException` instance only when these are thrown as top-level exceptions.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +We generally recommend that you be as specific as possible in the argument signature, +reducing the potential for mismatches between root and cause exception types. +Consider breaking a multi-matching method into individual `@ExceptionHandler`methods, each matching a single specific exception type through its signature. + +In a multi-`@ControllerAdvice` arrangement, we recommend declaring your primary root exception +mappings on a `@ControllerAdvice` prioritized with a corresponding order. While a root +exception match is preferred to a cause, this is defined among the methods of a given +controller or `@ControllerAdvice` class. This means a cause match on a higher-priority`@ControllerAdvice` bean is preferred to any match (for example, root) on a lower-priority`@ControllerAdvice` bean. 
+ +Last but not least, an `@ExceptionHandler` method implementation can choose to back +out of dealing with a given exception instance by rethrowing it in its original form. +This is useful in scenarios where you are interested only in root-level matches or in +matches within a specific context that cannot be statically determined. A rethrown +exception is propagated through the remaining resolution chain, as though +the given `@ExceptionHandler` method would not have matched in the first place. + +Support for `@ExceptionHandler` methods in Spring MVC is built on the `DispatcherServlet`level, [HandlerExceptionResolver](#mvc-exceptionhandlers) mechanism. + +##### Method Arguments + +`@ExceptionHandler` methods support the following arguments: + +| Method argument | Description | +|----------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Exception type | For access to the raised exception. | +| `HandlerMethod` | For access to the controller method that raised the exception. | +| `WebRequest`, `NativeWebRequest` | Generic access to request parameters and request and session attributes without direct<br/>use of the Servlet API. | +| `javax.servlet.ServletRequest`, `javax.servlet.ServletResponse` | Choose any specific request or response type (for example, `ServletRequest` or`HttpServletRequest` or Spring’s `MultipartRequest` or `MultipartHttpServletRequest`). | +| `javax.servlet.http.HttpSession` |Enforces the presence of a session. As a consequence, such an argument is never `null`. <br/>Note that session access is not thread-safe. 
Consider setting the`RequestMappingHandlerAdapter` instance’s `synchronizeOnSession` flag to `true` if multiple<br/>requests are allowed to access a session concurrently.| +| `java.security.Principal` | Currently authenticated user — possibly a specific `Principal` implementation class if known. | +| `HttpMethod` | The HTTP method of the request. | +| `java.util.Locale` | The current request locale, determined by the most specific `LocaleResolver` available — in<br/>effect, the configured `LocaleResolver` or `LocaleContextResolver`. | +| `java.util.TimeZone`, `java.time.ZoneId` | The time zone associated with the current request, as determined by a `LocaleContextResolver`. | +| `java.io.OutputStream`, `java.io.Writer` | For access to the raw response body, as exposed by the Servlet API. | +|`java.util.Map`, `org.springframework.ui.Model`, `org.springframework.ui.ModelMap`| For access to the model for an error response. Always empty. | +| `RedirectAttributes` | Specify attributes to use in case of a redirect — (that is to be appended to the query<br/>string) and flash attributes to be stored temporarily until the request after the redirect.<br/>See [Redirect Attributes](#mvc-redirecting-passing-data) and [Flash Attributes](#mvc-flash-attributes). | +| `@SessionAttribute` | For access to any session attribute, in contrast to model attributes stored in the<br/>session as a result of a class-level `@SessionAttributes` declaration.<br/>See [`@SessionAttribute`](#mvc-ann-sessionattribute) for more details. | +| `@RequestAttribute` | For access to request attributes. See [`@RequestAttribute`](#mvc-ann-requestattrib) for more details. 
| + +##### Return Values + +`@ExceptionHandler` methods support the following return values: + +| Return value | Description | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `@ResponseBody` | The return value is converted through `HttpMessageConverter` instances and written to the<br/>response. See [`@ResponseBody`](#mvc-ann-responsebody). | +| `HttpEntity<B>`, `ResponseEntity<B>` | The return value specifies that the full response (including the HTTP headers and the body)<br/>be converted through `HttpMessageConverter` instances and written to the response.<br/>See [ResponseEntity](#mvc-ann-responseentity). | +| `String` | A view name to be resolved with `ViewResolver` implementations and used together with the<br/>implicit model — determined through command objects and `@ModelAttribute` methods.<br/>The handler method can also programmatically enrich the model by declaring a `Model`argument (described earlier). | +| `View` | A `View` instance to use for rendering together with the implicit model — determined<br/>through command objects and `@ModelAttribute` methods. The handler method may also<br/>programmatically enrich the model by declaring a `Model` argument (descried earlier). | +|`java.util.Map`, `org.springframework.ui.Model`| Attributes to be added to the implicit model with the view name implicitly determined<br/>through a `RequestToViewNameTranslator`. 
| +| `@ModelAttribute` | An attribute to be added to the model with the view name implicitly determined through<br/>a `RequestToViewNameTranslator`.<br/><br/> Note that `@ModelAttribute` is optional. See “Any other return value” at the end of<br/>this table. | +| `ModelAndView` object | The view and model attributes to use and, optionally, a response status. | +| `void` |A method with a `void` return type (or `null` return value) is considered to have fully<br/>handled the response if it also has a `ServletResponse`, an `OutputStream` argument, or<br/>a `@ResponseStatus` annotation. The same is also true if the controller has made a positive`ETag` or `lastModified` timestamp check (see [Controllers](#mvc-caching-etag-lastmodified) for details).<br/><br/> If none of the above is true, a `void` return type can also indicate “no response body” for<br/>REST controllers or default view name selection for HTML controllers.| +| Any other return value | If a return value is not matched to any of the above and is not a simple type (as determined by[BeanUtils#isSimpleProperty](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/beans/BeanUtils.html#isSimpleProperty-java.lang.Class-)),<br/>by default, it is treated as a model attribute to be added to the model. If it is a simple type,<br/>it remains unresolved. | + +##### REST API exceptions + +[WebFlux](web-reactive.html#webflux-ann-rest-exceptions) + +A common requirement for REST services is to include error details in the body of the +response. The Spring Framework does not automatically do this because the representation +of error details in the response body is application-specific. However, a`@RestController` may use `@ExceptionHandler` methods with a `ResponseEntity` return +value to set the status and the body of the response. Such methods can also be declared +in `@ControllerAdvice` classes to apply them globally. 
+ +Applications that implement global exception handling with error details in the response +body should consider extending[`ResponseEntityExceptionHandler`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/mvc/method/annotation/ResponseEntityExceptionHandler.html), +which provides handling for exceptions that Spring MVC raises and provides hooks to +customize the response body. To make use of this, create a subclass of`ResponseEntityExceptionHandler`, annotate it with `@ControllerAdvice`, override the +necessary methods, and declare it as a Spring bean. + +#### 1.3.7. Controller Advice + +[WebFlux](web-reactive.html#webflux-ann-controller-advice) + +`@ExceptionHandler`, `@InitBinder`, and `@ModelAttribute` methods apply only to the`@Controller` class, or class hierarchy, in which they are declared. If, instead, they +are declared in an `@ControllerAdvice` or `@RestControllerAdvice` class, then they apply +to any controller. Moreover, as of 5.3, `@ExceptionHandler` methods in `@ControllerAdvice`can be used to handle exceptions from any `@Controller` or any other handler. + +`@ControllerAdvice` is meta-annotated with `@Component` and therefore can be registered as +a Spring bean through [component scanning](core.html#beans-java-instantiating-container-scan). `@RestControllerAdvice` is meta-annotated with `@ControllerAdvice`and `@ResponseBody`, and that means `@ExceptionHandler` methods will have their return +value rendered via response body message conversion, rather than via HTML views. + +On startup, `RequestMappingHandlerMapping` and `ExceptionHandlerExceptionResolver` detect +controller advice beans and apply them at runtime. Global `@ExceptionHandler` methods, +from an `@ControllerAdvice`, are applied *after* local ones, from the `@Controller`. +By contrast, global `@ModelAttribute` and `@InitBinder` methods are applied *before* local ones. 
+ +The `@ControllerAdvice` annotation has attributes that let you narrow the set of controllers +and handlers that they apply to. For example: + +Java + +``` +// Target all Controllers annotated with @RestController +@ControllerAdvice(annotations = RestController.class) +public class ExampleAdvice1 {} + +// Target all Controllers within specific packages +@ControllerAdvice("org.example.controllers") +public class ExampleAdvice2 {} + +// Target all Controllers assignable to specific classes +@ControllerAdvice(assignableTypes = {ControllerInterface.class, AbstractController.class}) +public class ExampleAdvice3 {} +``` + +Kotlin + +``` +// Target all Controllers annotated with @RestController +@ControllerAdvice(annotations = [RestController::class]) +class ExampleAdvice1 + +// Target all Controllers within specific packages +@ControllerAdvice("org.example.controllers") +class ExampleAdvice2 + +// Target all Controllers assignable to specific classes +@ControllerAdvice(assignableTypes = [ControllerInterface::class, AbstractController::class]) +class ExampleAdvice3 +``` + +The selectors in the preceding example are evaluated at runtime and may negatively impact +performance if used extensively. See the[`@ControllerAdvice`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/bind/annotation/ControllerAdvice.html)javadoc for more details. + +### 1.4. Functional Endpoints + +[WebFlux](web-reactive.html#webflux-fn) + +Spring Web MVC includes WebMvc.fn, a lightweight functional programming model in which functions +are used to route and handle requests and contracts are designed for immutability. +It is an alternative to the annotation-based programming model but otherwise runs on +the same [DispatcherServlet](#mvc-servlet). + +#### 1.4.1. Overview + +[WebFlux](web-reactive.html#webflux-fn-overview) + +In WebMvc.fn, an HTTP request is handled with a `HandlerFunction`: a function that takes`ServerRequest` and returns a `ServerResponse`. 
+Both the request and the response object have immutable contracts that offer JDK 8-friendly +access to the HTTP request and response.`HandlerFunction` is the equivalent of the body of a `@RequestMapping` method in the +annotation-based programming model. + +Incoming requests are routed to a handler function with a `RouterFunction`: a function that +takes `ServerRequest` and returns an optional `HandlerFunction` (i.e. `Optional<HandlerFunction>`). +When the router function matches, a handler function is returned; otherwise an empty Optional.`RouterFunction` is the equivalent of a `@RequestMapping` annotation, but with the major +difference that router functions provide not just data, but also behavior. + +`RouterFunctions.route()` provides a router builder that facilitates the creation of routers, +as the following example shows: + +Java + +``` +import static org.springframework.http.MediaType.APPLICATION_JSON; +import static org.springframework.web.servlet.function.RequestPredicates.*; +import static org.springframework.web.servlet.function.RouterFunctions.route; + +PersonRepository repository = ... +PersonHandler handler = new PersonHandler(repository); + +RouterFunction<ServerResponse> route = route() + .GET("/person/{id}", accept(APPLICATION_JSON), handler::getPerson) + .GET("/person", accept(APPLICATION_JSON), handler::listPeople) + .POST("/person", handler::createPerson) + .build(); + +public class PersonHandler { + + // ... + + public ServerResponse listPeople(ServerRequest request) { + // ... + } + + public ServerResponse createPerson(ServerRequest request) { + // ... + } + + public ServerResponse getPerson(ServerRequest request) { + // ... + } +} +``` + +Kotlin + +``` +import org.springframework.web.servlet.function.router + +val repository: PersonRepository = ... 
+val handler = PersonHandler(repository) + +val route = router { (1) + accept(APPLICATION_JSON).nest { + GET("/person/{id}", handler::getPerson) + GET("/person", handler::listPeople) + } + POST("/person", handler::createPerson) +} + +class PersonHandler(private val repository: PersonRepository) { + + // ... + + fun listPeople(request: ServerRequest): ServerResponse { + // ... + } + + fun createPerson(request: ServerRequest): ServerResponse { + // ... + } + + fun getPerson(request: ServerRequest): ServerResponse { + // ... + } +} +``` + +|**1**|Create router using the router DSL.| +|-----|-----------------------------------| + +If you register the `RouterFunction` as a bean, for instance by exposing it in a`@Configuration` class, it will be auto-detected by the servlet, as explained in [Running a Server](#webmvc-fn-running). + +#### 1.4.2. HandlerFunction + +[WebFlux](web-reactive.html#webflux-fn-handler-functions) + +`ServerRequest` and `ServerResponse` are immutable interfaces that offer JDK 8-friendly +access to the HTTP request and response, including headers, body, method, and status code. + +##### ServerRequest + +`ServerRequest` provides access to the HTTP method, URI, headers, and query parameters, +while access to the body is provided through the `body` methods. 
+ +The following example extracts the request body to a `String`: + +Java + +``` +String string = request.body(String.class); +``` + +Kotlin + +``` +val string = request.body<String>() +``` + +The following example extracts the body to a `List<Person>`, +where `Person` objects are decoded from a serialized form, such as JSON or XML: + +Java + +``` +List<Person> people = request.body(new ParameterizedTypeReference<List<Person>>() {}); +``` + +Kotlin + +``` +val people = request.body<Person>() +``` + +The following example shows how to access parameters: + +Java + +``` +MultiValueMap<String, String> params = request.params(); +``` + +Kotlin + +``` +val map = request.params() +``` + +##### ServerResponse + +`ServerResponse` provides access to the HTTP response and, since it is immutable, you can use +a `build` method to create it. You can use the builder to set the response status, to add response +headers, or to provide a body. The following example creates a 200 (OK) response with JSON +content: + +Java + +``` +Person person = ... +ServerResponse.ok().contentType(MediaType.APPLICATION_JSON).body(person); +``` + +Kotlin + +``` +val person: Person = ... +ServerResponse.ok().contentType(MediaType.APPLICATION_JSON).body(person) +``` + +The following example shows how to build a 201 (CREATED) response with a `Location` header and no body: + +Java + +``` +URI location = ... +ServerResponse.created(location).build(); +``` + +Kotlin + +``` +val location: URI = ... +ServerResponse.created(location).build() +``` + +You can also use an asynchronous result as the body, in the form of a `CompletableFuture`,`Publisher`, or any other type supported by the `ReactiveAdapterRegistry`. 
For instance: + +Java + +``` +Mono<Person> person = webClient.get().retrieve().bodyToMono(Person.class); +ServerResponse.ok().contentType(MediaType.APPLICATION_JSON).body(person); +``` + +Kotlin + +``` +val person = webClient.get().retrieve().awaitBody<Person>() +ServerResponse.ok().contentType(MediaType.APPLICATION_JSON).body(person) +``` + +If not just the body, but also the status or headers are based on an asynchronous type, +you can use the static `async` method on `ServerResponse`, which +accepts `CompletableFuture<ServerResponse>`, `Publisher<ServerResponse>`, or +any other asynchronous type supported by the `ReactiveAdapterRegistry`. For instance: + +Java + +``` +Mono<ServerResponse> asyncResponse = webClient.get().retrieve().bodyToMono(Person.class) + .map(p -> ServerResponse.ok().header("Name", p.name()).body(p)); +ServerResponse.async(asyncResponse); +``` + +[Server-Sent Events](https://www.w3.org/TR/eventsource/) can be provided via the +static `sse` method on `ServerResponse`. The builder provided by that method +allows you to send Strings, or other objects as JSON. For example: + +Java + +``` +public RouterFunction<ServerResponse> sse() { + return route(GET("/sse"), request -> ServerResponse.sse(sseBuilder -> { + // Save the sseBuilder object somewhere.. + })); +} + +// In some other thread, sending a String +sseBuilder.send("Hello world"); + +// Or an object, which will be transformed into JSON +Person person = ... +sseBuilder.send(person); + +// Customize the event by using the other methods +sseBuilder.id("42") + .event("sse event") + .data(person); + +// and done at some point +sseBuilder.complete(); +``` + +Kotlin + +``` +fun sse(): RouterFunction<ServerResponse> = router { + GET("/sse") { request -> ServerResponse.sse { sseBuilder -> + // Save the sseBuilder object somewhere.. + } +} + +// In some other thread, sending a String +sseBuilder.send("Hello world") + +// Or an object, which will be transformed into JSON +val person = ... 
+sseBuilder.send(person) + +// Customize the event by using the other methods +sseBuilder.id("42") + .event("sse event") + .data(person) + +// and done at some point +sseBuilder.complete() +``` + +##### Handler Classes + +We can write a handler function as a lambda, as the following example shows: + +Java + +``` +HandlerFunction<ServerResponse> helloWorld = + request -> ServerResponse.ok().body("Hello World"); +``` + +Kotlin + +``` +val helloWorld: (ServerRequest) -> ServerResponse = + { ServerResponse.ok().body("Hello World") } +``` + +That is convenient, but in an application we need multiple functions, and multiple inline +lambda’s can get messy. +Therefore, it is useful to group related handler functions together into a handler class, which +has a similar role as `@Controller` in an annotation-based application. +For example, the following class exposes a reactive `Person` repository: + +Java + +``` +import static org.springframework.http.MediaType.APPLICATION_JSON; +import static org.springframework.web.reactive.function.server.ServerResponse.ok; + +public class PersonHandler { + + private final PersonRepository repository; + + public PersonHandler(PersonRepository repository) { + this.repository = repository; + } + + public ServerResponse listPeople(ServerRequest request) { (1) + List<Person> people = repository.allPeople(); + return ok().contentType(APPLICATION_JSON).body(people); + } + + public ServerResponse createPerson(ServerRequest request) throws Exception { (2) + Person person = request.body(Person.class); + repository.savePerson(person); + return ok().build(); + } + + public ServerResponse getPerson(ServerRequest request) { (3) + int personId = Integer.parseInt(request.pathVariable("id")); + Person person = repository.getPerson(personId); + if (person != null) { + return ok().contentType(APPLICATION_JSON).body(person); + } + else { + return ServerResponse.notFound().build(); + } + } + +} +``` + +|**1**| `listPeople` is a handler function that returns 
all `Person` objects found in the repository as<br/>JSON. | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| `createPerson` is a handler function that stores a new `Person` contained in the request body. | +|**3**|`getPerson` is a handler function that returns a single person, identified by the `id` path<br/>variable. We retrieve that `Person` from the repository and create a JSON response, if it is<br/>found. If it is not found, we return a 404 Not Found response.| + +Kotlin + +``` +class PersonHandler(private val repository: PersonRepository) { + + fun listPeople(request: ServerRequest): ServerResponse { (1) + val people: List<Person> = repository.allPeople() + return ok().contentType(APPLICATION_JSON).body(people); + } + + fun createPerson(request: ServerRequest): ServerResponse { (2) + val person = request.body<Person>() + repository.savePerson(person) + return ok().build() + } + + fun getPerson(request: ServerRequest): ServerResponse { (3) + val personId = request.pathVariable("id").toInt() + return repository.getPerson(personId)?.let { ok().contentType(APPLICATION_JSON).body(it) } + ?: ServerResponse.notFound().build() + + } +} +``` + +|**1**| `listPeople` is a handler function that returns all `Person` objects found in the repository as<br/>JSON. | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| `createPerson` is a handler function that stores a new `Person` contained in the request body. | +|**3**|`getPerson` is a handler function that returns a single person, identified by the `id` path<br/>variable. 
We retrieve that `Person` from the repository and create a JSON response, if it is<br/>found. If it is not found, we return a 404 Not Found response.| + +##### Validation + +A functional endpoint can use Spring’s [validation facilities](core.html#validation) to +apply validation to the request body. For example, given a custom Spring[Validator](core.html#validation) implementation for a `Person`: + +Java + +``` +public class PersonHandler { + + private final Validator validator = new PersonValidator(); (1) + + // ... + + public ServerResponse createPerson(ServerRequest request) { + Person person = request.body(Person.class); + validate(person); (2) + repository.savePerson(person); + return ok().build(); + } + + private void validate(Person person) { + Errors errors = new BeanPropertyBindingResult(person, "person"); + validator.validate(person, errors); + if (errors.hasErrors()) { + throw new ServerWebInputException(errors.toString()); (3) + } + } +} +``` + +|**1**| Create `Validator` instance. | +|-----|-----------------------------------| +|**2**| Apply validation. | +|**3**|Raise exception for a 400 response.| + +Kotlin + +``` +class PersonHandler(private val repository: PersonRepository) { + + private val validator = PersonValidator() (1) + + // ... + + fun createPerson(request: ServerRequest): ServerResponse { + val person = request.body<Person>() + validate(person) (2) + repository.savePerson(person) + return ok().build() + } + + private fun validate(person: Person) { + val errors: Errors = BeanPropertyBindingResult(person, "person") + validator.validate(person, errors) + if (errors.hasErrors()) { + throw ServerWebInputException(errors.toString()) (3) + } + } +} +``` + +|**1**| Create `Validator` instance. | +|-----|-----------------------------------| +|**2**| Apply validation. 
| +|**3**|Raise exception for a 400 response.| + +Handlers can also use the standard bean validation API (JSR-303) by creating and injecting +a global `Validator` instance based on `LocalValidatorFactoryBean`. +See [Spring Validation](core.html#validation-beanvalidation). + +#### 1.4.3. `RouterFunction` + +[WebFlux](web-reactive.html#webflux-fn-router-functions) + +Router functions are used to route the requests to the corresponding `HandlerFunction`. +Typically, you do not write router functions yourself, but rather use a method on the`RouterFunctions` utility class to create one.`RouterFunctions.route()` (no parameters) provides you with a fluent builder for creating a router +function, whereas `RouterFunctions.route(RequestPredicate, HandlerFunction)` offers a direct way +to create a router. + +Generally, it is recommended to use the `route()` builder, as it provides +convenient short-cuts for typical mapping scenarios without requiring hard-to-discover +static imports. +For instance, the router function builder offers the method `GET(String, HandlerFunction)` to create a mapping for GET requests; and `POST(String, HandlerFunction)` for POSTs. + +Besides HTTP method-based mapping, the route builder offers a way to introduce additional +predicates when mapping to requests. +For each HTTP method there is an overloaded variant that takes a `RequestPredicate` as a +parameter, through which additional constraints can be expressed. + +##### Predicates + +You can write your own `RequestPredicate`, but the `RequestPredicates` utility class +offers commonly used implementations, based on the request path, HTTP method, content-type, +and so on. 
+The following example uses a request predicate to create a constraint based on the `Accept`header: + +Java + +``` +RouterFunction<ServerResponse> route = RouterFunctions.route() + .GET("/hello-world", accept(MediaType.TEXT_PLAIN), + request -> ServerResponse.ok().body("Hello World")).build(); +``` + +Kotlin + +``` +import org.springframework.web.servlet.function.router + +val route = router { + GET("/hello-world", accept(TEXT_PLAIN)) { + ServerResponse.ok().body("Hello World") + } +} +``` + +You can compose multiple request predicates together by using: + +* `RequestPredicate.and(RequestPredicate)` — both must match. + +* `RequestPredicate.or(RequestPredicate)` — either can match. + +Many of the predicates from `RequestPredicates` are composed. +For example, `RequestPredicates.GET(String)` is composed from `RequestPredicates.method(HttpMethod)`and `RequestPredicates.path(String)`. +The example shown above also uses two request predicates, as the builder uses`RequestPredicates.GET` internally, and composes that with the `accept` predicate. + +##### Routes + +Router functions are evaluated in order: if the first route does not match, the +second is evaluated, and so on. +Therefore, it makes sense to declare more specific routes before general ones. +This is also important when registering router functions as Spring beans, as will +be described later. +Note that this behavior is different from the annotation-based programming model, where the +"most specific" controller method is picked automatically. + +When using the router function builder, all defined routes are composed into one`RouterFunction` that is returned from `build()`. +There are also other ways to compose multiple router functions together: + +* `add(RouterFunction)` on the `RouterFunctions.route()` builder + +* `RouterFunction.and(RouterFunction)` + +* `RouterFunction.andRoute(RequestPredicate, HandlerFunction)` — shortcut for`RouterFunction.and()` with nested `RouterFunctions.route()`. 
+ +The following example shows the composition of four routes: + +Java + +``` +import static org.springframework.http.MediaType.APPLICATION_JSON; +import static org.springframework.web.servlet.function.RequestPredicates.*; + +PersonRepository repository = ... +PersonHandler handler = new PersonHandler(repository); + +RouterFunction<ServerResponse> otherRoute = ... + +RouterFunction<ServerResponse> route = route() + .GET("/person/{id}", accept(APPLICATION_JSON), handler::getPerson) (1) + .GET("/person", accept(APPLICATION_JSON), handler::listPeople) (2) + .POST("/person", handler::createPerson) (3) + .add(otherRoute) (4) + .build(); +``` + +|**1**|`GET /person/{id}` with an `Accept` header that matches JSON is routed to`PersonHandler.getPerson`| +|-----|--------------------------------------------------------------------------------------------------| +|**2**| `GET /person` with an `Accept` header that matches JSON is routed to`PersonHandler.listPeople` | +|**3**| `POST /person` with no additional predicates is mapped to`PersonHandler.createPerson`, and | +|**4**| `otherRoute` is a router function that is created elsewhere, and added to the route built. | + +Kotlin + +``` +import org.springframework.http.MediaType.APPLICATION_JSON +import org.springframework.web.servlet.function.router + +val repository: PersonRepository = ... 
+val handler = PersonHandler(repository); + +val otherRoute = router { } + +val route = router { + GET("/person/{id}", accept(APPLICATION_JSON), handler::getPerson) (1) + GET("/person", accept(APPLICATION_JSON), handler::listPeople) (2) + POST("/person", handler::createPerson) (3) +}.and(otherRoute) (4) +``` + +|**1**|`GET /person/{id}` with an `Accept` header that matches JSON is routed to`PersonHandler.getPerson`| +|-----|--------------------------------------------------------------------------------------------------| +|**2**| `GET /person` with an `Accept` header that matches JSON is routed to`PersonHandler.listPeople` | +|**3**| `POST /person` with no additional predicates is mapped to`PersonHandler.createPerson`, and | +|**4**| `otherRoute` is a router function that is created elsewhere, and added to the route built. | + +##### Nested Routes + +It is common for a group of router functions to have a shared predicate, for instance a shared +path. +In the example above, the shared predicate would be a path predicate that matches `/person`, +used by three of the routes. +When using annotations, you would remove this duplication by using a type-level `@RequestMapping`annotation that maps to `/person`. +In WebMvc.fn, path predicates can be shared through the `path` method on the router function builder. 
+For instance, the last few lines of the example above can be improved in the following way by using nested routes: + +Java + +``` +RouterFunction<ServerResponse> route = route() + .path("/person", builder -> builder (1) + .GET("/{id}", accept(APPLICATION_JSON), handler::getPerson) + .GET(accept(APPLICATION_JSON), handler::listPeople) + .POST("/person", handler::createPerson)) + .build(); +``` + +|**1**|Note that second parameter of `path` is a consumer that takes the router builder.| +|-----|---------------------------------------------------------------------------------| + +Kotlin + +``` +import org.springframework.web.servlet.function.router + +val route = router { + "/person".nest { + GET("/{id}", accept(APPLICATION_JSON), handler::getPerson) + GET(accept(APPLICATION_JSON), handler::listPeople) + POST("/person", handler::createPerson) + } +} +``` + +Though path-based nesting is the most common, you can nest on any kind of predicate by using +the `nest` method on the builder. +The above still contains some duplication in the form of the shared `Accept`-header predicate. +We can further improve by using the `nest` method together with `accept`: + +Java + +``` +RouterFunction<ServerResponse> route = route() + .path("/person", b1 -> b1 + .nest(accept(APPLICATION_JSON), b2 -> b2 + .GET("/{id}", handler::getPerson) + .GET(handler::listPeople)) + .POST("/person", handler::createPerson)) + .build(); +``` + +Kotlin + +``` +import org.springframework.web.servlet.function.router + +val route = router { + "/person".nest { + accept(APPLICATION_JSON).nest { + GET("/{id}", handler::getPerson) + GET("", handler::listPeople) + POST("/person", handler::createPerson) + } + } +} +``` + +#### 1.4.4. Running a Server + +[WebFlux](web-reactive.html#webflux-fn-running) + +You typically run router functions in a [`DispatcherHandler`](#mvc-servlet)-based setup through the[MVC Config](#mvc-config), which uses Spring configuration to declare the +components required to process requests. 
The MVC Java configuration declares the following +infrastructure components to support functional endpoints: + +* `RouterFunctionMapping`: Detects one or more `RouterFunction<?>` beans in the Spring + configuration, [orders them](core.html#beans-factory-ordered), combines them through`RouterFunction.andOther`, and routes requests to the resulting composed `RouterFunction`. + +* `HandlerFunctionAdapter`: Simple adapter that lets `DispatcherServlet` invoke + a `HandlerFunction` that was mapped to a request. + +The preceding components let functional endpoints fit within the `DispatcherServlet` request +processing lifecycle and also (potentially) run side by side with annotated controllers, if +any are declared. It is also how functional endpoints are enabled by the Spring Boot Web +starter. + +The following example shows an MVC Java configuration: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Bean + public RouterFunction<?> routerFunctionA() { + // ... + } + + @Bean + public RouterFunction<?> routerFunctionB() { + // ... + } + + // ... + + @Override + public void configureMessageConverters(List<HttpMessageConverter<?>> converters) { + // configure message conversion... + } + + @Override + public void addCorsMappings(CorsRegistry registry) { + // configure CORS... + } + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + // configure view resolution for HTML rendering... + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + @Bean + fun routerFunctionA(): RouterFunction<*> { + // ... + } + + @Bean + fun routerFunctionB(): RouterFunction<*> { + // ... + } + + // ... + + override fun configureMessageConverters(converters: List<HttpMessageConverter<*>>) { + // configure message conversion... + } + + override fun addCorsMappings(registry: CorsRegistry) { + // configure CORS... 
+ } + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + // configure view resolution for HTML rendering... + } +} +``` + +#### 1.4.5. Filtering Handler Functions + +[WebFlux](web-reactive.html#webflux-fn-handler-filter-function) + +You can filter handler functions by using the `before`, `after`, or `filter` methods on the routing +function builder. +With annotations, you can achieve similar functionality by using `@ControllerAdvice`, a `ServletFilter`, or both. +The filter will apply to all routes that are built by the builder. +This means that filters defined in nested routes do not apply to "top-level" routes. +For instance, consider the following example: + +Java + +``` +RouterFunction<ServerResponse> route = route() + .path("/person", b1 -> b1 + .nest(accept(APPLICATION_JSON), b2 -> b2 + .GET("/{id}", handler::getPerson) + .GET(handler::listPeople) + .before(request -> ServerRequest.from(request) (1) + .header("X-RequestHeader", "Value") + .build())) + .POST("/person", handler::createPerson)) + .after((request, response) -> logResponse(response)) (2) + .build(); +``` + +|**1**| The `before` filter that adds a custom request header is only applied to the two GET routes. | +|-----|----------------------------------------------------------------------------------------------| +|**2**|The `after` filter that logs the response is applied to all routes, including the nested ones.| + +Kotlin + +``` +import org.springframework.web.servlet.function.router + +val route = router { + "/person".nest { + GET("/{id}", handler::getPerson) + GET(handler::listPeople) + before { (1) + ServerRequest.from(it) + .header("X-RequestHeader", "Value").build() + } + } + POST("/person", handler::createPerson) + after { _, response -> (2) + logResponse(response) + } +} +``` + +|**1**| The `before` filter that adds a custom request header is only applied to the two GET routes. 
| +|-----|----------------------------------------------------------------------------------------------| +|**2**|The `after` filter that logs the response is applied to all routes, including the nested ones.| + +The `filter` method on the router builder takes a `HandlerFilterFunction`: a +function that takes a `ServerRequest` and `HandlerFunction` and returns a `ServerResponse`. +The handler function parameter represents the next element in the chain. +This is typically the handler that is routed to, but it can also be another +filter if multiple are applied. + +Now we can add a simple security filter to our route, assuming that we have a `SecurityManager` that +can determine whether a particular path is allowed. +The following example shows how to do so: + +Java + +``` +SecurityManager securityManager = ... + +RouterFunction<ServerResponse> route = route() + .path("/person", b1 -> b1 + .nest(accept(APPLICATION_JSON), b2 -> b2 + .GET("/{id}", handler::getPerson) + .GET(handler::listPeople)) + .POST("/person", handler::createPerson)) + .filter((request, next) -> { + if (securityManager.allowAccessTo(request.path())) { + return next.handle(request); + } + else { + return ServerResponse.status(UNAUTHORIZED).build(); + } + }) + .build(); +``` + +Kotlin + +``` +import org.springframework.web.servlet.function.router + +val securityManager: SecurityManager = ... + +val route = router { + ("/person" and accept(APPLICATION_JSON)).nest { + GET("/{id}", handler::getPerson) + GET("", handler::listPeople) + POST("/person", handler::createPerson) + filter { request, next -> + if (securityManager.allowAccessTo(request.path())) { + next(request) + } + else { + status(UNAUTHORIZED).build(); + } + } + } +} +``` + +The preceding example demonstrates that invoking the `next.handle(ServerRequest)` is optional. +We only let the handler function be run when access is allowed. 
+ +Besides using the `filter` method on the router function builder, it is possible to apply a +filter to an existing router function via `RouterFunction.filter(HandlerFilterFunction)`. + +| |CORS support for functional endpoints is provided through a dedicated[`CorsFilter`](webmvc-cors.html#mvc-cors-filter).| +|---|----------------------------------------------------------------------------------------------------------------------| + +### 1.5. URI Links + +[WebFlux](web-reactive.html#webflux-uri-building) + +This section describes various options available in the Spring Framework to work with URI’s. + +#### 1.5.1. UriComponents + +Spring MVC and Spring WebFlux + +`UriComponentsBuilder` helps to build URI’s from URI templates with variables, as the following example shows: + +Java + +``` +UriComponents uriComponents = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") (1) + .queryParam("q", "{q}") (2) + .encode() (3) + .build(); (4) + +URI uri = uriComponents.expand("Westin", "123").toUri(); (5) +``` + +|**1**| Static factory method with a URI template. | +|-----|-----------------------------------------------------------| +|**2**| Add or replace URI components. | +|**3**|Request to have the URI template and URI variables encoded.| +|**4**| Build a `UriComponents`. | +|**5**| Expand variables and obtain the `URI`. | + +Kotlin + +``` +val uriComponents = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") (1) + .queryParam("q", "{q}") (2) + .encode() (3) + .build() (4) + +val uri = uriComponents.expand("Westin", "123").toUri() (5) +``` + +|**1**| Static factory method with a URI template. | +|-----|-----------------------------------------------------------| +|**2**| Add or replace URI components. | +|**3**|Request to have the URI template and URI variables encoded.| +|**4**| Build a `UriComponents`. | +|**5**| Expand variables and obtain the `URI`. 
| + +The preceding example can be consolidated into one chain and shortened with `buildAndExpand`, +as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") + .queryParam("q", "{q}") + .encode() + .buildAndExpand("Westin", "123") + .toUri(); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") + .queryParam("q", "{q}") + .encode() + .buildAndExpand("Westin", "123") + .toUri() +``` + +You can shorten it further by going directly to a URI (which implies encoding), +as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") + .queryParam("q", "{q}") + .build("Westin", "123"); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}") + .queryParam("q", "{q}") + .build("Westin", "123") +``` + +You can shorten it further still with a full URI template, as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}?q={q}") + .build("Westin", "123"); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder + .fromUriString("https://example.com/hotels/{hotel}?q={q}") + .build("Westin", "123") +``` + +#### 1.5.2. UriBuilder + +Spring MVC and Spring WebFlux + +[`UriComponentsBuilder`](#web-uricomponents) implements `UriBuilder`. You can create a`UriBuilder`, in turn, with a `UriBuilderFactory`. Together, `UriBuilderFactory` and`UriBuilder` provide a pluggable mechanism to build URIs from URI templates, based on +shared configuration, such as a base URL, encoding preferences, and other details. + +You can configure `RestTemplate` and `WebClient` with a `UriBuilderFactory`to customize the preparation of URIs. 
`DefaultUriBuilderFactory` is a default +implementation of `UriBuilderFactory` that uses `UriComponentsBuilder` internally and +exposes shared configuration options. + +The following example shows how to configure a `RestTemplate`: + +Java + +``` +// import org.springframework.web.util.DefaultUriBuilderFactory.EncodingMode; + +String baseUrl = "https://example.org"; +DefaultUriBuilderFactory factory = new DefaultUriBuilderFactory(baseUrl); +factory.setEncodingMode(EncodingMode.TEMPLATE_AND_VALUES); + +RestTemplate restTemplate = new RestTemplate(); +restTemplate.setUriTemplateHandler(factory); +``` + +Kotlin + +``` +// import org.springframework.web.util.DefaultUriBuilderFactory.EncodingMode + +val baseUrl = "https://example.org" +val factory = DefaultUriBuilderFactory(baseUrl) +factory.encodingMode = EncodingMode.TEMPLATE_AND_VALUES + +val restTemplate = RestTemplate() +restTemplate.uriTemplateHandler = factory +``` + +The following example configures a `WebClient`: + +Java + +``` +// import org.springframework.web.util.DefaultUriBuilderFactory.EncodingMode; + +String baseUrl = "https://example.org"; +DefaultUriBuilderFactory factory = new DefaultUriBuilderFactory(baseUrl); +factory.setEncodingMode(EncodingMode.TEMPLATE_AND_VALUES); + +WebClient client = WebClient.builder().uriBuilderFactory(factory).build(); +``` + +Kotlin + +``` +// import org.springframework.web.util.DefaultUriBuilderFactory.EncodingMode + +val baseUrl = "https://example.org" +val factory = DefaultUriBuilderFactory(baseUrl) +factory.encodingMode = EncodingMode.TEMPLATE_AND_VALUES + +val client = WebClient.builder().uriBuilderFactory(factory).build() +``` + +In addition, you can also use `DefaultUriBuilderFactory` directly. 
It is similar to using`UriComponentsBuilder` but, instead of static factory methods, it is an actual instance +that holds configuration and preferences, as the following example shows: + +Java + +``` +String baseUrl = "https://example.com"; +DefaultUriBuilderFactory uriBuilderFactory = new DefaultUriBuilderFactory(baseUrl); + +URI uri = uriBuilderFactory.uriString("/hotels/{hotel}") + .queryParam("q", "{q}") + .build("Westin", "123"); +``` + +Kotlin + +``` +val baseUrl = "https://example.com" +val uriBuilderFactory = DefaultUriBuilderFactory(baseUrl) + +val uri = uriBuilderFactory.uriString("/hotels/{hotel}") + .queryParam("q", "{q}") + .build("Westin", "123") +``` + +#### 1.5.3. URI Encoding + +Spring MVC and Spring WebFlux + +`UriComponentsBuilder` exposes encoding options at two levels: + +* [UriComponentsBuilder#encode()](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/util/UriComponentsBuilder.html#encode--): + Pre-encodes the URI template first and then strictly encodes URI variables when expanded. + +* [UriComponents#encode()](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/util/UriComponents.html#encode--): + Encodes URI components *after* URI variables are expanded. + +Both options replace non-ASCII and illegal characters with escaped octets. However, the first option +also replaces characters with reserved meaning that appear in URI variables. + +| |Consider ";", which is legal in a path but has reserved meaning. The first option replaces<br/>";" with "%3B" in URI variables but not in the URI template. 
By contrast, the second option never<br/>replaces ";", since it is a legal character in a path.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For most cases, the first option is likely to give the expected result, because it treats URI +variables as opaque data to be fully encoded, while the second option is useful if URI +variables do intentionally contain reserved characters. The second option is also useful +when not expanding URI variables at all since that will also encode anything that +incidentally looks like a URI variable. + +The following example uses the first option: + +Java + +``` +URI uri = UriComponentsBuilder.fromPath("/hotel list/{city}") + .queryParam("q", "{q}") + .encode() + .buildAndExpand("New York", "foo+bar") + .toUri(); + +// Result is "/hotel%20list/New%20York?q=foo%2Bbar" +``` + +Kotlin + +``` +val uri = UriComponentsBuilder.fromPath("/hotel list/{city}") + .queryParam("q", "{q}") + .encode() + .buildAndExpand("New York", "foo+bar") + .toUri() + +// Result is "/hotel%20list/New%20York?q=foo%2Bbar" +``` + +You can shorten the preceding example by going directly to the URI (which implies encoding), +as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder.fromPath("/hotel list/{city}") + .queryParam("q", "{q}") + .build("New York", "foo+bar"); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder.fromPath("/hotel list/{city}") + .queryParam("q", "{q}") + .build("New York", "foo+bar") +``` + +You can shorten it further still with a full URI template, as the following example shows: + +Java + +``` +URI uri = UriComponentsBuilder.fromUriString("/hotel list/{city}?q={q}") + .build("New York", "foo+bar"); +``` + +Kotlin + +``` +val uri = UriComponentsBuilder.fromUriString("/hotel list/{city}?q={q}") + 
.build("New York", "foo+bar")
+```
+
+The `WebClient` and the `RestTemplate` expand and encode URI templates internally through
+the `UriBuilderFactory` strategy. Both can be configured with a custom strategy,
+as the following example shows:
+
+Java
+
+```
+String baseUrl = "https://example.com";
+DefaultUriBuilderFactory factory = new DefaultUriBuilderFactory(baseUrl);
+factory.setEncodingMode(EncodingMode.TEMPLATE_AND_VALUES);
+
+// Customize the RestTemplate..
+RestTemplate restTemplate = new RestTemplate();
+restTemplate.setUriTemplateHandler(factory);
+
+// Customize the WebClient..
+WebClient client = WebClient.builder().uriBuilderFactory(factory).build();
+```
+
+Kotlin
+
+```
+val baseUrl = "https://example.com"
+val factory = DefaultUriBuilderFactory(baseUrl).apply {
+    encodingMode = EncodingMode.TEMPLATE_AND_VALUES
+}
+
+// Customize the RestTemplate..
+val restTemplate = RestTemplate().apply {
+    uriTemplateHandler = factory
+}
+
+// Customize the WebClient..
+val client = WebClient.builder().uriBuilderFactory(factory).build()
+```
+
+The `DefaultUriBuilderFactory` implementation uses `UriComponentsBuilder` internally to
+expand and encode URI templates. As a factory, it provides a single place to configure
+the approach to encoding, based on one of the below encoding modes:
+
+* `TEMPLATE_AND_VALUES`: Uses `UriComponentsBuilder#encode()`, corresponding to
+  the first option in the earlier list, to pre-encode the URI template and strictly encode URI variables when
+  expanded.
+
+* `VALUES_ONLY`: Does not encode the URI template and, instead, applies strict encoding
+  to URI variables through `UriUtils#encodeUriVariables` prior to expanding them into the
+  template.
+
+* `URI_COMPONENT`: Uses `UriComponents#encode()`, corresponding to the second option in the earlier list, to
+  encode URI component value *after* URI variables are expanded.
+
+* `NONE`: No encoding is applied. 
+ +The `RestTemplate` is set to `EncodingMode.URI_COMPONENT` for historic +reasons and for backwards compatibility. The `WebClient` relies on the default value +in `DefaultUriBuilderFactory`, which was changed from `EncodingMode.URI_COMPONENT` in +5.0.x to `EncodingMode.TEMPLATE_AND_VALUES` in 5.1. + +#### 1.5.4. Relative Servlet Requests + +You can use `ServletUriComponentsBuilder` to create URIs relative to the current request, +as the following example shows: + +Java + +``` +HttpServletRequest request = ... + +// Re-uses scheme, host, port, path, and query string... + +URI uri = ServletUriComponentsBuilder.fromRequest(request) + .replaceQueryParam("accountId", "{id}") + .build("123"); +``` + +Kotlin + +``` +val request: HttpServletRequest = ... + +// Re-uses scheme, host, port, path, and query string... + +val uri = ServletUriComponentsBuilder.fromRequest(request) + .replaceQueryParam("accountId", "{id}") + .build("123") +``` + +You can create URIs relative to the context path, as the following example shows: + +Java + +``` +HttpServletRequest request = ... + +// Re-uses scheme, host, port, and context path... + +URI uri = ServletUriComponentsBuilder.fromContextPath(request) + .path("/accounts") + .build() + .toUri(); +``` + +Kotlin + +``` +val request: HttpServletRequest = ... + +// Re-uses scheme, host, port, and context path... + +val uri = ServletUriComponentsBuilder.fromContextPath(request) + .path("/accounts") + .build() + .toUri() +``` + +You can create URIs relative to a Servlet (for example, `/main/*`), +as the following example shows: + +Java + +``` +HttpServletRequest request = ... + +// Re-uses scheme, host, port, context path, and Servlet mapping prefix... + +URI uri = ServletUriComponentsBuilder.fromServletMapping(request) + .path("/accounts") + .build() + .toUri(); +``` + +Kotlin + +``` +val request: HttpServletRequest = ... + +// Re-uses scheme, host, port, context path, and Servlet mapping prefix... 
+ +val uri = ServletUriComponentsBuilder.fromServletMapping(request) + .path("/accounts") + .build() + .toUri() +``` + +| |As of 5.1, `ServletUriComponentsBuilder` ignores information from the `Forwarded` and`X-Forwarded-*` headers, which specify the client-originated address. Consider using the[`ForwardedHeaderFilter`](#filters-forwarded-headers) to extract and use or to discard<br/>such headers.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.5.5. Links to Controllers + +Spring MVC provides a mechanism to prepare links to controller methods. For example, +the following MVC controller allows for link creation: + +Java + +``` +@Controller +@RequestMapping("/hotels/{hotel}") +public class BookingController { + + @GetMapping("/bookings/{booking}") + public ModelAndView getBooking(@PathVariable Long booking) { + // ... + } +} +``` + +Kotlin + +``` +@Controller +@RequestMapping("/hotels/{hotel}") +class BookingController { + + @GetMapping("/bookings/{booking}") + fun getBooking(@PathVariable booking: Long): ModelAndView { + // ... + } +} +``` + +You can prepare a link by referring to the method by name, as the following example shows: + +Java + +``` +UriComponents uriComponents = MvcUriComponentsBuilder + .fromMethodName(BookingController.class, "getBooking", 21).buildAndExpand(42); + +URI uri = uriComponents.encode().toUri(); +``` + +Kotlin + +``` +val uriComponents = MvcUriComponentsBuilder + .fromMethodName(BookingController::class.java, "getBooking", 21).buildAndExpand(42) + +val uri = uriComponents.encode().toUri() +``` + +In the preceding example, we provide actual method argument values (in this case, the long value: `21`) +to be used as a path variable and inserted into the URL. 
Furthermore, we provide the +value, `42`, to fill in any remaining URI variables, such as the `hotel` variable inherited +from the type-level request mapping. If the method had more arguments, we could supply null for +arguments not needed for the URL. In general, only `@PathVariable` and `@RequestParam` arguments +are relevant for constructing the URL. + +There are additional ways to use `MvcUriComponentsBuilder`. For example, you can use a technique +akin to mock testing through proxies to avoid referring to the controller method by name, as the following example shows +(the example assumes static import of `MvcUriComponentsBuilder.on`): + +Java + +``` +UriComponents uriComponents = MvcUriComponentsBuilder + .fromMethodCall(on(BookingController.class).getBooking(21)).buildAndExpand(42); + +URI uri = uriComponents.encode().toUri(); +``` + +Kotlin + +``` +val uriComponents = MvcUriComponentsBuilder + .fromMethodCall(on(BookingController::class.java).getBooking(21)).buildAndExpand(42) + +val uri = uriComponents.encode().toUri() +``` + +| |Controller method signatures are limited in their design when they are supposed to be usable for<br/>link creation with `fromMethodCall`. Aside from needing a proper parameter signature,<br/>there is a technical limitation on the return type (namely, generating a runtime proxy<br/>for link builder invocations), so the return type must not be `final`. In particular,<br/>the common `String` return type for view names does not work here. 
You should use `ModelAndView`or even plain `Object` (with a `String` return value) instead.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The earlier examples use static methods in `MvcUriComponentsBuilder`. Internally, they rely +on `ServletUriComponentsBuilder` to prepare a base URL from the scheme, host, port, +context path, and servlet path of the current request. This works well in most cases. +However, sometimes, it can be insufficient. For example, you may be outside the context of +a request (such as a batch process that prepares links) or perhaps you need to insert a path +prefix (such as a locale prefix that was removed from the request path and needs to be +re-inserted into links). + +For such cases, you can use the static `fromXxx` overloaded methods that accept a`UriComponentsBuilder` to use a base URL. Alternatively, you can create an instance of `MvcUriComponentsBuilder`with a base URL and then use the instance-based `withXxx` methods. 
For example, the
+following listing uses `withMethodCall`:
+
+Java
+
+```
+UriComponentsBuilder base = ServletUriComponentsBuilder.fromCurrentContextPath().path("/en");
+MvcUriComponentsBuilder builder = MvcUriComponentsBuilder.relativeTo(base);
+UriComponents uriComponents = builder.withMethodCall(on(BookingController.class).getBooking(21)).buildAndExpand(42);
+
+URI uri = uriComponents.encode().toUri();
+```
+
+Kotlin
+
+```
+val base = ServletUriComponentsBuilder.fromCurrentContextPath().path("/en")
+val builder = MvcUriComponentsBuilder.relativeTo(base)
+val uriComponents = builder.withMethodCall(on(BookingController::class.java).getBooking(21)).buildAndExpand(42)
+
+val uri = uriComponents.encode().toUri()
+```
+
+| |As of 5.1, `MvcUriComponentsBuilder` ignores information from the `Forwarded` and`X-Forwarded-*` headers, which specify the client-originated address. Consider using the[ForwardedHeaderFilter](#filters-forwarded-headers) to extract and use or to discard<br/>such headers.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 1.5.6. Links in Views
+
+In views such as Thymeleaf, FreeMarker, or JSP, you can build links to annotated controllers
+by referring to the implicitly or explicitly assigned name for each request mapping.
+
+Consider the following example:
+
+Java
+
+```
+@RequestMapping("/people/{id}/addresses")
+public class PersonAddressController {
+
+    @RequestMapping("/{country}")
+    public HttpEntity<PersonAddress> getAddress(@PathVariable String country) { ... }
+}
+```
+
+Kotlin
+
+```
+@RequestMapping("/people/{id}/addresses")
+class PersonAddressController {
+
+    @RequestMapping("/{country}")
+    fun getAddress(@PathVariable country: String): HttpEntity<PersonAddress> { ... 
} +} +``` + +Given the preceding controller, you can prepare a link from a JSP, as follows: + +``` +<%@ taglib uri="http://www.springframework.org/tags" prefix="s" %> +... +<a href="${s:mvcUrl('PAC#getAddress').arg(0,'US').buildAndExpand('123')}">Get Address</a> +``` + +The preceding example relies on the `mvcUrl` function declared in the Spring tag library +(that is, META-INF/spring.tld), but it is easy to define your own function or prepare a +similar one for other templating technologies. + +Here is how this works. On startup, every `@RequestMapping` is assigned a default name +through `HandlerMethodMappingNamingStrategy`, whose default implementation uses the +capital letters of the class and the method name (for example, the `getThing` method in`ThingController` becomes "TC#getThing"). If there is a name clash, you can use`@RequestMapping(name="..")` to assign an explicit name or implement your own`HandlerMethodMappingNamingStrategy`. + +### 1.6. Asynchronous Requests + +[Compared to WebFlux](#mvc-ann-async-vs-webflux) + +Spring MVC has an extensive integration with Servlet 3.0 asynchronous request[processing](#mvc-ann-async-processing): + +* [`DeferredResult`](#mvc-ann-async-deferredresult) and [`Callable`](#mvc-ann-async-callable)return values in controller methods provide basic support for a single asynchronous + return value. + +* Controllers can [stream](#mvc-ann-async-http-streaming) multiple values, including[SSE](#mvc-ann-async-sse) and [raw data](#mvc-ann-async-output-stream). + +* Controllers can use reactive clients and return[reactive types](#mvc-ann-async-reactive-types) for response handling. + +#### 1.6.1. 
`DeferredResult` + +[Compared to WebFlux](#mvc-ann-async-vs-webflux) + +Once the asynchronous request processing feature is [enabled](#mvc-ann-async-configuration)in the Servlet container, controller methods can wrap any supported controller method +return value with `DeferredResult`, as the following example shows: + +Java + +``` +@GetMapping("/quotes") +@ResponseBody +public DeferredResult<String> quotes() { + DeferredResult<String> deferredResult = new DeferredResult<String>(); + // Save the deferredResult somewhere.. + return deferredResult; +} + +// From some other thread... +deferredResult.setResult(result); +``` + +Kotlin + +``` +@GetMapping("/quotes") +@ResponseBody +fun quotes(): DeferredResult<String> { + val deferredResult = DeferredResult<String>() + // Save the deferredResult somewhere.. + return deferredResult +} + +// From some other thread... +deferredResult.setResult(result) +``` + +The controller can produce the return value asynchronously, from a different thread — for +example, in response to an external event (JMS message), a scheduled task, or other event. + +#### 1.6.2. `Callable` + +[Compared to WebFlux](#mvc-ann-async-vs-webflux) + +A controller can wrap any supported return value with `java.util.concurrent.Callable`, +as the following example shows: + +Java + +``` +@PostMapping +public Callable<String> processUpload(final MultipartFile file) { + + return new Callable<String>() { + public String call() throws Exception { + // ... + return "someView"; + } + }; +} +``` + +Kotlin + +``` +@PostMapping +fun processUpload(file: MultipartFile) = Callable<String> { + // ... + "someView" +} +``` + +The return value can then be obtained by running the given task through the[configured](#mvc-ann-async-configuration-spring-mvc) `TaskExecutor`. + +#### 1.6.3. 
Processing + +[Compared to WebFlux](#mvc-ann-async-vs-webflux) + +Here is a very concise overview of Servlet asynchronous request processing: + +* A `ServletRequest` can be put in asynchronous mode by calling `request.startAsync()`. + The main effect of doing so is that the Servlet (as well as any filters) can exit, but + the response remains open to let processing complete later. + +* The call to `request.startAsync()` returns `AsyncContext`, which you can use for + further control over asynchronous processing. For example, it provides the `dispatch` method, + which is similar to a forward from the Servlet API, except that it lets an + application resume request processing on a Servlet container thread. + +* The `ServletRequest` provides access to the current `DispatcherType`, which you can + use to distinguish between processing the initial request, an asynchronous + dispatch, a forward, and other dispatcher types. + +`DeferredResult` processing works as follows: + +* The controller returns a `DeferredResult` and saves it in some in-memory + queue or list where it can be accessed. + +* Spring MVC calls `request.startAsync()`. + +* Meanwhile, the `DispatcherServlet` and all configured filters exit the request + processing thread, but the response remains open. + +* The application sets the `DeferredResult` from some thread, and Spring MVC + dispatches the request back to the Servlet container. + +* The `DispatcherServlet` is invoked again, and processing resumes with the + asynchronously produced return value. + +`Callable` processing works as follows: + +* The controller returns a `Callable`. + +* Spring MVC calls `request.startAsync()` and submits the `Callable` to + a `TaskExecutor` for processing in a separate thread. + +* Meanwhile, the `DispatcherServlet` and all filters exit the Servlet container thread, + but the response remains open. 
+ +* Eventually the `Callable` produces a result, and Spring MVC dispatches the request back + to the Servlet container to complete processing. + +* The `DispatcherServlet` is invoked again, and processing resumes with the + asynchronously produced return value from the `Callable`. + +For further background and context, you can also read[the +blog posts](https://spring.io/blog/2012/05/07/spring-mvc-3-2-preview-introducing-servlet-3-async-support) that introduced asynchronous request processing support in Spring MVC 3.2. + +##### Exception Handling + +When you use a `DeferredResult`, you can choose whether to call `setResult` or`setErrorResult` with an exception. In both cases, Spring MVC dispatches the request back +to the Servlet container to complete processing. It is then treated either as if the +controller method returned the given value or as if it produced the given exception. +The exception then goes through the regular exception handling mechanism (for example, invoking`@ExceptionHandler` methods). + +When you use `Callable`, similar processing logic occurs, the main difference being that +the result is returned from the `Callable` or an exception is raised by it. + +##### Interception + +`HandlerInterceptor` instances can be of type `AsyncHandlerInterceptor`, to receive the`afterConcurrentHandlingStarted` callback on the initial request that starts asynchronous +processing (instead of `postHandle` and `afterCompletion`). + +`HandlerInterceptor` implementations can also register a `CallableProcessingInterceptor`or a `DeferredResultProcessingInterceptor`, to integrate more deeply with the +lifecycle of an asynchronous request (for example, to handle a timeout event). See[`AsyncHandlerInterceptor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/AsyncHandlerInterceptor.html)for more details. + +`DeferredResult` provides `onTimeout(Runnable)` and `onCompletion(Runnable)` callbacks. 
+See the [javadoc of `DeferredResult`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/context/request/async/DeferredResult.html)for more details. `Callable` can be substituted for `WebAsyncTask` that exposes additional +methods for timeout and completion callbacks. + +##### Compared to WebFlux + +The Servlet API was originally built for making a single pass through the Filter-Servlet +chain. Asynchronous request processing, added in Servlet 3.0, lets applications exit +the Filter-Servlet chain but leave the response open for further processing. The Spring MVC +asynchronous support is built around that mechanism. When a controller returns a `DeferredResult`, +the Filter-Servlet chain is exited, and the Servlet container thread is released. Later, when +the `DeferredResult` is set, an `ASYNC` dispatch (to the same URL) is made, during which the +controller is mapped again but, rather than invoking it, the `DeferredResult` value is used +(as if the controller returned it) to resume processing. + +By contrast, Spring WebFlux is neither built on the Servlet API, nor does it need such an +asynchronous request processing feature, because it is asynchronous by design. Asynchronous +handling is built into all framework contracts and is intrinsically supported through all +stages of request processing. + +From a programming model perspective, both Spring MVC and Spring WebFlux support +asynchronous and [Reactive Types](#mvc-ann-async-reactive-types) as return values in controller methods. +Spring MVC even supports streaming, including reactive back pressure. However, individual +writes to the response remain blocking (and are performed on a separate thread), unlike WebFlux, +which relies on non-blocking I/O and does not need an extra thread for each write. 
+ +Another fundamental difference is that Spring MVC does not support asynchronous or reactive +types in controller method arguments (for example, `@RequestBody`, `@RequestPart`, and others), +nor does it have any explicit support for asynchronous and reactive types as model attributes. +Spring WebFlux does support all that. + +#### 1.6.4. HTTP Streaming + +[WebFlux](web-reactive.html#webflux-codecs-streaming) + +You can use `DeferredResult` and `Callable` for a single asynchronous return value. +What if you want to produce multiple asynchronous values and have those written to the +response? This section describes how to do so. + +##### Objects + +You can use the `ResponseBodyEmitter` return value to produce a stream of objects, where +each object is serialized with an[`HttpMessageConverter`](integration.html#rest-message-conversion) and written to the +response, as the following example shows: + +Java + +``` +@GetMapping("/events") +public ResponseBodyEmitter handle() { + ResponseBodyEmitter emitter = new ResponseBodyEmitter(); + // Save the emitter somewhere.. + return emitter; +} + +// In some other thread +emitter.send("Hello once"); + +// and again later on +emitter.send("Hello again"); + +// and done at some point +emitter.complete(); +``` + +Kotlin + +``` +@GetMapping("/events") +fun handle() = ResponseBodyEmitter().apply { + // Save the emitter somewhere.. +} + +// In some other thread +emitter.send("Hello once") + +// and again later on +emitter.send("Hello again") + +// and done at some point +emitter.complete() +``` + +You can also use `ResponseBodyEmitter` as the body in a `ResponseEntity`, letting you +customize the status and headers of the response. + +When an `emitter` throws an `IOException` (for example, if the remote client went away), applications +are not responsible for cleaning up the connection and should not invoke `emitter.complete`or `emitter.completeWithError`. 
Instead, the servlet container automatically initiates an`AsyncListener` error notification, in which Spring MVC makes a `completeWithError` call. +This call, in turn, performs one final `ASYNC` dispatch to the application, during which Spring MVC +invokes the configured exception resolvers and completes the request. + +##### SSE + +`SseEmitter` (a subclass of `ResponseBodyEmitter`) provides support for[Server-Sent Events](https://www.w3.org/TR/eventsource/), where events sent from the server +are formatted according to the W3C SSE specification. To produce an SSE +stream from a controller, return `SseEmitter`, as the following example shows: + +Java + +``` +@GetMapping(path="/events", produces=MediaType.TEXT_EVENT_STREAM_VALUE) +public SseEmitter handle() { + SseEmitter emitter = new SseEmitter(); + // Save the emitter somewhere.. + return emitter; +} + +// In some other thread +emitter.send("Hello once"); + +// and again later on +emitter.send("Hello again"); + +// and done at some point +emitter.complete(); +``` + +Kotlin + +``` +@GetMapping("/events", produces = [MediaType.TEXT_EVENT_STREAM_VALUE]) +fun handle() = SseEmitter().apply { + // Save the emitter somewhere.. +} + +// In some other thread +emitter.send("Hello once") + +// and again later on +emitter.send("Hello again") + +// and done at some point +emitter.complete() +``` + +While SSE is the main option for streaming into browsers, note that Internet Explorer +does not support Server-Sent Events. Consider using Spring’s[WebSocket messaging](#websocket) with[SockJS fallback](#websocket-fallback) transports (including SSE) that target +a wide range of browsers. + +See also [previous section](#mvc-ann-async-objects) for notes on exception handling. + +##### Raw Data + +Sometimes, it is useful to bypass message conversion and stream directly to the response`OutputStream` (for example, for a file download). 
You can use the `StreamingResponseBody`return value type to do so, as the following example shows: + +Java + +``` +@GetMapping("/download") +public StreamingResponseBody handle() { + return new StreamingResponseBody() { + @Override + public void writeTo(OutputStream outputStream) throws IOException { + // write... + } + }; +} +``` + +Kotlin + +``` +@GetMapping("/download") +fun handle() = StreamingResponseBody { + // write... +} +``` + +You can use `StreamingResponseBody` as the body in a `ResponseEntity` to +customize the status and headers of the response. + +#### 1.6.5. Reactive Types + +[WebFlux](web-reactive.html#webflux-codecs-streaming) + +Spring MVC supports use of reactive client libraries in a controller (also read[Reactive Libraries](web-reactive.html#webflux-reactive-libraries) in the WebFlux section). +This includes the `WebClient` from `spring-webflux` and others, such as Spring Data +reactive data repositories. In such scenarios, it is convenient to be able to return +reactive types from the controller method. + +Reactive return values are handled as follows: + +* A single-value promise is adapted to, similar to using `DeferredResult`. Examples + include `Mono` (Reactor) or `Single` (RxJava). + +* A multi-value stream with a streaming media type (such as `application/x-ndjson`or `text/event-stream`) is adapted to, similar to using `ResponseBodyEmitter` or`SseEmitter`. Examples include `Flux` (Reactor) or `Observable` (RxJava). + Applications can also return `Flux<ServerSentEvent>` or `Observable<ServerSentEvent>`. + +* A multi-value stream with any other media type (such as `application/json`) is adapted + to, similar to using `DeferredResult<List<?>>`. 
+ +| |Spring MVC supports Reactor and RxJava through the[`ReactiveAdapterRegistry`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/ReactiveAdapterRegistry.html) from`spring-core`, which lets it adapt from multiple reactive libraries.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For streaming to the response, reactive back pressure is supported, but writes to the +response are still blocking and are run on a separate thread through the[configured](#mvc-ann-async-configuration-spring-mvc) `TaskExecutor`, to avoid +blocking the upstream source (such as a `Flux` returned from `WebClient`). +By default, `SimpleAsyncTaskExecutor` is used for the blocking writes, but that is not +suitable under load. If you plan to stream with a reactive type, you should use the[MVC configuration](#mvc-ann-async-configuration-spring-mvc) to configure a task executor. + +#### 1.6.6. Disconnects + +[WebFlux](web-reactive.html#webflux-codecs-streaming) + +The Servlet API does not provide any notification when a remote client goes away. +Therefore, while streaming to the response, whether through [SseEmitter](#mvc-ann-async-sse)or [reactive types](#mvc-ann-async-reactive-types), it is important to send data periodically, +since the write fails if the client has disconnected. The send could take the form of an +empty (comment-only) SSE event or any other data that the other side would have to interpret +as a heartbeat and ignore. + +Alternatively, consider using web messaging solutions (such as[STOMP over WebSocket](#websocket-stomp) or WebSocket with [SockJS](#websocket-fallback)) +that have a built-in heartbeat mechanism. + +#### 1.6.7. 
Configuration + +[Compared to WebFlux](#mvc-ann-async-vs-webflux) + +The asynchronous request processing feature must be enabled at the Servlet container level. +The MVC configuration also exposes several options for asynchronous requests. + +##### Servlet Container + +Filter and Servlet declarations have an `asyncSupported` flag that needs to be set to `true`to enable asynchronous request processing. In addition, Filter mappings should be +declared to handle the `ASYNC` `javax.servlet.DispatchType`. + +In Java configuration, when you use `AbstractAnnotationConfigDispatcherServletInitializer`to initialize the Servlet container, this is done automatically. + +In `web.xml` configuration, you can add `<async-supported>true</async-supported>` to the`DispatcherServlet` and to `Filter` declarations and add`<dispatcher>ASYNC</dispatcher>` to filter mappings. + +##### Spring MVC + +The MVC configuration exposes the following options related to asynchronous request processing: + +* Java configuration: Use the `configureAsyncSupport` callback on `WebMvcConfigurer`. + +* XML namespace: Use the `<async-support>` element under `<mvc:annotation-driven>`. + +You can configure the following: + +* Default timeout value for async requests, which if not set, depends + on the underlying Servlet container. + +* `AsyncTaskExecutor` to use for blocking writes when streaming with[Reactive Types](#mvc-ann-async-reactive-types) and for executing `Callable` instances returned from + controller methods. We highly recommend configuring this property if you + stream with reactive types or have controller methods that return `Callable`, since + by default, it is a `SimpleAsyncTaskExecutor`. + +* `DeferredResultProcessingInterceptor` implementations and `CallableProcessingInterceptor` implementations. + +Note that you can also set the default timeout value on a `DeferredResult`, +a `ResponseBodyEmitter`, and an `SseEmitter`. 
For a `Callable`, you can use`WebAsyncTask` to provide a timeout value. + +### 1.7. CORS + +[WebFlux](web-reactive.html#webflux-cors) + +Spring MVC lets you handle CORS (Cross-Origin Resource Sharing). This section +describes how to do so. + +#### 1.7.1. Introduction + +[WebFlux](web-reactive.html#webflux-cors-intro) + +For security reasons, browsers prohibit AJAX calls to resources outside the current origin. +For example, you could have your bank account in one tab and evil.com in another. Scripts +from evil.com should not be able to make AJAX requests to your bank API with your +credentials — for example withdrawing money from your account! + +Cross-Origin Resource Sharing (CORS) is a [W3C specification](https://www.w3.org/TR/cors/)implemented by [most browsers](https://caniuse.com/#feat=cors) that lets you specify +what kind of cross-domain requests are authorized, rather than using less secure and less +powerful workarounds based on IFRAME or JSONP. + +#### 1.7.2. Processing + +[WebFlux](web-reactive.html#webflux-cors-processing) + +The CORS specification distinguishes between preflight, simple, and actual requests. +To learn how CORS works, you can read[this article](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS), among +many others, or see the specification for more details. + +Spring MVC `HandlerMapping` implementations provide built-in support for CORS. After successfully +mapping a request to a handler, `HandlerMapping` implementations check the CORS configuration for the +given request and handler and take further actions. Preflight requests are handled +directly, while simple and actual CORS requests are intercepted, validated, and have +required CORS response headers set. + +In order to enable cross-origin requests (that is, the `Origin` header is present and +differs from the host of the request), you need to have some explicitly declared CORS +configuration. If no matching CORS configuration is found, preflight requests are +rejected. 
No CORS headers are added to the responses of simple and actual CORS requests +and, consequently, browsers reject them. + +Each `HandlerMapping` can be[configured](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/handler/AbstractHandlerMapping.html#setCorsConfigurations-java.util.Map-)individually with URL pattern-based `CorsConfiguration` mappings. In most cases, applications +use the MVC Java configuration or the XML namespace to declare such mappings, which results +in a single global map being passed to all `HandlerMapping` instances. + +You can combine global CORS configuration at the `HandlerMapping` level with more +fine-grained, handler-level CORS configuration. For example, annotated controllers can use +class- or method-level `@CrossOrigin` annotations (other handlers can implement`CorsConfigurationSource`). + +The rules for combining global and local configuration are generally additive — for example, +all global and all local origins. For those attributes where only a single value can be +accepted, e.g. `allowCredentials` and `maxAge`, the local overrides the global value. See[`CorsConfiguration#combine(CorsConfiguration)`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/cors/CorsConfiguration.html#combine-org.springframework.web.cors.CorsConfiguration-)for more details. + +| |To learn more from the source or make advanced customizations, check the code behind:<br/><br/>* `CorsConfiguration`<br/><br/>* `CorsProcessor`, `DefaultCorsProcessor`<br/><br/>* `AbstractHandlerMapping`| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.7.3. 
`@CrossOrigin` + +[WebFlux](web-reactive.html#webflux-cors-controller) + +The [`@CrossOrigin`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/bind/annotation/CrossOrigin.html)annotation enables cross-origin requests on annotated controller methods, +as the following example shows: + +Java + +``` +@RestController +@RequestMapping("/account") +public class AccountController { + + @CrossOrigin + @GetMapping("/{id}") + public Account retrieve(@PathVariable Long id) { + // ... + } + + @DeleteMapping("/{id}") + public void remove(@PathVariable Long id) { + // ... + } +} +``` + +Kotlin + +``` +@RestController +@RequestMapping("/account") +class AccountController { + + @CrossOrigin + @GetMapping("/{id}") + fun retrieve(@PathVariable id: Long): Account { + // ... + } + + @DeleteMapping("/{id}") + fun remove(@PathVariable id: Long) { + // ... + } +} +``` + +By default, `@CrossOrigin` allows: + +* All origins. + +* All headers. + +* All HTTP methods to which the controller method is mapped. + +`allowCredentials` is not enabled by default, since that establishes a trust level +that exposes sensitive user-specific information (such as cookies and CSRF tokens) and +should only be used where appropriate. When it is enabled either `allowOrigins` must be +set to one or more specific domain (but not the special value `"*"`) or alternatively +the `allowOriginPatterns` property may be used to match to a dynamic set of origins. + +`maxAge` is set to 30 minutes. + +`@CrossOrigin` is supported at the class level, too, and is inherited by all methods, +as the following example shows: + +Java + +``` +@CrossOrigin(origins = "https://domain2.com", maxAge = 3600) +@RestController +@RequestMapping("/account") +public class AccountController { + + @GetMapping("/{id}") + public Account retrieve(@PathVariable Long id) { + // ... + } + + @DeleteMapping("/{id}") + public void remove(@PathVariable Long id) { + // ... 
+ } +} +``` + +Kotlin + +``` +@CrossOrigin(origins = ["https://domain2.com"], maxAge = 3600) +@RestController +@RequestMapping("/account") +class AccountController { + + @GetMapping("/{id}") + fun retrieve(@PathVariable id: Long): Account { + // ... + } + + @DeleteMapping("/{id}") + fun remove(@PathVariable id: Long) { + // ... + } +} +``` + +You can use `@CrossOrigin` at both the class level and the method level, +as the following example shows: + +Java + +``` +@CrossOrigin(maxAge = 3600) +@RestController +@RequestMapping("/account") +public class AccountController { + + @CrossOrigin("https://domain2.com") + @GetMapping("/{id}") + public Account retrieve(@PathVariable Long id) { + // ... + } + + @DeleteMapping("/{id}") + public void remove(@PathVariable Long id) { + // ... + } +} +``` + +Kotlin + +``` +@CrossOrigin(maxAge = 3600) +@RestController +@RequestMapping("/account") +class AccountController { + + @CrossOrigin("https://domain2.com") + @GetMapping("/{id}") + fun retrieve(@PathVariable id: Long): Account { + // ... + } + + @DeleteMapping("/{id}") + fun remove(@PathVariable id: Long) { + // ... + } +} +``` + +#### 1.7.4. Global Configuration + +[WebFlux](web-reactive.html#webflux-cors-global) + +In addition to fine-grained, controller method level configuration, you probably want to +define some global CORS configuration, too. You can set URL-based `CorsConfiguration`mappings individually on any `HandlerMapping`. Most applications, however, use the +MVC Java configuration or the MVC XML namespace to do that. + +By default, global configuration enables the following: + +* All origins. + +* All headers. + +* `GET`, `HEAD`, and `POST` methods. + +`allowCredentials` is not enabled by default, since that establishes a trust level +that exposes sensitive user-specific information (such as cookies and CSRF tokens) and +should only be used where appropriate. 
When it is enabled either `allowOrigins` must be +set to one or more specific domain (but not the special value `"*"`) or alternatively +the `allowOriginPatterns` property may be used to match to a dynamic set of origins. + +`maxAge` is set to 30 minutes. + +##### Java Configuration + +[WebFlux](web-reactive.html#webflux-cors-global) + +To enable CORS in the MVC Java config, you can use the `CorsRegistry` callback, +as the following example shows: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void addCorsMappings(CorsRegistry registry) { + + registry.addMapping("/api/**") + .allowedOrigins("https://domain2.com") + .allowedMethods("PUT", "DELETE") + .allowedHeaders("header1", "header2", "header3") + .exposedHeaders("header1", "header2") + .allowCredentials(true).maxAge(3600); + + // Add more mappings... + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun addCorsMappings(registry: CorsRegistry) { + + registry.addMapping("/api/**") + .allowedOrigins("https://domain2.com") + .allowedMethods("PUT", "DELETE") + .allowedHeaders("header1", "header2", "header3") + .exposedHeaders("header1", "header2") + .allowCredentials(true).maxAge(3600) + + // Add more mappings... + } +} +``` + +##### XML Configuration + +To enable CORS in the XML namespace, you can use the `<mvc:cors>` element, +as the following example shows: + +``` +<mvc:cors> + + <mvc:mapping path="/api/**" + allowed-origins="https://domain1.com, https://domain2.com" + allowed-methods="GET, PUT" + allowed-headers="header1, header2, header3" + exposed-headers="header1, header2" allow-credentials="true" + max-age="123" /> + + <mvc:mapping path="/resources/**" + allowed-origins="https://domain1.com" /> + +</mvc:cors> +``` + +#### 1.7.5. 
CORS Filter + +[WebFlux](web-reactive.html#webflux-cors-webfilter) + +You can apply CORS support through the built-in[`CorsFilter`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/filter/CorsFilter.html). + +| |If you try to use the `CorsFilter` with Spring Security, keep in mind that<br/>Spring Security has[built-in support](https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#cors)for CORS.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To configure the filter, pass a`CorsConfigurationSource` to its constructor, as the following example shows: + +Java + +``` +CorsConfiguration config = new CorsConfiguration(); + +// Possibly... +// config.applyPermitDefaultValues() + +config.setAllowCredentials(true); +config.addAllowedOrigin("https://domain1.com"); +config.addAllowedHeader("*"); +config.addAllowedMethod("*"); + +UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource(); +source.registerCorsConfiguration("/**", config); + +CorsFilter filter = new CorsFilter(source); +``` + +Kotlin + +``` +val config = CorsConfiguration() + +// Possibly... +// config.applyPermitDefaultValues() + +config.allowCredentials = true +config.addAllowedOrigin("https://domain1.com") +config.addAllowedHeader("*") +config.addAllowedMethod("*") + +val source = UrlBasedCorsConfigurationSource() +source.registerCorsConfiguration("/**", config) + +val filter = CorsFilter(source) +``` + +### 1.8. Web Security + +[WebFlux](web-reactive.html#webflux-web-security) + +The [Spring Security](https://projects.spring.io/spring-security/) project provides support +for protecting web applications from malicious exploits. 
See the Spring Security +reference documentation, including: + +* [Spring MVC Security](https://docs.spring.io/spring-security/site/docs/current/reference/html5/#mvc) + +* [Spring MVC Test Support](https://docs.spring.io/spring-security/site/docs/current/reference/html5/#test-mockmvc) + +* [CSRF protection](https://docs.spring.io/spring-security/site/docs/current/reference/html5/#csrf) + +* [Security Response Headers](https://docs.spring.io/spring-security/site/docs/current/reference/html5/#headers) + +[HDIV](https://hdiv.org/) is another web security framework that integrates with Spring MVC. + +### 1.9. HTTP Caching + +[WebFlux](web-reactive.html#webflux-caching) + +HTTP caching can significantly improve the performance of a web application. HTTP caching +revolves around the `Cache-Control` response header and, subsequently, conditional request +headers (such as `Last-Modified` and `ETag`). `Cache-Control` advises private (for example, browser) +and public (for example, proxy) caches on how to cache and re-use responses. An `ETag` header is used +to make a conditional request that may result in a 304 (NOT\_MODIFIED) without a body, +if the content has not changed. `ETag` can be seen as a more sophisticated successor to +the `Last-Modified` header. + +This section describes the HTTP caching-related options that are available in Spring Web MVC. + +#### 1.9.1. 
`CacheControl` + +[WebFlux](web-reactive.html#webflux-caching-cachecontrol) + +[`CacheControl`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/http/CacheControl.html) provides support for +configuring settings related to the `Cache-Control` header and is accepted as an argument +in a number of places: + +* [`WebContentInterceptor`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/mvc/WebContentInterceptor.html) + +* [`WebContentGenerator`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/support/WebContentGenerator.html) + +* [Controllers](#mvc-caching-etag-lastmodified) + +* [Static Resources](#mvc-caching-static-resources) + +While [RFC 7234](https://tools.ietf.org/html/rfc7234#section-5.2.2) describes all possible +directives for the `Cache-Control` response header, the `CacheControl` type takes a +use case-oriented approach that focuses on the common scenarios: + +Java + +``` +// Cache for an hour - "Cache-Control: max-age=3600" +CacheControl ccCacheOneHour = CacheControl.maxAge(1, TimeUnit.HOURS); + +// Prevent caching - "Cache-Control: no-store" +CacheControl ccNoStore = CacheControl.noStore(); + +// Cache for ten days in public and private caches, +// public caches should not transform the response +// "Cache-Control: max-age=864000, public, no-transform" +CacheControl ccCustom = CacheControl.maxAge(10, TimeUnit.DAYS).noTransform().cachePublic(); +``` + +Kotlin + +``` +// Cache for an hour - "Cache-Control: max-age=3600" +val ccCacheOneHour = CacheControl.maxAge(1, TimeUnit.HOURS) + +// Prevent caching - "Cache-Control: no-store" +val ccNoStore = CacheControl.noStore() + +// Cache for ten days in public and private caches, +// public caches should not transform the response +// "Cache-Control: max-age=864000, public, no-transform" +val ccCustom = CacheControl.maxAge(10, TimeUnit.DAYS).noTransform().cachePublic() +``` + 
+`WebContentGenerator` also accepts a simpler `cachePeriod` property (defined in seconds) that +works as follows: + +* A `-1` value does not generate a `Cache-Control` response header. + +* A `0` value prevents caching by using the `'Cache-Control: no-store'` directive. + +* An `n > 0` value caches the given response for `n` seconds by using the`'Cache-Control: max-age=n'` directive. + +#### 1.9.2. Controllers + +[WebFlux](web-reactive.html#webflux-caching-etag-lastmodified) + +Controllers can add explicit support for HTTP caching. We recommend doing so, since the`lastModified` or `ETag` value for a resource needs to be calculated before it can be compared +against conditional request headers. A controller can add an `ETag` header and `Cache-Control`settings to a `ResponseEntity`, as the following example shows: + +Java + +``` +@GetMapping("/book/{id}") +public ResponseEntity<Book> showBook(@PathVariable Long id) { + + Book book = findBook(id); + String version = book.getVersion(); + + return ResponseEntity + .ok() + .cacheControl(CacheControl.maxAge(30, TimeUnit.DAYS)) + .eTag(version) // lastModified is also available + .body(book); +} +``` + +Kotlin + +``` +@GetMapping("/book/{id}") +fun showBook(@PathVariable id: Long): ResponseEntity<Book> { + + val book = findBook(id); + val version = book.getVersion() + + return ResponseEntity + .ok() + .cacheControl(CacheControl.maxAge(30, TimeUnit.DAYS)) + .eTag(version) // lastModified is also available + .body(book) +} +``` + +The preceding example sends a 304 (NOT\_MODIFIED) response with an empty body if the comparison +to the conditional request headers indicates that the content has not changed. Otherwise, the`ETag` and `Cache-Control` headers are added to the response. + +You can also make the check against conditional request headers in the controller, +as the following example shows: + +Java + +``` +@RequestMapping +public String myHandleMethod(WebRequest request, Model model) { + + long eTag = ... 
(1) + + if (request.checkNotModified(eTag)) { + return null; (2) + } + + model.addAttribute(...); (3) + return "myViewName"; +} +``` + +|**1**| Application-specific calculation. | +|-----|-------------------------------------------------------------------------| +|**2**|The response has been set to 304 (NOT\_MODIFIED) — no further processing.| +|**3**| Continue with the request processing. | + +Kotlin + +``` +@RequestMapping +fun myHandleMethod(request: WebRequest, model: Model): String? { + + val eTag: Long = ... (1) + + if (request.checkNotModified(eTag)) { + return null (2) + } + + model[...] = ... (3) + return "myViewName" +} +``` + +|**1**| Application-specific calculation. | +|-----|-------------------------------------------------------------------------| +|**2**|The response has been set to 304 (NOT\_MODIFIED) — no further processing.| +|**3**| Continue with the request processing. | + +There are three variants for checking conditional requests against `eTag` values, `lastModified`values, or both. For conditional `GET` and `HEAD` requests, you can set the response to +304 (NOT\_MODIFIED). For conditional `POST`, `PUT`, and `DELETE`, you can instead set the response +to 412 (PRECONDITION\_FAILED), to prevent concurrent modification. + +#### 1.9.3. Static Resources + +[WebFlux](web-reactive.html#webflux-caching-static-resources) + +You should serve static resources with a `Cache-Control` and conditional response headers +for optimal performance. See the section on configuring [Static Resources](#mvc-config-static-resources). + +#### 1.9.4. `ETag` Filter + +You can use the `ShallowEtagHeaderFilter` to add “shallow” `eTag` values that are computed from the +response content and, thus, save bandwidth but not CPU time. See [Shallow ETag](#filters-shallow-etag). + +### 1.10. View Technologies + +[WebFlux](web-reactive.html#webflux-view) + +The use of view technologies in Spring MVC is pluggable. 
Whether you decide to use +Thymeleaf, Groovy Markup Templates, JSPs, or other technologies is primarily a matter of +a configuration change. This chapter covers view technologies integrated with Spring MVC. +We assume you are already familiar with [View Resolution](#mvc-viewresolver). + +| |The views of a Spring MVC application live within the internal trust boundaries<br/>of that application. Views have access to all the beans of your application context. As<br/>such, it is not recommended to use Spring MVC’s template support in applications where<br/>the templates are editable by external sources, since this can have security implications.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.10.1. Thymeleaf + +[WebFlux](web-reactive.html#webflux-view-thymeleaf) + +Thymeleaf is a modern server-side Java template engine that emphasizes natural HTML +templates that can be previewed in a browser by double-clicking, which is very helpful +for independent work on UI templates (for example, by a designer) without the need for +a running server. If you want to replace JSPs, Thymeleaf offers one of the most +extensive sets of features to make such a transition easier. Thymeleaf is actively +developed and maintained. For a more complete introduction, see the[Thymeleaf](https://www.thymeleaf.org/) project home page. + +The Thymeleaf integration with Spring MVC is managed by the Thymeleaf project. +The configuration involves a few bean declarations, such as`ServletContextTemplateResolver`, `SpringTemplateEngine`, and `ThymeleafViewResolver`. +See [Thymeleaf+Spring](https://www.thymeleaf.org/documentation.html) for more details. + +#### 1.10.2. 
FreeMarker + +[WebFlux](web-reactive.html#webflux-view-freemarker) + +[Apache FreeMarker](https://freemarker.apache.org/) is a template engine for generating any +kind of text output from HTML to email and others. The Spring Framework has built-in +integration for using Spring MVC with FreeMarker templates. + +##### View Configuration + +[WebFlux](web-reactive.html#webflux-view-freemarker-contextconfig) + +The following example shows how to configure FreeMarker as a view technology: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.freeMarker(); + } + + // Configure FreeMarker... + + @Bean + public FreeMarkerConfigurer freeMarkerConfigurer() { + FreeMarkerConfigurer configurer = new FreeMarkerConfigurer(); + configurer.setTemplateLoaderPath("/WEB-INF/freemarker"); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.freeMarker() + } + + // Configure FreeMarker... + + @Bean + fun freeMarkerConfigurer() = FreeMarkerConfigurer().apply { + setTemplateLoaderPath("/WEB-INF/freemarker") + } +} +``` + +The following example shows how to configure the same in XML: + +``` +<mvc:annotation-driven/> + +<mvc:view-resolvers> + <mvc:freemarker/> +</mvc:view-resolvers> + +<!-- Configure FreeMarker... 
--> +<mvc:freemarker-configurer> + <mvc:template-loader-path location="/WEB-INF/freemarker"/> +</mvc:freemarker-configurer> +``` + +Alternatively, you can also declare the `FreeMarkerConfigurer` bean for full control over all +properties, as the following example shows: + +``` +<bean id="freemarkerConfig" class="org.springframework.web.servlet.view.freemarker.FreeMarkerConfigurer"> + <property name="templateLoaderPath" value="/WEB-INF/freemarker/"/> +</bean> +``` + +Your templates need to be stored in the directory specified by the `FreeMarkerConfigurer`shown in the preceding example. Given the preceding configuration, if your controller +returns a view name of `welcome`, the resolver looks for the`/WEB-INF/freemarker/welcome.ftl` template. + +##### FreeMarker Configuration + +[WebFlux](web-reactive.html#webflux-views-freemarker) + +You can pass FreeMarker 'Settings' and 'SharedVariables' directly to the FreeMarker`Configuration` object (which is managed by Spring) by setting the appropriate bean +properties on the `FreeMarkerConfigurer` bean. The `freemarkerSettings` property requires +a `java.util.Properties` object, and the `freemarkerVariables` property requires a`java.util.Map`. The following example shows how to use a `FreeMarkerConfigurer`: + +``` +<bean id="freemarkerConfig" class="org.springframework.web.servlet.view.freemarker.FreeMarkerConfigurer"> + <property name="templateLoaderPath" value="/WEB-INF/freemarker/"/> + <property name="freemarkerVariables"> + <map> + <entry key="xml_escape" value-ref="fmXmlEscape"/> + </map> + </property> +</bean> + +<bean id="fmXmlEscape" class="freemarker.template.utility.XmlEscape"/> +``` + +See the FreeMarker documentation for details of settings and variables as they apply to +the `Configuration` object. + +##### Form Handling + +Spring provides a tag library for use in JSPs that contains, among others, a`<spring:bind/>` element. 
This element primarily lets forms display values from +form-backing objects and show the results of failed validations from a `Validator` in the +web or business tier. Spring also has support for the same functionality in FreeMarker, +with additional convenience macros for generating form input elements themselves. + +###### The Bind Macros + +[WebFlux](web-reactive.html#webflux-view-bind-macros) + +A standard set of macros are maintained within the `spring-webmvc.jar` file for +FreeMarker, so they are always available to a suitably configured application. + +Some of the macros defined in the Spring templating libraries are considered internal +(private), but no such scoping exists in the macro definitions, making all macros visible +to calling code and user templates. The following sections concentrate only on the macros +you need to directly call from within your templates. If you wish to view the macro code +directly, the file is called `spring.ftl` and is in the`org.springframework.web.servlet.view.freemarker` package. + +###### Simple Binding + +In your HTML forms based on FreeMarker templates that act as a form view for a Spring MVC +controller, you can use code similar to the next example to bind to field values and +display error messages for each input field in similar fashion to the JSP equivalent. The +following example shows a `personForm` view: + +``` +<!-- FreeMarker macros have to be imported into a namespace. + We strongly recommend sticking to 'spring'. --> +<#import "/spring.ftl" as spring/> +<html> + ... + <form action="" method="POST"> + Name: + <@spring.bind "personForm.name"/> + <input type="text" + name="${spring.status.expression}" + value="${spring.status.value?html}"/><br /> + <#list spring.status.errorMessages as error> <b>${error}</b> <br /> </#list> + <br /> + ... + <input type="submit" value="submit"/> + </form> + ... 
+</html> +``` + +`<@spring.bind>` requires a 'path' argument, which consists of the name of your command +object (it is 'command', unless you changed it in your controller configuration) followed +by a period and the name of the field on the command object to which you wish to bind. You +can also use nested fields, such as `command.address.street`. The `bind` macro assumes the +default HTML escaping behavior specified by the `ServletContext` parameter`defaultHtmlEscape` in `web.xml`. + +An alternative form of the macro called `<@spring.bindEscaped>` takes a second argument +that explicitly specifies whether HTML escaping should be used in the status error +messages or values. You can set it to `true` or `false` as required. Additional form +handling macros simplify the use of HTML escaping, and you should use these macros +wherever possible. They are explained in the next section. + +###### Input Macros + +Additional convenience macros for FreeMarker simplify both binding and form generation +(including validation error display). It is never necessary to use these macros to +generate form input fields, and you can mix and match them with simple HTML or direct +calls to the Spring bind macros that we highlighted previously. 
+ +The following table of available macros shows the FreeMarker Template (FTL) definitions +and the parameter list that each takes: + +| macro | FTL definition | +|------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------| +| `message` (output a string from a resource bundle based on the code parameter) | \<@spring.message code/\> | +|`messageText` (output a string from a resource bundle based on the code parameter,<br/>falling back to the value of the default parameter)| \<@spring.messageText code, text/\> | +| `url` (prefix a relative URL with the application’s context root) | \<@spring.url relativeUrl/\> | +| `formInput` (standard input field for gathering user input) | \<@spring.formInput path, attributes, fieldType/\> | +| `formHiddenInput` (hidden input field for submitting non-user input) | \<@spring.formHiddenInput path, attributes/\> | +| `formPasswordInput` (standard input field for gathering passwords. Note that no<br/>value is ever populated in fields of this type.) 
| \<@spring.formPasswordInput path, attributes/\> | +| `formTextarea` (large text field for gathering long, freeform text input) | \<@spring.formTextarea path, attributes/\> | +| `formSingleSelect` (drop down box of options that let a single required value be<br/>selected) | \<@spring.formSingleSelect path, options, attributes/\> | +| `formMultiSelect` (a list box of options that let the user select 0 or more values) | \<@spring.formMultiSelect path, options, attributes/\> | +| `formRadioButtons` (a set of radio buttons that let a single selection be made<br/>from the available choices) |\<@spring.formRadioButtons path, options, separator, attributes/\>| +| `formCheckboxes` (a set of checkboxes that let 0 or more values be selected) |\<@spring.formCheckboxes path, options, separator, attributes/\> | +| `formCheckbox` (a single checkbox) | \<@spring.formCheckbox path, attributes/\> | +| `showErrors` (simplify display of validation errors for the bound field) | \<@spring.showErrors separator, classOrStyle/\> | + +| |In FreeMarker templates, `formHiddenInput` and `formPasswordInput` are not actually<br/>required, as you can use the normal `formInput` macro, specifying `hidden` or `password` as the value for the `fieldType` parameter.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The parameters to any of the above macros have consistent meanings: + +* `path`: The name of the field to bind to (i.e. "command.name") + +* `options`: A `Map` of all the available values that can be selected from in the input + field. The keys to the map represent the values that are POSTed back from the form + and bound to the command object. Map objects stored against the keys are the labels + displayed on the form to the user and may be different from the corresponding values + posted back by the form. 
Usually, such a map is supplied as reference data by the + controller. You can use any `Map` implementation, depending on required behavior. + For strictly sorted maps, you can use a `SortedMap` (such as a `TreeMap`) with a + suitable `Comparator` and, for arbitrary Maps that should return values in insertion + order, use a `LinkedHashMap` or a `LinkedMap` from `commons-collections`. + +* `separator`: Where multiple options are available as discrete elements (radio buttons + or checkboxes), the sequence of characters used to separate each one in the list + (such as `<br>`). + +* `attributes`: An additional string of arbitrary tags or text to be included within + the HTML tag itself. This string is echoed literally by the macro. For example, in a `textarea` field, you may supply attributes (such as 'rows="5" cols="60"'), or you + could pass style information such as 'style="border:1px solid silver"'. + +* `classOrStyle`: For the `showErrors` macro, the name of the CSS class that the `span` element that wraps each error uses. If no information is supplied (or the value is + empty), the errors are wrapped in `<b></b>` tags. + +The following sections outline examples of the macros. + +## Input Fields + +The `formInput` macro takes the `path` parameter (`command.name`) and an additional `attributes` parameter (which is empty in the upcoming example). The macro, along with all other form +generation macros, performs an implicit Spring bind on the path parameter. The binding +remains valid until a new bind occurs, so the `showErrors` macro does not need to pass the +path parameter again — it operates on the field for which a binding was last created. + +The `showErrors` macro takes a separator parameter (the characters that are used to +separate multiple errors on a given field) and also accepts a second parameter — this +time, a class name or style attribute. Note that FreeMarker can specify default +values for the attributes parameter. 
The following example shows how to use the `formInput`and `showErrors` macros: + +``` +<@spring.formInput "command.name"/> +<@spring.showErrors "<br>"/> +``` + +The next example shows the output of the form fragment, generating the name field and displaying a +validation error after the form was submitted with no value in the field. Validation +occurs through Spring’s Validation framework. + +The generated HTML resembles the following example: + +``` +Name: +<input type="text" name="name" value=""> +<br> + <b>required</b> +<br> +<br> +``` + +The `formTextarea` macro works the same way as the `formInput` macro and accepts the same +parameter list. Commonly, the second parameter (`attributes`) is used to pass style +information or `rows` and `cols` attributes for the `textarea`. + +## Selection Fields + +You can use four selection field macros to generate common UI value selection inputs in +your HTML forms: + +* `formSingleSelect` + +* `formMultiSelect` + +* `formRadioButtons` + +* `formCheckboxes` + +Each of the four macros accepts a `Map` of options that contains the value for the form +field and the label that corresponds to that value. The value and the label can be the +same. + +The next example is for radio buttons in FTL. The form-backing object specifies a default +value of 'London' for this field, so no validation is necessary. When the form is +rendered, the entire list of cities to choose from is supplied as reference data in the +model under the name 'cityMap'. The following listing shows the example: + +``` +... +Town: +<@spring.formRadioButtons "command.address.town", cityMap, ""/><br><br> +``` + +The preceding listing renders a line of radio buttons, one for each value in `cityMap`, and uses a +separator of `""`. No additional attributes are supplied (the last parameter to the macro is +missing). The `cityMap` uses the same `String` for each key-value pair in the map. The map’s +keys are what the form actually submits as `POST` request parameters. 
The map values are the +labels that the user sees. In the preceding example, given a list of three well known cities +and a default value in the form backing object, the HTML resembles the following: + +``` +Town: +<input type="radio" name="address.town" value="London">London</input> +<input type="radio" name="address.town" value="Paris" checked="checked">Paris</input> +<input type="radio" name="address.town" value="New York">New York</input> +``` + +If your application expects to handle cities by internal codes (for example), you can create the map of +codes with suitable keys, as the following example shows: + +Java + +``` +protected Map<String, ?> referenceData(HttpServletRequest request) throws Exception { + Map<String, String> cityMap = new LinkedHashMap<>(); + cityMap.put("LDN", "London"); + cityMap.put("PRS", "Paris"); + cityMap.put("NYC", "New York"); + + Map<String, Object> model = new HashMap<>(); + model.put("cityMap", cityMap); + return model; +} +``` + +Kotlin + +``` +protected fun referenceData(request: HttpServletRequest): Map<String, *> { + val cityMap = linkedMapOf( + "LDN" to "London", + "PRS" to "Paris", + "NYC" to "New York" + ) + return hashMapOf("cityMap" to cityMap) +} +``` + +The code now produces output where the radio values are the relevant codes, but the +user still sees the more user-friendly city names, as follows: + +``` +Town: +<input type="radio" name="address.town" value="LDN">London</input> +<input type="radio" name="address.town" value="PRS" checked="checked">Paris</input> +<input type="radio" name="address.town" value="NYC">New York</input> +``` + +###### HTML Escaping + +Default usage of the form macros described earlier results in HTML elements that are HTML 4.01 +compliant and that use the default value for HTML escaping defined in your `web.xml` file, as +used by Spring’s bind support. 
To make the elements be XHTML compliant or to override +the default HTML escaping value, you can specify two variables in your template (or in +your model, where they are visible to your templates). The advantage of specifying +them in the templates is that they can be changed to different values later in the +template processing to provide different behavior for different fields in your form. + +To switch to XHTML compliance for your tags, specify a value of `true` for a +model or context variable named `xhtmlCompliant`, as the following example shows: + +``` +<#-- for FreeMarker --> +<#assign xhtmlCompliant = true> +``` + +After processing this directive, any elements generated by the Spring macros are now XHTML +compliant. + +In similar fashion, you can specify HTML escaping per field, as the following example shows: + +``` +<#-- until this point, default HTML escaping is used --> + +<#assign htmlEscape = true> +<#-- next field will use HTML escaping --> +<@spring.formInput "command.name"/> + +<#assign htmlEscape = false in spring> +<#-- all future fields will be bound with HTML escaping off --> +``` + +#### 1.10.3. Groovy Markup + +The [Groovy Markup Template Engine](http://groovy-lang.org/templating.html#_the_markuptemplateengine)is primarily aimed at generating XML-like markup (XML, XHTML, HTML5, and others), but you can +use it to generate any text-based content. The Spring Framework has a built-in +integration for using Spring MVC with Groovy Markup. + +| |The Groovy Markup Template engine requires Groovy 2.3.1+.| +|---|---------------------------------------------------------| + +##### Configuration + +The following example shows how to configure the Groovy Markup Template Engine: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.groovy(); + } + + // Configure the Groovy Markup Template Engine... 
+ + @Bean + public GroovyMarkupConfigurer groovyMarkupConfigurer() { + GroovyMarkupConfigurer configurer = new GroovyMarkupConfigurer(); + configurer.setResourceLoaderPath("/WEB-INF/"); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.groovy() + } + + // Configure the Groovy Markup Template Engine... + + @Bean + fun groovyMarkupConfigurer() = GroovyMarkupConfigurer().apply { + resourceLoaderPath = "/WEB-INF/" + } +} +``` + +The following example shows how to configure the same in XML: + +``` +<mvc:annotation-driven/> + +<mvc:view-resolvers> + <mvc:groovy/> +</mvc:view-resolvers> + +<!-- Configure the Groovy Markup Template Engine... --> +<mvc:groovy-configurer resource-loader-path="/WEB-INF/"/> +``` + +##### Example + +Unlike traditional template engines, Groovy Markup relies on a DSL that uses a builder +syntax. The following example shows a sample template for an HTML page: + +``` +yieldUnescaped '<!DOCTYPE html>' +html(lang:'en') { + head { + meta('http-equiv':'"Content-Type" content="text/html; charset=utf-8"') + title('My page') + } + body { + p('This is an example of HTML contents') + } +} +``` + +#### 1.10.4. Script Views + +[WebFlux](web-reactive.html#webflux-view-script) + +The Spring Framework has a built-in integration for using Spring MVC with any +templating library that can run on top of the[JSR-223](https://www.jcp.org/en/jsr/detail?id=223) Java scripting engine. 
We have tested the following +templating libraries on different script engines: + +| Scripting Library | Scripting Engine | +|----------------------------------------------------------------------------------|-----------------------------------------------------| +| [Handlebars](https://handlebarsjs.com/) |[Nashorn](https://openjdk.java.net/projects/nashorn/)| +| [Mustache](https://mustache.github.io/) |[Nashorn](https://openjdk.java.net/projects/nashorn/)| +| [React](https://facebook.github.io/react/) |[Nashorn](https://openjdk.java.net/projects/nashorn/)| +| [EJS](https://www.embeddedjs.com/) |[Nashorn](https://openjdk.java.net/projects/nashorn/)| +| [ERB](https://www.stuartellis.name/articles/erb/) | [JRuby](https://www.jruby.org) | +|[String templates](https://docs.python.org/2/library/string.html#template-strings)| [Jython](https://www.jython.org/) | +| [Kotlin Script templating](https://github.com/sdeleuze/kotlin-script-templating) | [Kotlin](https://kotlinlang.org/) | + +| |The basic rule for integrating any other script engine is that it must implement the`ScriptEngine` and `Invocable` interfaces.| +|---|------------------------------------------------------------------------------------------------------------------------------| + +##### Requirements + +[WebFlux](web-reactive.html#webflux-view-script-dependencies) + +You need to have the script engine on your classpath, the details of which vary by script engine: + +* The [Nashorn](https://openjdk.java.net/projects/nashorn/) JavaScript engine is provided with + Java 8+. Using the latest update release available is highly recommended. + +* [JRuby](https://www.jruby.org) should be added as a dependency for Ruby support. + +* [Jython](https://www.jython.org) should be added as a dependency for Python support. 
+ +* `org.jetbrains.kotlin:kotlin-script-util` dependency and a `META-INF/services/javax.script.ScriptEngineFactory`file containing a `org.jetbrains.kotlin.script.jsr223.KotlinJsr223JvmLocalScriptEngineFactory`line should be added for Kotlin script support. See[this example](https://github.com/sdeleuze/kotlin-script-templating) for more details. + +You need to have the script templating library. One way to do that for JavaScript is +through [WebJars](https://www.webjars.org/). + +##### Script Templates + +[WebFlux](web-reactive.html#webflux-script-integrate) + +You can declare a `ScriptTemplateConfigurer` bean to specify the script engine to use, +the script files to load, what function to call to render templates, and so on. +The following example uses Mustache templates and the Nashorn JavaScript engine: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.scriptTemplate(); + } + + @Bean + public ScriptTemplateConfigurer configurer() { + ScriptTemplateConfigurer configurer = new ScriptTemplateConfigurer(); + configurer.setEngineName("nashorn"); + configurer.setScripts("mustache.js"); + configurer.setRenderObject("Mustache"); + configurer.setRenderFunction("render"); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.scriptTemplate() + } + + @Bean + fun configurer() = ScriptTemplateConfigurer().apply { + engineName = "nashorn" + setScripts("mustache.js") + renderObject = "Mustache" + renderFunction = "render" + } +} +``` + +The following example shows the same arrangement in XML: + +``` +<mvc:annotation-driven/> + +<mvc:view-resolvers> + <mvc:script-template/> +</mvc:view-resolvers> + +<mvc:script-template-configurer engine-name="nashorn" render-object="Mustache" 
render-function="render"> + <mvc:script location="mustache.js"/> +</mvc:script-template-configurer> +``` + +The controller would look no different for the Java and XML configurations, as the following example shows: + +Java + +``` +@Controller +public class SampleController { + + @GetMapping("/sample") + public String test(Model model) { + model.addAttribute("title", "Sample title"); + model.addAttribute("body", "Sample body"); + return "template"; + } +} +``` + +Kotlin + +``` +@Controller +class SampleController { + + @GetMapping("/sample") + fun test(model: Model): String { + model["title"] = "Sample title" + model["body"] = "Sample body" + return "template" + } +} +``` + +The following example shows the Mustache template: + +``` +<html> + <head> + <title>{{title}} + + +

{{body}}

+ + +``` + +The render function is called with the following parameters: + +* `String template`: The template content + +* `Map model`: The view model + +* `RenderingContext renderingContext`: The[`RenderingContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/view/script/RenderingContext.html)that gives access to the application context, the locale, the template loader, and the + URL (since 5.0) + +`Mustache.render()` is natively compatible with this signature, so you can call it directly. + +If your templating technology requires some customization, you can provide a script that +implements a custom render function. For example, [Handlerbars](https://handlebarsjs.com)needs to compile templates before using them and requires a[polyfill](https://en.wikipedia.org/wiki/Polyfill) to emulate some +browser facilities that are not available in the server-side script engine. + +The following example shows how to do so: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.scriptTemplate(); + } + + @Bean + public ScriptTemplateConfigurer configurer() { + ScriptTemplateConfigurer configurer = new ScriptTemplateConfigurer(); + configurer.setEngineName("nashorn"); + configurer.setScripts("polyfill.js", "handlebars.js", "render.js"); + configurer.setRenderFunction("render"); + configurer.setSharedEngine(false); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.scriptTemplate() + } + + @Bean + fun configurer() = ScriptTemplateConfigurer().apply { + engineName = "nashorn" + setScripts("polyfill.js", "handlebars.js", "render.js") + renderFunction = "render" + isSharedEngine = false + } +} +``` + +| |Setting the `sharedEngine` property to 
`false` is required when using non-thread-safe
script engines with templating libraries not designed for concurrency, such as Handlebars or
React running on Nashorn. In that case, Java SE 8 update 60 is required, due to [this bug](https://bugs.openjdk.java.net/browse/JDK-8076099), but it is generally
recommended to use a recent Java SE patch release in any case.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +`polyfill.js` defines only the `window` object needed by Handlebars to run properly, as follows: + +``` +var window = {}; +``` + +This basic `render.js` implementation compiles the template before using it. A production-ready +implementation should also store any reused cached templates or pre-compiled templates. +You can do so on the script side (and handle any customization you need — managing +template engine configuration, for example). The following example shows how to do so: + +``` +function render(template, model) { + var compiledTemplate = Handlebars.compile(template); + return compiledTemplate(model); +} +``` + +Check out the Spring Framework unit tests,[Java](https://github.com/spring-projects/spring-framework/tree/main/spring-webmvc/src/test/java/org/springframework/web/servlet/view/script), and[resources](https://github.com/spring-projects/spring-framework/tree/main/spring-webmvc/src/test/resources/org/springframework/web/servlet/view/script), +for more configuration examples. + +#### 1.10.5. JSP and JSTL + +The Spring Framework has a built-in integration for using Spring MVC with JSP and JSTL. + +##### View Resolvers + +When developing with JSPs, you typically declare an `InternalResourceViewResolver` bean. + +`InternalResourceViewResolver` can be used for dispatching to any Servlet resource but in +particular for JSPs. 
As a best practice, we strongly encourage placing your JSP files in +a directory under the `'WEB-INF'` directory so there can be no direct access by clients. + +``` + + + + + +``` + +##### JSPs versus JSTL + +When using the JSP Standard Tag Library (JSTL) you must use a special view class, the`JstlView`, as JSTL needs some preparation before things such as the I18N features can +work. + +##### Spring’s JSP Tag Library + +Spring provides data binding of request parameters to command objects, as described in +earlier chapters. To facilitate the development of JSP pages in combination with those +data binding features, Spring provides a few tags that make things even easier. All +Spring tags have HTML escaping features to enable or disable escaping of characters. + +The `spring.tld` tag library descriptor (TLD) is included in the `spring-webmvc.jar`. +For a comprehensive reference on individual tags, browse the[API reference](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/tags/package-summary.html#package.description)or see the tag library description. + +##### Spring’s form tag library + +As of version 2.0, Spring provides a comprehensive set of data binding-aware tags for +handling form elements when using JSP and Spring Web MVC. Each tag provides support for +the set of attributes of its corresponding HTML tag counterpart, making the tags +familiar and intuitive to use. The tag-generated HTML is HTML 4.01/XHTML 1.0 compliant. + +Unlike other form/input tag libraries, Spring’s form tag library is integrated with +Spring Web MVC, giving the tags access to the command object and reference data your +controller deals with. As we show in the following examples, the form tags make +JSPs easier to develop, read, and maintain. + +We go through the form tags and look at an example of how each tag is used. We have +included generated HTML snippets where certain tags require further commentary. 
+ +###### Configuration + +The form tag library comes bundled in `spring-webmvc.jar`. The library descriptor is +called `spring-form.tld`. + +To use the tags from this library, add the following directive to the top of your JSP +page: + +``` +<%@ taglib prefix="form" uri="http://www.springframework.org/tags/form" %> +``` + +where `form` is the tag name prefix you want to use for the tags from this library. + +###### The Form Tag + +This tag renders an HTML 'form' element and exposes a binding path to inner tags for +binding. It puts the command object in the `PageContext` so that the command object can +be accessed by inner tags. All the other tags in this library are nested tags of the`form` tag. + +Assume that we have a domain object called `User`. It is a JavaBean with properties +such as `firstName` and `lastName`. We can use it as the form-backing object of our +form controller, which returns `form.jsp`. The following example shows what `form.jsp` could +look like: + +``` + + + + + + + + + + + + + +
First Name:
Last Name:
+ +
+
+``` + +The `firstName` and `lastName` values are retrieved from the command object placed in +the `PageContext` by the page controller. Keep reading to see more complex examples of +how inner tags are used with the `form` tag. + +The following listing shows the generated HTML, which looks like a standard form: + +``` +
+ + + + + + + + + + + + +
First Name:
Last Name:
+ +
+
+``` + +The preceding JSP assumes that the variable name of the form-backing object is`command`. If you have put the form-backing object into the model under another name +(definitely a best practice), you can bind the form to the named variable, as the +following example shows: + +``` + + + + + + + + + + + + + +
First Name:
Last Name:
+ +
+
+``` + +###### The `input` Tag + +This tag renders an HTML `input` element with the bound value and `type='text'` by default. +For an example of this tag, see [The Form Tag](#mvc-view-jsp-formtaglib-formtag). You can also use +HTML5-specific types, such as `email`, `tel`, `date`, and others. + +###### The `checkbox` Tag + +This tag renders an HTML `input` tag with the `type` set to `checkbox`. + +Assume that our `User` has preferences such as newsletter subscription and a list of +hobbies. The following example shows the `Preferences` class: + +Java + +``` +public class Preferences { + + private boolean receiveNewsletter; + private String[] interests; + private String favouriteWord; + + public boolean isReceiveNewsletter() { + return receiveNewsletter; + } + + public void setReceiveNewsletter(boolean receiveNewsletter) { + this.receiveNewsletter = receiveNewsletter; + } + + public String[] getInterests() { + return interests; + } + + public void setInterests(String[] interests) { + this.interests = interests; + } + + public String getFavouriteWord() { + return favouriteWord; + } + + public void setFavouriteWord(String favouriteWord) { + this.favouriteWord = favouriteWord; + } +} +``` + +Kotlin + +``` +class Preferences( + var receiveNewsletter: Boolean, + var interests: StringArray, + var favouriteWord: String +) +``` + +The corresponding `form.jsp` could then resemble the following: + +``` + + + + + <%-- Approach 1: Property is of type java.lang.Boolean --%> + + + + + + <%-- Approach 2: Property is of an array or of type java.util.Collection --%> + + + + + + <%-- Approach 3: Property is of type java.lang.Object --%> + + +
Subscribe to newsletter?:
Interests: + Quidditch: + Herbology: + Defence Against the Dark Arts: +
Favourite Word: + Magic: +
+
+``` + +There are three approaches to the `checkbox` tag, which should meet all your checkbox needs. + +* Approach One: When the bound value is of type `java.lang.Boolean`, the`input(checkbox)` is marked as `checked` if the bound value is `true`. The `value`attribute corresponds to the resolved value of the `setValue(Object)` value property. + +* Approach Two: When the bound value is of type `array` or `java.util.Collection`, the`input(checkbox)` is marked as `checked` if the configured `setValue(Object)` value is + present in the bound `Collection`. + +* Approach Three: For any other bound value type, the `input(checkbox)` is marked as`checked` if the configured `setValue(Object)` is equal to the bound value. + +Note that, regardless of the approach, the same HTML structure is generated. The following +HTML snippet defines some checkboxes: + +``` + + Interests: + + Quidditch: + + Herbology: + + Defence Against the Dark Arts: + + + +``` + +You might not expect to see the additional hidden field after each checkbox. +When a checkbox in an HTML page is not checked, its value is not sent to the +server as part of the HTTP request parameters once the form is submitted, so we need a +workaround for this quirk in HTML for Spring form data binding to work. The`checkbox` tag follows the existing Spring convention of including a hidden parameter +prefixed by an underscore (`_`) for each checkbox. By doing this, you are effectively +telling Spring that “the checkbox was visible in the form, and I want my object to +which the form data binds to reflect the state of the checkbox, no matter what.” + +###### The `checkboxes` Tag + +This tag renders multiple HTML `input` tags with the `type` set to `checkbox`. + +This section build on the example from the previous `checkbox` tag section. Sometimes, you prefer +not to have to list all the possible hobbies in your JSP page. You would rather provide +a list at runtime of the available options and pass that in to the tag. 
That is the +purpose of the `checkboxes` tag. You can pass in an `Array`, a `List`, or a `Map` that contains +the available options in the `items` property. Typically, the bound property is a +collection so that it can hold multiple values selected by the user. The following example +shows a JSP that uses this tag: + +``` + + + + + + +
Interests: + <%-- Property is of an array or of type java.util.Collection --%> + +
+
+``` + +This example assumes that the `interestList` is a `List` available as a model attribute +that contains strings of the values to be selected from. If you use a `Map`, +the map entry key is used as the value, and the map entry’s value is used as +the label to be displayed. You can also use a custom object where you can provide the +property names for the value by using `itemValue` and the label by using `itemLabel`. + +###### The `radiobutton` Tag + +This tag renders an HTML `input` element with the `type` set to `radio`. + +A typical usage pattern involves multiple tag instances bound to the same property +but with different values, as the following example shows: + +``` + + Sex: + + Male:
+ Female: + + +``` + +###### The `radiobuttons` Tag + +This tag renders multiple HTML `input` elements with the `type` set to `radio`. + +As with the [`checkboxes` tag](#mvc-view-jsp-formtaglib-checkboxestag), you might want to +pass in the available options as a runtime variable. For this usage, you can use the`radiobuttons` tag. You pass in an `Array`, a `List`, or a `Map` that contains the +available options in the `items` property. If you use a `Map`, the map entry key is +used as the value and the map entry’s value are used as the label to be displayed. +You can also use a custom object where you can provide the property names for the value +by using `itemValue` and the label by using `itemLabel`, as the following example shows: + +``` + + Sex: + + +``` + +###### The `password` Tag + +This tag renders an HTML `input` tag with the type set to `password` with the bound value. + +``` + + Password: + + + + +``` + +Note that, by default, the password value is not shown. If you do want the +password value to be shown, you can set the value of the `showPassword` attribute to`true`, as the following example shows: + +``` + + Password: + + + + +``` + +###### The `select` Tag + +This tag renders an HTML 'select' element. It supports data binding to the selected +option as well as the use of nested `option` and `options` tags. + +Assume that a `User` has a list of skills. The corresponding HTML could be as follows: + +``` + + Skills: + + +``` + +If the `User’s` skill are in Herbology, the HTML source of the 'Skills' row could be +as follows: + +``` + + Skills: + + + + +``` + +###### The `option` Tag + +This tag renders an HTML `option` element. It sets `selected`, based on the bound +value. 
The following HTML shows typical output for it: + +``` + + House: + + + + + + + + + +``` + +If the `User’s` house was in Gryffindor, the HTML source of the 'House' row would be +as follows: + +``` + + House: + + + + +``` + +|**1**|Note the addition of a `selected` attribute.| +|-----|--------------------------------------------| + +###### The `options` Tag + +This tag renders a list of HTML `option` elements. It sets the `selected` attribute, +based on the bound value. The following HTML shows typical output for it: + +``` + + Country: + + + + + + + +``` + +If the `User` lived in the UK, the HTML source of the 'Country' row would be as follows: + +``` + + Country: + + + + +``` + +|**1**|Note the addition of a `selected` attribute.| +|-----|--------------------------------------------| + +As the preceding example shows, the combined usage of an `option` tag with the `options` tag +generates the same standard HTML but lets you explicitly specify a value in the +JSP that is for display only (where it belongs), such as the default string in the +example: "-- Please Select". + +The `items` attribute is typically populated with a collection or array of item objects.`itemValue` and `itemLabel` refer to bean properties of those item objects, if +specified. Otherwise, the item objects themselves are turned into strings. Alternatively, +you can specify a `Map` of items, in which case the map keys are interpreted as option +values and the map values correspond to option labels. If `itemValue` or `itemLabel` (or both) +happen to be specified as well, the item value property applies to the map key, and +the item label property applies to the map value. + +###### The `textarea` Tag + +This tag renders an HTML `textarea` element. The following HTML shows typical output for it: + +``` + + Notes: + + + +``` + +###### The `hidden` Tag + +This tag renders an HTML `input` tag with the `type` set to `hidden` with the bound value. 
To submit +an unbound hidden value, use the HTML `input` tag with the `type` set to `hidden`. +The following HTML shows typical output for it: + +``` + +``` + +If we choose to submit the `house` value as a hidden one, the HTML would be as follows: + +``` + +``` + +###### The `errors` Tag + +This tag renders field errors in an HTML `span` element. It provides access to the errors +created in your controller or those that were created by any validators associated with +your controller. + +Assume that we want to display all error messages for the `firstName` and `lastName`fields once we submit the form. We have a validator for instances of the `User` class +called `UserValidator`, as the following example shows: + +Java + +``` +public class UserValidator implements Validator { + + public boolean supports(Class candidate) { + return User.class.isAssignableFrom(candidate); + } + + public void validate(Object obj, Errors errors) { + ValidationUtils.rejectIfEmptyOrWhitespace(errors, "firstName", "required", "Field is required."); + ValidationUtils.rejectIfEmptyOrWhitespace(errors, "lastName", "required", "Field is required."); + } +} +``` + +Kotlin + +``` +class UserValidator : Validator { + + override fun supports(candidate: Class<*>): Boolean { + return User::class.java.isAssignableFrom(candidate) + } + + override fun validate(obj: Any, errors: Errors) { + ValidationUtils.rejectIfEmptyOrWhitespace(errors, "firstName", "required", "Field is required.") + ValidationUtils.rejectIfEmptyOrWhitespace(errors, "lastName", "required", "Field is required.") + } +} +``` + +The `form.jsp` could be as follows: + +``` + + + + + + <%-- Show errors for firstName field --%> + + + + + + + <%-- Show errors for lastName field --%> + + + + + +
First Name:
Last Name:
+ +
+
+``` + +If we submit a form with empty values in the `firstName` and `lastName` fields, +the HTML would be as follows: + +``` +
+ + + + + <%-- Associated errors to firstName field displayed --%> + + + + + + + <%-- Associated errors to lastName field displayed --%> + + + + + +
First Name:Field is required.
Last Name:Field is required.
+ +
+
+``` + +What if we want to display the entire list of errors for a given page? The next example +shows that the `errors` tag also supports some basic wildcarding functionality. + +* `path="*"`: Displays all errors. + +* `path="lastName"`: Displays all errors associated with the `lastName` field. + +* If `path` is omitted, only object errors are displayed. + +The following example displays a list of errors at the top of the page, followed by +field-specific errors next to the fields: + +``` + + + + + + + + + + + + + + + + +
First Name:
Last Name:
+ +
+
+``` + +The HTML would be as follows: + +``` +
+ Field is required.
Field is required.
+ + + + + + + + + + + + + + + +
First Name:Field is required.
Last Name:Field is required.
+ +
+
+``` + +The `spring-form.tld` tag library descriptor (TLD) is included in the `spring-webmvc.jar`. +For a comprehensive reference on individual tags, browse the[API reference](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/tags/form/package-summary.html#package.description)or see the tag library description. + +###### HTTP Method Conversion + +A key principle of REST is the use of the “Uniform Interface”. This means that all +resources (URLs) can be manipulated by using the same four HTTP methods: GET, PUT, POST, +and DELETE. For each method, the HTTP specification defines the exact semantics. For +instance, a GET should always be a safe operation, meaning that it has no side effects, +and a PUT or DELETE should be idempotent, meaning that you can repeat these operations +over and over again, but the end result should be the same. While HTTP defines these +four methods, HTML only supports two: GET and POST. Fortunately, there are two possible +workarounds: you can either use JavaScript to do your PUT or DELETE, or you can do a POST +with the “real” method as an additional parameter (modeled as a hidden input field in an +HTML form). Spring’s `HiddenHttpMethodFilter` uses this latter trick. This +filter is a plain Servlet filter and, therefore, it can be used in combination with any +web framework (not just Spring MVC). Add this filter to your web.xml, and a POST +with a hidden `method` parameter is converted into the corresponding HTTP method +request. + +To support HTTP method conversion, the Spring MVC form tag was updated to support setting +the HTTP method. For example, the following snippet comes from the Pet Clinic sample: + +``` + +

+
+``` + +The preceding example performs an HTTP POST, with the “real” DELETE method hidden behind +a request parameter. It is picked up by the `HiddenHttpMethodFilter`, which is defined in +web.xml, as the following example shows: + +``` + + httpMethodFilter + org.springframework.web.filter.HiddenHttpMethodFilter + + + + httpMethodFilter + petclinic + +``` + +The following example shows the corresponding `@Controller` method: + +Java + +``` +@RequestMapping(method = RequestMethod.DELETE) +public String deletePet(@PathVariable int ownerId, @PathVariable int petId) { + this.clinic.deletePet(petId); + return "redirect:/owners/" + ownerId; +} +``` + +Kotlin + +``` +@RequestMapping(method = [RequestMethod.DELETE]) +fun deletePet(@PathVariable ownerId: Int, @PathVariable petId: Int): String { + clinic.deletePet(petId) + return "redirect:/owners/$ownerId" +} +``` + +###### HTML5 Tags + +The Spring form tag library allows entering dynamic attributes, which means you can +enter any HTML5 specific attributes. + +The form `input` tag supports entering a type attribute other than `text`. This is +intended to allow rendering new HTML5 specific input types, such as `email`, `date`,`range`, and others. Note that entering `type='text'` is not required, since `text`is the default type. + +#### 1.10.6. Tiles + +You can integrate Tiles - just as any other view technology - in web +applications that use Spring. This section describes, in a broad way, how to do so. + +| |This section focuses on Spring’s support for Tiles version 3 in the`org.springframework.web.servlet.view.tiles3` package.| +|---|-------------------------------------------------------------------------------------------------------------------------| + +##### Dependencies + +To be able to use Tiles, you have to add a dependency on Tiles version 3.0.1 or higher +and [its transitive dependencies](https://tiles.apache.org/framework/dependency-management.html)to your project. 
+ 
+##### Configuration 
+ 
+To be able to use Tiles, you have to configure it by using files that contain definitions 
+(for basic information on definitions and other Tiles concepts, see[https://tiles.apache.org](https://tiles.apache.org)). In Spring, this is done by using the `TilesConfigurer`. 
+The following example `ApplicationContext` configuration shows how to do so: 
+ 
+``` 
+ 
+ 
+ 
+ /WEB-INF/defs/general.xml 
+ /WEB-INF/defs/widgets.xml 
+ /WEB-INF/defs/administrator.xml 
+ /WEB-INF/defs/customer.xml 
+ /WEB-INF/defs/templates.xml 
+ 
+ 
+ 
+``` 
+ 
+The preceding example defines five files that contain definitions. The files are all 
+located in the `WEB-INF/defs` directory. At initialization of the `WebApplicationContext`, 
+the files are loaded, and the definitions factory is initialized. After that has 
+been done, the Tiles included in the definition files can be used as views within your 
+Spring web application. To be able to use the views, you have to have a `ViewResolver`as with any other view technology in Spring: typically a convenient `TilesViewResolver`. 
+ 
+You can specify locale-specific Tiles definitions by adding an underscore and then 
+the locale, as the following example shows: 
+ 
+``` 
+ 
+ 
+ 
+ /WEB-INF/defs/tiles.xml 
+ /WEB-INF/defs/tiles_fr_FR.xml 
+ 
+ 
+ 
+``` 
+ 
+With the preceding configuration, `tiles_fr_FR.xml` is used for requests with the `fr_FR` locale, 
+and `tiles.xml` is used by default. 
+ 
+| |Since underscores are used to indicate locales, we recommend not using
them otherwise in the file names for Tiles definitions.| +|---|------------------------------------------------------------------------------------------------------------------------------------| + +###### `UrlBasedViewResolver` + +The `UrlBasedViewResolver` instantiates the given `viewClass` for each view it has to +resolve. The following bean defines a `UrlBasedViewResolver`: + +``` + + + +``` + +###### `SimpleSpringPreparerFactory` and `SpringBeanPreparerFactory` + +As an advanced feature, Spring also supports two special Tiles `PreparerFactory`implementations. See the Tiles documentation for details on how to use`ViewPreparer` references in your Tiles definition files. + +You can specify `SimpleSpringPreparerFactory` to autowire `ViewPreparer` instances based on +specified preparer classes, applying Spring’s container callbacks as well as applying +configured Spring BeanPostProcessors. If Spring’s context-wide annotation configuration has +been activated, annotations in `ViewPreparer` classes are automatically detected and +applied. Note that this expects preparer classes in the Tiles definition files, as +the default `PreparerFactory` does. + +You can specify `SpringBeanPreparerFactory` to operate on specified preparer names (instead +of classes), obtaining the corresponding Spring bean from the DispatcherServlet’s +application context. The full bean creation process is in the control of the Spring +application context in this case, allowing for the use of explicit dependency injection +configuration, scoped beans, and so on. Note that you need to define one Spring bean definition +for each preparer name (as used in your Tiles definitions). The following example shows +how to define a `SpringBeanPreparerFactory` property on a `TilesConfigurer` bean: + +``` + + + + /WEB-INF/defs/general.xml + /WEB-INF/defs/widgets.xml + /WEB-INF/defs/administrator.xml + /WEB-INF/defs/customer.xml + /WEB-INF/defs/templates.xml + + + + + + + +``` + +#### 1.10.7. 
RSS and Atom + +Both `AbstractAtomFeedView` and `AbstractRssFeedView` inherit from the`AbstractFeedView` base class and are used to provide Atom and RSS Feed views, respectively. They +are based on [ROME](https://rometools.github.io/rome/) project and are located in the +package `org.springframework.web.servlet.view.feed`. + +`AbstractAtomFeedView` requires you to implement the `buildFeedEntries()` method and +optionally override the `buildFeedMetadata()` method (the default implementation is +empty). The following example shows how to do so: + +Java + +``` +public class SampleContentAtomView extends AbstractAtomFeedView { + + @Override + protected void buildFeedMetadata(Map model, + Feed feed, HttpServletRequest request) { + // implementation omitted + } + + @Override + protected List buildFeedEntries(Map model, + HttpServletRequest request, HttpServletResponse response) throws Exception { + // implementation omitted + } +} +``` + +Kotlin + +``` +class SampleContentAtomView : AbstractAtomFeedView() { + + override fun buildFeedMetadata(model: Map, + feed: Feed, request: HttpServletRequest) { + // implementation omitted + } + + override fun buildFeedEntries(model: Map, + request: HttpServletRequest, response: HttpServletResponse): List { + // implementation omitted + } +} +``` + +Similar requirements apply for implementing `AbstractRssFeedView`, as the following example shows: + +Java + +``` +public class SampleContentRssView extends AbstractRssFeedView { + + @Override + protected void buildFeedMetadata(Map model, + Channel feed, HttpServletRequest request) { + // implementation omitted + } + + @Override + protected List buildFeedItems(Map model, + HttpServletRequest request, HttpServletResponse response) throws Exception { + // implementation omitted + } +} +``` + +Kotlin + +``` +class SampleContentRssView : AbstractRssFeedView() { + + override fun buildFeedMetadata(model: Map, + feed: Channel, request: HttpServletRequest) { + // implementation omitted + } + + 
override fun buildFeedItems(model: Map, + request: HttpServletRequest, response: HttpServletResponse): List { + // implementation omitted + } +} +``` + +The `buildFeedItems()` and `buildFeedEntries()` methods pass in the HTTP request, in case +you need to access the Locale. The HTTP response is passed in only for the setting of +cookies or other HTTP headers. The feed is automatically written to the response +object after the method returns. + +For an example of creating an Atom view, see Alef Arendsen’s Spring Team Blog[entry](https://spring.io/blog/2009/03/16/adding-an-atom-view-to-an-application-using-spring-s-rest-support). + +#### 1.10.8. PDF and Excel + +Spring offers ways to return output other than HTML, including PDF and Excel spreadsheets. +This section describes how to use those features. + +##### Introduction to Document Views + +An HTML page is not always the best way for the user to view the model output, +and Spring makes it simple to generate a PDF document or an Excel spreadsheet +dynamically from the model data. The document is the view and is streamed from the +server with the correct content type, to (hopefully) enable the client PC to run their +spreadsheet or PDF viewer application in response. + +In order to use Excel views, you need to add the Apache POI library to your classpath. +For PDF generation, you need to add (preferably) the OpenPDF library. + +| |You should use the latest versions of the underlying document-generation libraries,
if possible. In particular, we strongly recommend OpenPDF (for example, OpenPDF 1.2.12)
instead of the outdated original iText 2.1.7, since OpenPDF is actively maintained and
fixes an important vulnerability for untrusted PDF content.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### PDF Views + +A simple PDF view for a word list could extend`org.springframework.web.servlet.view.document.AbstractPdfView` and implement the`buildPdfDocument()` method, as the following example shows: + +Java + +``` +public class PdfWordList extends AbstractPdfView { + + protected void buildPdfDocument(Map model, Document doc, PdfWriter writer, + HttpServletRequest request, HttpServletResponse response) throws Exception { + + List words = (List) model.get("wordList"); + for (String word : words) { + doc.add(new Paragraph(word)); + } + } +} +``` + +Kotlin + +``` +class PdfWordList : AbstractPdfView() { + + override fun buildPdfDocument(model: Map, doc: Document, writer: PdfWriter, + request: HttpServletRequest, response: HttpServletResponse) { + + val words = model["wordList"] as List + for (word in words) { + doc.add(Paragraph(word)) + } + } +} +``` + +A controller can return such a view either from an external view definition +(referencing it by name) or as a `View` instance from the handler method. + +##### Excel Views + +Since Spring Framework 4.2,`org.springframework.web.servlet.view.document.AbstractXlsView` is provided as a base +class for Excel views. It is based on Apache POI, with specialized subclasses (`AbstractXlsxView`and `AbstractXlsxStreamingView`) that supersede the outdated `AbstractExcelView` class. + +The programming model is similar to `AbstractPdfView`, with `buildExcelDocument()`as the central template method and controllers being able to return such a view from +an external definition (by name) or as a `View` instance from the handler method. 
+ 
+#### 1.10.9. Jackson 
+ 
+[WebFlux](web-reactive.html#webflux-view-httpmessagewriter) 
+ 
+Spring offers support for the Jackson JSON library. 
+ 
+##### Jackson-based JSON MVC Views 
+ 
+[WebFlux](web-reactive.html#webflux-view-httpmessagewriter) 
+ 
+The `MappingJackson2JsonView` uses the Jackson library’s `ObjectMapper` to render the response 
+content as JSON. By default, the entire contents of the model map (with the exception of 
+framework-specific classes) are encoded as JSON. For cases where the contents of the 
+map need to be filtered, you can specify a specific set of model attributes to encode 
+by using the `modelKeys` property. You can also use the `extractValueFromSingleKeyModel`property to have the value in single-key models extracted and serialized directly rather 
+than as a map of model attributes. 
+ 
+You can customize JSON mapping as needed by using Jackson’s provided 
+annotations. When you need further control, you can inject a custom `ObjectMapper`through the `ObjectMapper` property, for cases where you need to provide custom JSON 
+serializers and deserializers for specific types. 
+ 
+##### Jackson-based XML Views 
+ 
+[WebFlux](web-reactive.html#webflux-view-httpmessagewriter) 
+ 
+`MappingJackson2XmlView` uses the[Jackson XML extension’s](https://github.com/FasterXML/jackson-dataformat-xml) `XmlMapper`to render the response content as XML. If the model contains multiple entries, you should 
+explicitly set the object to be serialized by using the `modelKey` bean property. If the 
+model contains a single entry, it is serialized automatically. 
+ 
+You can customize XML mapping as needed by using JAXB or Jackson’s provided 
+annotations. When you need further control, you can inject a custom `XmlMapper`through the `ObjectMapper` property, for cases where 
+you need to provide custom XML serializers and deserializers for specific types. 
+ 
+#### 1.10.10. 
XML Marshalling + +The `MarshallingView` uses an XML `Marshaller` (defined in the `org.springframework.oxm`package) to render the response content as XML. You can explicitly set the object to be +marshalled by using a `MarshallingView` instance’s `modelKey` bean property. Alternatively, +the view iterates over all model properties and marshals the first type that is supported +by the `Marshaller`. For more information on the functionality in the`org.springframework.oxm` package, see [Marshalling XML using O/X Mappers](data-access.html#oxm). + +#### 1.10.11. XSLT Views + +XSLT is a transformation language for XML and is popular as a view technology within web +applications. XSLT can be a good choice as a view technology if your application +naturally deals with XML or if your model can easily be converted to XML. The following +section shows how to produce an XML document as model data and have it transformed with +XSLT in a Spring Web MVC application. + +This example is a trivial Spring application that creates a list of words in the`Controller` and adds them to the model map. The map is returned, along with the view +name of our XSLT view. See [Annotated Controllers](#mvc-controller) for details of Spring Web MVC’s`Controller` interface. The XSLT controller turns the list of words into a simple XML +document ready for transformation. + +##### Beans + +Configuration is standard for a simple Spring web application: The MVC configuration +has to define an `XsltViewResolver` bean and regular MVC annotation configuration. 
+The following example shows how to do so: + +Java + +``` +@EnableWebMvc +@ComponentScan +@Configuration +public class WebConfig implements WebMvcConfigurer { + + @Bean + public XsltViewResolver xsltViewResolver() { + XsltViewResolver viewResolver = new XsltViewResolver(); + viewResolver.setPrefix("/WEB-INF/xsl/"); + viewResolver.setSuffix(".xslt"); + return viewResolver; + } +} +``` + +Kotlin + +``` +@EnableWebMvc +@ComponentScan +@Configuration +class WebConfig : WebMvcConfigurer { + + @Bean + fun xsltViewResolver() = XsltViewResolver().apply { + setPrefix("/WEB-INF/xsl/") + setSuffix(".xslt") + } +} +``` + +##### Controller + +We also need a Controller that encapsulates our word-generation logic. + +The controller logic is encapsulated in a `@Controller` class, with the +handler method being defined as follows: + +Java + +``` +@Controller +public class XsltController { + + @RequestMapping("/") + public String home(Model model) throws Exception { + Document document = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument(); + Element root = document.createElement("wordList"); + + List words = Arrays.asList("Hello", "Spring", "Framework"); + for (String word : words) { + Element wordNode = document.createElement("word"); + Text textNode = document.createTextNode(word); + wordNode.appendChild(textNode); + root.appendChild(wordNode); + } + + model.addAttribute("wordList", root); + return "home"; + } +} +``` + +Kotlin + +``` +import org.springframework.ui.set + +@Controller +class XsltController { + + @RequestMapping("/") + fun home(model: Model): String { + val document = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument() + val root = document.createElement("wordList") + + val words = listOf("Hello", "Spring", "Framework") + for (word in words) { + val wordNode = document.createElement("word") + val textNode = document.createTextNode(word) + wordNode.appendChild(textNode) + root.appendChild(wordNode) + } + + model["wordList"] = 
root + return "home" + } +} +``` + +So far, we have only created a DOM document and added it to the Model map. Note that you +can also load an XML file as a `Resource` and use it instead of a custom DOM document. + +There are software packages available that automatically 'domify' +an object graph, but, within Spring, you have complete flexibility to create the DOM +from your model in any way you choose. This prevents the transformation of XML playing +too great a part in the structure of your model data, which is a danger when using tools +to manage the DOMification process. + +##### Transformation + +Finally, the `XsltViewResolver` resolves the “home” XSLT template file and merges the +DOM document into it to generate our view. As shown in the `XsltViewResolver`configuration, XSLT templates live in the `war` file in the `WEB-INF/xsl` directory +and end with an `xslt` file extension. + +The following example shows an XSLT transform: + +``` + + + + + + + + Hello! + +

My First Words

+
    + +
+ + +
+ + +
  • +
    + +
    +``` + +The preceding transform is rendered as the following HTML: + +``` + + + + Hello! + + +

    My First Words

    +
      +
    • Hello
    • +
    • Spring
    • +
    • Framework
    • +
    + + +``` + +### 1.11. MVC Config + +[WebFlux](web-reactive.html#webflux-config) + +The MVC Java configuration and the MVC XML namespace provide default configuration +suitable for most applications and a configuration API to customize it. + +For more advanced customizations, which are not available in the configuration API, +see [Advanced Java Config](#mvc-config-advanced-java) and [Advanced XML Config](#mvc-config-advanced-xml). + +You do not need to understand the underlying beans created by the MVC Java configuration +and the MVC namespace. If you want to learn more, see [Special Bean Types](#mvc-servlet-special-bean-types)and [Web MVC Config](#mvc-servlet-config). + +#### 1.11.1. Enable MVC Configuration + +[WebFlux](web-reactive.html#webflux-config-enable) + +In Java configuration, you can use the `@EnableWebMvc` annotation to enable MVC +configuration, as the following example shows: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig { +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig +``` + +In XML configuration, you can use the `` element to enable MVC +configuration, as the following example shows: + +``` + + + + + + +``` + +The preceding example registers a number of Spring MVC[infrastructure beans](#mvc-servlet-special-bean-types) and adapts to dependencies +available on the classpath (for example, payload converters for JSON, XML, and others). + +#### 1.11.2. MVC Config API + +[WebFlux](web-reactive.html#webflux-config-customize) + +In Java configuration, you can implement the `WebMvcConfigurer` interface, as the +following example shows: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + // Implement configuration methods... +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + // Implement configuration methods... +} +``` + +In XML, you can check attributes and sub-elements of ``. 
You can +view the [Spring MVC XML schema](https://schema.spring.io/mvc/spring-mvc.xsd) or use +the code completion feature of your IDE to discover what attributes and +sub-elements are available. + +#### 1.11.3. Type Conversion + +[WebFlux](web-reactive.html#webflux-config-conversion) + +By default, formatters for various number and date types are installed, along with support +for customization via `@NumberFormat` and `@DateTimeFormat` on fields. + +To register custom formatters and converters in Java config, use the following: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void addFormatters(FormatterRegistry registry) { + // ... + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun addFormatters(registry: FormatterRegistry) { + // ... + } +} +``` + +To do the same in XML config, use the following: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +By default Spring MVC considers the request Locale when parsing and formatting date +values. This works for forms where dates are represented as Strings with "input" form +fields. For "date" and "time" form fields, however, browsers use a fixed format defined +in the HTML spec. 
For such cases date and time formatting can be customized as follows: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void addFormatters(FormatterRegistry registry) { + DateTimeFormatterRegistrar registrar = new DateTimeFormatterRegistrar(); + registrar.setUseIsoFormat(true); + registrar.registerFormatters(registry); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun addFormatters(registry: FormatterRegistry) { + val registrar = DateTimeFormatterRegistrar() + registrar.setUseIsoFormat(true) + registrar.registerFormatters(registry) + } +} +``` + +| |See [the `FormatterRegistrar` SPI](core.html#format-FormatterRegistrar-SPI)and the `FormattingConversionServiceFactoryBean` for more information on when to use
    FormatterRegistrar implementations.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.11.4. Validation + +[WebFlux](web-reactive.html#webflux-config-validation) + +By default, if [Bean Validation](core.html#validation-beanvalidation-overview) is present +on the classpath (for example, Hibernate Validator), the `LocalValidatorFactoryBean` is +registered as a global [Validator](core.html#validator) for use with `@Valid` and`Validated` on controller method arguments. + +In Java configuration, you can customize the global `Validator` instance, as the +following example shows: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public Validator getValidator() { + // ... + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun getValidator(): Validator { + // ... + } +} +``` + +The following example shows how to achieve the same configuration in XML: + +``` + + + + + + +``` + +Note that you can also register `Validator` implementations locally, as the following +example shows: + +Java + +``` +@Controller +public class MyController { + + @InitBinder + protected void initBinder(WebDataBinder binder) { + binder.addValidators(new FooValidator()); + } +} +``` + +Kotlin + +``` +@Controller +class MyController { + + @InitBinder + protected fun initBinder(binder: WebDataBinder) { + binder.addValidators(FooValidator()) + } +} +``` + +| |If you need to have a `LocalValidatorFactoryBean` injected somewhere, create a bean and
    mark it with `@Primary` in order to avoid conflict with the one declared in the MVC configuration.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.11.5. Interceptors + +In Java configuration, you can register interceptors to apply to incoming requests, as +the following example shows: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void addInterceptors(InterceptorRegistry registry) { + registry.addInterceptor(new LocaleChangeInterceptor()); + registry.addInterceptor(new ThemeChangeInterceptor()).addPathPatterns("/**").excludePathPatterns("/admin/**"); + registry.addInterceptor(new SecurityInterceptor()).addPathPatterns("/secure/*"); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun addInterceptors(registry: InterceptorRegistry) { + registry.addInterceptor(LocaleChangeInterceptor()) + registry.addInterceptor(ThemeChangeInterceptor()).addPathPatterns("/**").excludePathPatterns("/admin/**") + registry.addInterceptor(SecurityInterceptor()).addPathPatterns("/secure/*") + } +} +``` + +The following example shows how to achieve the same configuration in XML: + +``` + + + + + + + + + + + + +``` + +#### 1.11.6. Content Types + +[WebFlux](web-reactive.html#webflux-config-content-negotiation) + +You can configure how Spring MVC determines the requested media types from the request +(for example, `Accept` header, URL path extension, query parameter, and others). + +By default, only the `Accept` header is checked. + +If you must use URL-based content type resolution, consider using the query parameter +strategy over path extensions. See[Suffix Match](#mvc-ann-requestmapping-suffix-pattern-match) and [Suffix Match and RFD](#mvc-ann-requestmapping-rfd) for +more details. 
+ +In Java configuration, you can customize requested content type resolution, as the +following example shows: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configureContentNegotiation(ContentNegotiationConfigurer configurer) { + configurer.mediaType("json", MediaType.APPLICATION_JSON); + configurer.mediaType("xml", MediaType.APPLICATION_XML); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configureContentNegotiation(configurer: ContentNegotiationConfigurer) { + configurer.mediaType("json", MediaType.APPLICATION_JSON) + configurer.mediaType("xml", MediaType.APPLICATION_XML) + } +} +``` + +The following example shows how to achieve the same configuration in XML: + +``` + + + + + + json=application/json + xml=application/xml + + + +``` + +#### 1.11.7. Message Converters + +[WebFlux](web-reactive.html#webflux-config-message-codecs) + +You can customize `HttpMessageConverter` in Java configuration by overriding[`configureMessageConverters()`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/config/annotation/WebMvcConfigurer.html#configureMessageConverters-java.util.List-)(to replace the default converters created by Spring MVC) or by overriding[`extendMessageConverters()`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/config/annotation/WebMvcConfigurer.html#extendMessageConverters-java.util.List-)(to customize the default converters or add additional converters to the default ones). 
+ 
+The following example adds XML and Jackson JSON converters with a customized`ObjectMapper` instead of the default ones: 
+ 
+Java 
+ 
+``` 
+@Configuration 
+@EnableWebMvc 
+public class WebConfiguration implements WebMvcConfigurer { 
+ 
+ @Override 
+ public void configureMessageConverters(List> converters) { 
+ Jackson2ObjectMapperBuilder builder = new Jackson2ObjectMapperBuilder() 
+ .indentOutput(true) 
+ .dateFormat(new SimpleDateFormat("yyyy-MM-dd")) 
+ .modulesToInstall(new ParameterNamesModule()); 
+ converters.add(new MappingJackson2HttpMessageConverter(builder.build())); 
+ converters.add(new MappingJackson2XmlHttpMessageConverter(builder.createXmlMapper(true).build())); 
+ } 
+} 
+``` 
+ 
+Kotlin 
+ 
+``` 
+@Configuration 
+@EnableWebMvc 
+class WebConfiguration : WebMvcConfigurer { 
+ 
+ override fun configureMessageConverters(converters: MutableList>) { 
+ val builder = Jackson2ObjectMapperBuilder() 
+ .indentOutput(true) 
+ .dateFormat(SimpleDateFormat("yyyy-MM-dd")) 
+ .modulesToInstall(ParameterNamesModule()) 
+ converters.add(MappingJackson2HttpMessageConverter(builder.build())) 
+ converters.add(MappingJackson2XmlHttpMessageConverter(builder.createXmlMapper(true).build())) 
+ } 
+} 
+``` 
+ 
+In the preceding example,[`Jackson2ObjectMapperBuilder`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/http/converter/json/Jackson2ObjectMapperBuilder.html)is used to create a common configuration for both `MappingJackson2HttpMessageConverter` and`MappingJackson2XmlHttpMessageConverter` with indentation enabled, a customized date format, 
+and the registration of[`jackson-module-parameter-names`](https://github.com/FasterXML/jackson-module-parameter-names), 
+which adds support for accessing parameter names (a feature added in Java 8). 
+ +This builder customizes Jackson’s default properties as follows: + +* [`DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES`](https://fasterxml.github.io/jackson-databind/javadoc/2.6/com/fasterxml/jackson/databind/DeserializationFeature.html#FAIL_ON_UNKNOWN_PROPERTIES) is disabled. + +* [`MapperFeature.DEFAULT_VIEW_INCLUSION`](https://fasterxml.github.io/jackson-databind/javadoc/2.6/com/fasterxml/jackson/databind/MapperFeature.html#DEFAULT_VIEW_INCLUSION) is disabled. + +It also automatically registers the following well-known modules if they are detected on the classpath: + +* [jackson-datatype-joda](https://github.com/FasterXML/jackson-datatype-joda): Support for Joda-Time types. + +* [jackson-datatype-jsr310](https://github.com/FasterXML/jackson-datatype-jsr310): Support for Java 8 Date and Time API types. + +* [jackson-datatype-jdk8](https://github.com/FasterXML/jackson-datatype-jdk8): Support for other Java 8 types, such as `Optional`. + +* [`jackson-module-kotlin`](https://github.com/FasterXML/jackson-module-kotlin): Support for Kotlin classes and data classes. + +| |Enabling indentation with Jackson XML support requires[`woodstox-core-asl`](https://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.codehaus.woodstox%22%20AND%20a%3A%22woodstox-core-asl%22)dependency in addition to [`jackson-dataformat-xml`](https://search.maven.org/#search%7Cga%7C1%7Ca%3A%22jackson-dataformat-xml%22) one.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Other interesting Jackson modules are available: + +* [jackson-datatype-money](https://github.com/zalando/jackson-datatype-money): Support for `javax.money` types (unofficial module). 
+ +* [jackson-datatype-hibernate](https://github.com/FasterXML/jackson-datatype-hibernate): Support for Hibernate-specific types and properties (including lazy-loading aspects). + +The following example shows how to achieve the same configuration in XML: + +``` + + + + + + + + + + + + + + +``` + +#### 1.11.8. View Controllers + +This is a shortcut for defining a `ParameterizableViewController` that immediately +forwards to a view when invoked. You can use it in static cases when there is no Java controller +logic to run before the view generates the response. + +The following example of Java configuration forwards a request for `/` to a view called `home`: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void addViewControllers(ViewControllerRegistry registry) { + registry.addViewController("/").setViewName("home"); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun addViewControllers(registry: ViewControllerRegistry) { + registry.addViewController("/").setViewName("home") + } +} +``` + +The following example achieves the same thing as the preceding example, but with XML, by +using the `` element: + +``` + +``` + +If an `@RequestMapping` method is mapped to a URL for any HTTP method then a view +controller cannot be used to handle the same URL. This is because a match by URL to an +annotated controller is considered a strong enough indication of endpoint ownership so +that a 405 (METHOD\_NOT\_ALLOWED), a 415 (UNSUPPORTED\_MEDIA\_TYPE), or similar response can +be sent to the client to help with debugging. For this reason it is recommended to avoid +splitting URL handling across an annotated controller and a view controller. + +#### 1.11.9. View Resolvers + +[WebFlux](web-reactive.html#webflux-config-view-resolvers) + +The MVC configuration simplifies the registration of view resolvers. 
+ +The following Java configuration example configures content negotiation view +resolution by using JSP and Jackson as a default `View` for JSON rendering: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.enableContentNegotiation(new MappingJackson2JsonView()); + registry.jsp(); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.enableContentNegotiation(MappingJackson2JsonView()) + registry.jsp() + } +} +``` + +The following example shows how to achieve the same configuration in XML: + +``` + + + + + + + + +``` + +Note, however, that FreeMarker, Tiles, Groovy Markup, and script templates also require +configuration of the underlying view technology. + +The MVC namespace provides dedicated elements. The following example works with FreeMarker: + +``` + + + + + + + + + + + + +``` + +In Java configuration, you can add the respective `Configurer` bean, +as the following example shows: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configureViewResolvers(ViewResolverRegistry registry) { + registry.enableContentNegotiation(new MappingJackson2JsonView()); + registry.freeMarker().cache(false); + } + + @Bean + public FreeMarkerConfigurer freeMarkerConfigurer() { + FreeMarkerConfigurer configurer = new FreeMarkerConfigurer(); + configurer.setTemplateLoaderPath("/freemarker"); + return configurer; + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configureViewResolvers(registry: ViewResolverRegistry) { + registry.enableContentNegotiation(MappingJackson2JsonView()) + registry.freeMarker().cache(false) + } + + @Bean + fun freeMarkerConfigurer() = 
FreeMarkerConfigurer().apply { + setTemplateLoaderPath("/freemarker") + } +} +``` + +#### 1.11.10. Static Resources + +[WebFlux](web-reactive.html#webflux-config-static-resources) + +This option provides a convenient way to serve static resources from a list of[`Resource`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/core/io/Resource.html)-based locations. + +In the next example, given a request that starts with `/resources`, the relative path is +used to find and serve static resources relative to `/public` under the web application +root or on the classpath under `/static`. The resources are served with a one-year future +expiration to ensure maximum use of the browser cache and a reduction in HTTP requests +made by the browser. The `Last-Modified` information is deduced from `Resource#lastModified`so that HTTP conditional requests are supported with `"Last-Modified"` headers. + +The following listing shows how to do so with Java configuration: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void addResourceHandlers(ResourceHandlerRegistry registry) { + registry.addResourceHandler("/resources/**") + .addResourceLocations("/public", "classpath:/static/") + .setCacheControl(CacheControl.maxAge(Duration.ofDays(365))); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun addResourceHandlers(registry: ResourceHandlerRegistry) { + registry.addResourceHandler("/resources/**") + .addResourceLocations("/public", "classpath:/static/") + .setCacheControl(CacheControl.maxAge(Duration.ofDays(365))) + } +} +``` + +The following example shows how to achieve the same configuration in XML: + +``` + +``` + +See also[HTTP caching support for static resources](#mvc-caching-static-resources). 
+ +The resource handler also supports a chain of[`ResourceResolver`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/resource/ResourceResolver.html) implementations and[`ResourceTransformer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/resource/ResourceTransformer.html) implementations, +which you can use to create a toolchain for working with optimized resources. + +You can use the `VersionResourceResolver` for versioned resource URLs based on an MD5 hash +computed from the content, a fixed application version, or other. A`ContentVersionStrategy` (MD5 hash) is a good choice — with some notable exceptions, such as +JavaScript resources used with a module loader. + +The following example shows how to use `VersionResourceResolver` in Java configuration: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void addResourceHandlers(ResourceHandlerRegistry registry) { + registry.addResourceHandler("/resources/**") + .addResourceLocations("/public/") + .resourceChain(true) + .addResolver(new VersionResourceResolver().addContentVersionStrategy("/**")); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun addResourceHandlers(registry: ResourceHandlerRegistry) { + registry.addResourceHandler("/resources/**") + .addResourceLocations("/public/") + .resourceChain(true) + .addResolver(VersionResourceResolver().addContentVersionStrategy("/**")) + } +} +``` + +The following example shows how to achieve the same configuration in XML: + +``` + + + + + + + + + +``` + +You can then use `ResourceUrlProvider` to rewrite URLs and apply the full chain of resolvers and +transformers — for example, to insert versions. The MVC configuration provides a `ResourceUrlProvider`bean so that it can be injected into others. 
You can also make the rewrite transparent with the`ResourceUrlEncodingFilter` for Thymeleaf, JSPs, FreeMarker, and others with URL tags that +rely on `HttpServletResponse#encodeURL`. + +Note that, when using both `EncodedResourceResolver` (for example, for serving gzipped or +brotli-encoded resources) and `VersionResourceResolver`, you must register them in this order. +That ensures content-based versions are always computed reliably, based on the unencoded file. + +[WebJars](https://www.webjars.org/documentation) are also supported through the`WebJarsResourceResolver` which is automatically registered when the`org.webjars:webjars-locator-core` library is present on the classpath. The resolver can +re-write URLs to include the version of the jar and can also match against incoming URLs +without versions — for example, from `/jquery/jquery.min.js` to`/jquery/1.2.0/jquery.min.js`. + +| |The Java configuration based on `ResourceHandlerRegistry` provides further options
    for fine-grained control, e.g. last-modified behavior and optimized resource resolution.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 1.11.11. Default Servlet + +Spring MVC allows for mapping the `DispatcherServlet` to `/` (thus overriding the mapping +of the container’s default Servlet), while still allowing static resource requests to be +handled by the container’s default Servlet. It configures a`DefaultServletHttpRequestHandler` with a URL mapping of `/**` and the lowest priority +relative to other URL mappings. + +This handler forwards all requests to the default Servlet. Therefore, it must +remain last in the order of all other URL `HandlerMappings`. That is the +case if you use ``. Alternatively, if you set up your +own customized `HandlerMapping` instance, be sure to set its `order` property to a value +lower than that of the `DefaultServletHttpRequestHandler`, which is `Integer.MAX_VALUE`. + +The following example shows how to enable the feature by using the default setup: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configureDefaultServletHandling(DefaultServletHandlerConfigurer configurer) { + configurer.enable(); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configureDefaultServletHandling(configurer: DefaultServletHandlerConfigurer) { + configurer.enable() + } +} +``` + +The following example shows how to achieve the same configuration in XML: + +``` + +``` + +The caveat to overriding the `/` Servlet mapping is that the `RequestDispatcher` for the +default Servlet must be retrieved by name rather than by path. 
The`DefaultServletHttpRequestHandler` tries to auto-detect the default Servlet for +the container at startup time, using a list of known names for most of the major Servlet +containers (including Tomcat, Jetty, GlassFish, JBoss, Resin, WebLogic, and WebSphere). +If the default Servlet has been custom-configured with a different name, or if a +different Servlet container is being used where the default Servlet name is unknown, +then you must explicitly provide the default Servlet’s name, as the following example shows: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configureDefaultServletHandling(DefaultServletHandlerConfigurer configurer) { + configurer.enable("myCustomDefaultServlet"); + } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configureDefaultServletHandling(configurer: DefaultServletHandlerConfigurer) { + configurer.enable("myCustomDefaultServlet") + } +} +``` + +The following example shows how to achieve the same configuration in XML: + +``` + +``` + +#### 1.11.12. Path Matching + +[WebFlux](web-reactive.html#webflux-config-path-matching) + +You can customize options related to path matching and treatment of the URL. +For details on the individual options, see the[`PathMatchConfigurer`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/servlet/config/annotation/PathMatchConfigurer.html) javadoc. + +The following example shows how to customize path matching in Java configuration: + +Java + +``` +@Configuration +@EnableWebMvc +public class WebConfig implements WebMvcConfigurer { + + @Override + public void configurePathMatch(PathMatchConfigurer configurer) { + configurer + .setPatternParser(new PathPatternParser()) + .addPathPrefix("/api", HandlerTypePredicate.forAnnotation(RestController.class)); + } + + private PathPatternParser patternParser() { + // ... 
+ } +} +``` + +Kotlin + +``` +@Configuration +@EnableWebMvc +class WebConfig : WebMvcConfigurer { + + override fun configurePathMatch(configurer: PathMatchConfigurer) { + configurer + .setPatternParser(patternParser) + .addPathPrefix("/api", HandlerTypePredicate.forAnnotation(RestController::class.java)) + } + + fun patternParser(): PathPatternParser { + //... + } +} +``` + +The following example shows how to achieve the same configuration in XML: + +``` + + + + + + +``` + +#### 1.11.13. Advanced Java Config + +[WebFlux](web-reactive.html#webflux-config-advanced-java) + +`@EnableWebMvc` imports `DelegatingWebMvcConfiguration`, which: + +* Provides default Spring configuration for Spring MVC applications + +* Detects and delegates to `WebMvcConfigurer` implementations to customize that configuration. + +For advanced mode, you can remove `@EnableWebMvc` and extend directly from`DelegatingWebMvcConfiguration` instead of implementing `WebMvcConfigurer`, +as the following example shows: + +Java + +``` +@Configuration +public class WebConfig extends DelegatingWebMvcConfiguration { + + // ... +} +``` + +Kotlin + +``` +@Configuration +class WebConfig : DelegatingWebMvcConfiguration() { + + // ... +} +``` + +You can keep existing methods in `WebConfig`, but you can now also override bean declarations +from the base class, and you can still have any number of other `WebMvcConfigurer` implementations on +the classpath. + +#### 1.11.14. Advanced XML Config + +The MVC namespace does not have an advanced mode. If you need to customize a property on +a bean that you cannot change otherwise, you can use the `BeanPostProcessor` lifecycle +hook of the Spring `ApplicationContext`, as the following example shows: + +Java + +``` +@Component +public class MyPostProcessor implements BeanPostProcessor { + + public Object postProcessBeforeInitialization(Object bean, String name) throws BeansException { + // ... 
+ } +} +``` + +Kotlin + +``` +@Component +class MyPostProcessor : BeanPostProcessor { + + override fun postProcessBeforeInitialization(bean: Any, name: String): Any { + // ... + } +} +``` + +Note that you need to declare `MyPostProcessor` as a bean, either explicitly in XML or +by letting it be detected through a `` declaration. + +### 1.12. HTTP/2 + +[WebFlux](web-reactive.html#webflux-http2) + +Servlet 4 containers are required to support HTTP/2, and Spring Framework 5 is compatible +with Servlet API 4. From a programming model perspective, there is nothing specific that +applications need to do. However, there are considerations related to server configuration. +For more details, see the[HTTP/2 wiki page](https://github.com/spring-projects/spring-framework/wiki/HTTP-2-support). + +The Servlet API does expose one construct related to HTTP/2. You can use the`javax.servlet.http.PushBuilder` to proactively push resources to clients, and it +is supported as a [method argument](#mvc-ann-arguments) to `@RequestMapping` methods. + +## 2. REST Clients + +This section describes options for client-side access to REST endpoints. + +### 2.1. `RestTemplate` + +`RestTemplate` is a synchronous client to perform HTTP requests. It is the original +Spring REST client and exposes a simple, template-method API over underlying HTTP client +libraries. + +| |As of 5.0 the `RestTemplate` is in maintenance mode, with only minor requests for
    changes and bugs to be accepted going forward. Please, consider using the[WebClient](web-reactive.html#webflux-client) which offers a more modern API and
    supports sync, async, and streaming scenarios.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [REST Endpoints](integration.html#rest-client-access) for details. + +### 2.2. `WebClient` + +`WebClient` is a non-blocking, reactive client to perform HTTP requests. It was +introduced in 5.0 and offers a modern alternative to the `RestTemplate`, with efficient +support for both synchronous and asynchronous, as well as streaming scenarios. + +In contrast to `RestTemplate`, `WebClient` supports the following: + +* Non-blocking I/O. + +* Reactive Streams back pressure. + +* High concurrency with fewer hardware resources. + +* Functional-style, fluent API that takes advantage of Java 8 lambdas. + +* Synchronous and asynchronous interactions. + +* Streaming up to or streaming down from a server. + +See [WebClient](web-reactive.html#webflux-client) for more details. + +## 3. Testing + +[Same in Spring WebFlux](web-reactive.html#webflux-test) + +This section summarizes the options available in `spring-test` for Spring MVC applications. + +* Servlet API Mocks: Mock implementations of Servlet API contracts for unit testing controllers, + filters, and other web components. See [Servlet API](testing.html#mock-objects-servlet)mock objects for more details. + +* TestContext Framework: Support for loading Spring configuration in JUnit and TestNG tests, + including efficient caching of the loaded configuration across test methods and support for + loading a `WebApplicationContext` with a `MockServletContext`. + See [TestContext Framework](testing.html#testcontext-framework) for more details. 
+ +* Spring MVC Test: A framework, also known as `MockMvc`, for testing annotated controllers + through the `DispatcherServlet` (that is, supporting annotations), complete with the + Spring MVC infrastructure but without an HTTP server. + See [Spring MVC Test](testing.html#spring-mvc-test-framework) for more details. + +* Client-side REST: `spring-test` provides a `MockRestServiceServer` that you can use as + a mock server for testing client-side code that internally uses the `RestTemplate`. + See [Client REST Tests](testing.html#spring-mvc-test-client) for more details. + +* `WebTestClient`: Built for testing WebFlux applications, but it can also be used for + end-to-end integration testing, to any server, over an HTTP connection. It is a + non-blocking, reactive client and is well suited for testing asynchronous and streaming + scenarios. + +## 4. WebSockets + +[WebFlux](web-reactive.html#webflux-websocket) + +This part of the reference documentation covers support for Servlet stack, WebSocket +messaging that includes raw WebSocket interactions, WebSocket emulation through SockJS, and +publish-subscribe messaging through STOMP as a sub-protocol over WebSocket. + +### 4.1. Introduction to WebSocket + +The WebSocket protocol, [RFC 6455](https://tools.ietf.org/html/rfc6455), provides a standardized +way to establish a full-duplex, two-way communication channel between client and server +over a single TCP connection. It is a different TCP protocol from HTTP but is designed to +work over HTTP, using ports 80 and 443 and allowing re-use of existing firewall rules. + +A WebSocket interaction begins with an HTTP request that uses the HTTP `Upgrade` header +to upgrade or, in this case, to switch to the WebSocket protocol. 
The following example +shows such an interaction: + +``` +GET /spring-websocket-portfolio/portfolio HTTP/1.1 +Host: localhost:8080 +Upgrade: websocket (1) +Connection: Upgrade (2) +Sec-WebSocket-Key: Uc9l9TMkWGbHFD2qnFHltg== +Sec-WebSocket-Protocol: v10.stomp, v11.stomp +Sec-WebSocket-Version: 13 +Origin: http://localhost:8080 +``` + +|**1**| The `Upgrade` header. | +|-----|-------------------------------| +|**2**|Using the `Upgrade` connection.| + +Instead of the usual 200 status code, a server with WebSocket support returns output +similar to the following: + +``` +HTTP/1.1 101 Switching Protocols (1) +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: 1qVdfYHU9hPOl4JYYNXF623Gzn0= +Sec-WebSocket-Protocol: v10.stomp +``` + +|**1**|Protocol switch| +|-----|---------------| + +After a successful handshake, the TCP socket underlying the HTTP upgrade request remains +open for both the client and the server to continue to send and receive messages. + +A complete introduction of how WebSockets work is beyond the scope of this document. +See RFC 6455, the WebSocket chapter of HTML5, or any of the many introductions and +tutorials on the Web. + +Note that, if a WebSocket server is running behind a web server (e.g. nginx), you +likely need to configure it to pass WebSocket upgrade requests on to the WebSocket +server. Likewise, if the application runs in a cloud environment, check the +instructions of the cloud provider related to WebSocket support. + +#### 4.1.1. HTTP Versus WebSocket + +Even though WebSocket is designed to be HTTP-compatible and starts with an HTTP request, +it is important to understand that the two protocols lead to very different +architectures and application programming models. + +In HTTP and REST, an application is modeled as many URLs. To interact with the application, +clients access those URLs, request-response style. Servers route requests to the +appropriate handler based on the HTTP URL, method, and headers. 
+ +By contrast, in WebSockets, there is usually only one URL for the initial connect. +Subsequently, all application messages flow on that same TCP connection. This points to +an entirely different asynchronous, event-driven, messaging architecture. + +WebSocket is also a low-level transport protocol, which, unlike HTTP, does not prescribe +any semantics to the content of messages. That means that there is no way to route or process +a message unless the client and the server agree on message semantics. + +WebSocket clients and servers can negotiate the use of a higher-level, messaging protocol +(for example, STOMP), through the `Sec-WebSocket-Protocol` header on the HTTP handshake request. +In the absence of that, they need to come up with their own conventions. + +#### 4.1.2. When to Use WebSockets + +WebSockets can make a web page be dynamic and interactive. However, in many cases, +a combination of Ajax and HTTP streaming or long polling can provide a simple and +effective solution. + +For example, news, mail, and social feeds need to update dynamically, but it may be +perfectly okay to do so every few minutes. Collaboration, games, and financial apps, on +the other hand, need to be much closer to real-time. + +Latency alone is not a deciding factor. If the volume of messages is relatively low (for example, +monitoring network failures) HTTP streaming or polling can provide an effective solution. +It is the combination of low latency, high frequency, and high volume that make the best +case for the use of WebSocket. + +Keep in mind also that over the Internet, restrictive proxies that are outside of your control +may preclude WebSocket interactions, either because they are not configured to pass on the`Upgrade` header or because they close long-lived connections that appear idle. This +means that the use of WebSocket for internal applications within the firewall is a more +straightforward decision than it is for public facing applications. + +### 4.2. 
WebSocket API + +[WebFlux](web-reactive.html#webflux-websocket-server) + +The Spring Framework provides a WebSocket API that you can use to write client- and +server-side applications that handle WebSocket messages. + +#### 4.2.1. `WebSocketHandler` + +[WebFlux](web-reactive.html#webflux-websocket-server-handler) + +Creating a WebSocket server is as simple as implementing `WebSocketHandler` or, more +likely, extending either `TextWebSocketHandler` or `BinaryWebSocketHandler`. The following +example uses `TextWebSocketHandler`: + +``` +import org.springframework.web.socket.WebSocketHandler; +import org.springframework.web.socket.WebSocketSession; +import org.springframework.web.socket.TextMessage; + +public class MyHandler extends TextWebSocketHandler { + + @Override + public void handleTextMessage(WebSocketSession session, TextMessage message) { + // ... + } + +} +``` + +There is dedicated WebSocket Java configuration and XML namespace support for mapping the preceding +WebSocket handler to a specific URL, as the following example shows: + +``` +import org.springframework.web.socket.config.annotation.EnableWebSocket; +import org.springframework.web.socket.config.annotation.WebSocketConfigurer; +import org.springframework.web.socket.config.annotation.WebSocketHandlerRegistry; + +@Configuration +@EnableWebSocket +public class WebSocketConfig implements WebSocketConfigurer { + + @Override + public void registerWebSocketHandlers(WebSocketHandlerRegistry registry) { + registry.addHandler(myHandler(), "/myHandler"); + } + + @Bean + public WebSocketHandler myHandler() { + return new MyHandler(); + } + +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + + +``` + +The preceding example is for use in Spring MVC applications and should be included +in the configuration of a [`DispatcherServlet`](#mvc-servlet). However, Spring’s +WebSocket support does not depend on Spring MVC. 
It is relatively simple to +integrate a `WebSocketHandler` into other HTTP-serving environments with the help of[`WebSocketHttpRequestHandler`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/socket/server/support/WebSocketHttpRequestHandler.html). + +When using the `WebSocketHandler` API directly vs indirectly, e.g. through the[STOMP](#websocket-stomp) messaging, the application must synchronize the sending of messages +since the underlying standard WebSocket session (JSR-356) does not allow concurrent +sending. One option is to wrap the `WebSocketSession` with[`ConcurrentWebSocketSessionDecorator`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/socket/handler/ConcurrentWebSocketSessionDecorator.html). + +#### 4.2.2. WebSocket Handshake + +[WebFlux](web-reactive.html#webflux-websocket-server-handshake) + +The easiest way to customize the initial HTTP WebSocket handshake request is through +a `HandshakeInterceptor`, which exposes methods for “before” and “after” the handshake. +You can use such an interceptor to preclude the handshake or to make any attributes +available to the `WebSocketSession`. The following example uses a built-in interceptor +to pass HTTP session attributes to the WebSocket session: + +``` +@Configuration +@EnableWebSocket +public class WebSocketConfig implements WebSocketConfigurer { + + @Override + public void registerWebSocketHandlers(WebSocketHandlerRegistry registry) { + registry.addHandler(new MyHandler(), "/myHandler") + .addInterceptors(new HttpSessionHandshakeInterceptor()); + } + +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + + + + + +``` + +A more advanced option is to extend the `DefaultHandshakeHandler` that performs +the steps of the WebSocket handshake, including validating the client origin, +negotiating a sub-protocol, and other details. 
An application may also need to use this +option if it needs to configure a custom `RequestUpgradeStrategy` in order to +adapt to a WebSocket server engine and version that is not yet supported +(see [Deployment](#websocket-server-deployment) for more on this subject). +Both the Java configuration and XML namespace make it possible to configure a custom`HandshakeHandler`. + +| |Spring provides a `WebSocketHandlerDecorator` base class that you can use to decorate
    a `WebSocketHandler` with additional behavior. Logging and exception handling
    implementations are provided and added by default when using the WebSocket Java configuration
    or XML namespace. The `ExceptionWebSocketHandlerDecorator` catches all uncaught
    exceptions that arise from any `WebSocketHandler` method and closes the WebSocket
    session with status `1011`, which indicates a server error.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.2.3. Deployment + +The Spring WebSocket API is easy to integrate into a Spring MVC application where +the `DispatcherServlet` serves both HTTP WebSocket handshake and other +HTTP requests. It is also easy to integrate into other HTTP processing scenarios +by invoking `WebSocketHttpRequestHandler`. This is convenient and easy to +understand. However, special considerations apply with regards to JSR-356 runtimes. + +The Java WebSocket API (JSR-356) provides two deployment mechanisms. The first +involves a Servlet container classpath scan (a Servlet 3 feature) at startup. +The other is a registration API to use at Servlet container initialization. +Neither of these mechanism makes it possible to use a single “front controller” +for all HTTP processing — including WebSocket handshake and all other HTTP +requests — such as Spring MVC’s `DispatcherServlet`. + +This is a significant limitation of JSR-356 that Spring’s WebSocket support addresses with +server-specific `RequestUpgradeStrategy` implementations even when running in a JSR-356 runtime. +Such strategies currently exist for Tomcat, Jetty, GlassFish, WebLogic, WebSphere, and +Undertow (and WildFly). + +| |A request to overcome the preceding limitation in the Java WebSocket API has been
    created and can be followed at [eclipse-ee4j/websocket-api#211](https://github.com/eclipse-ee4j/websocket-api/issues/211).
    Tomcat, Undertow, and WebSphere provide their own API alternatives that
    make it possible to do this, and it is also possible with Jetty. We are hopeful
    that more servers will do the same.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +A secondary consideration is that Servlet containers with JSR-356 support are expected +to perform a `ServletContainerInitializer` (SCI) scan that can slow down application +startup — in some cases, dramatically. If a significant impact is observed after an +upgrade to a Servlet container version with JSR-356 support, it should +be possible to selectively enable or disable web fragments (and SCI scanning) +through the use of the `` element in `web.xml`, as the following example shows: + +``` + + + + + +``` + +You can then selectively enable web fragments by name, such as Spring’s own`SpringServletContainerInitializer` that provides support for the Servlet 3 +Java initialization API. The following example shows how to do so: + +``` + + + + spring_web + + + +``` + +#### 4.2.4. Server Configuration + +[WebFlux](web-reactive.html#webflux-websocket-server-config) + +Each underlying WebSocket engine exposes configuration properties that control +runtime characteristics, such as the size of message buffer sizes, idle timeout, +and others. 
+ +For Tomcat, WildFly, and GlassFish, you can add a `ServletServerContainerFactoryBean` to your +WebSocket Java config, as the following example shows: + +``` +@Configuration +@EnableWebSocket +public class WebSocketConfig implements WebSocketConfigurer { + + @Bean + public ServletServerContainerFactoryBean createWebSocketContainer() { + ServletServerContainerFactoryBean container = new ServletServerContainerFactoryBean(); + container.setMaxTextMessageBufferSize(8192); + container.setMaxBinaryMessageBufferSize(8192); + return container; + } + +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + +``` + +| |For client-side WebSocket configuration, you should use `WebSocketContainerFactoryBean`(XML) or `ContainerProvider.getWebSocketContainer()` (Java configuration).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For Jetty, you need to supply a pre-configured Jetty `WebSocketServerFactory` and plug +that into Spring’s `DefaultHandshakeHandler` through your WebSocket Java config. 
+The following example shows how to do so: + +``` +@Configuration +@EnableWebSocket +public class WebSocketConfig implements WebSocketConfigurer { + + @Override + public void registerWebSocketHandlers(WebSocketHandlerRegistry registry) { + registry.addHandler(echoWebSocketHandler(), + "/echo").setHandshakeHandler(handshakeHandler()); + } + + @Bean + public DefaultHandshakeHandler handshakeHandler() { + + WebSocketPolicy policy = new WebSocketPolicy(WebSocketBehavior.SERVER); + policy.setInputBufferSize(8192); + policy.setIdleTimeout(600000); + + return new DefaultHandshakeHandler( + new JettyRequestUpgradeStrategy(new WebSocketServerFactory(policy))); + } + +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +#### 4.2.5. Allowed Origins + +[WebFlux](web-reactive.html#webflux-websocket-server-cors) + +As of Spring Framework 4.1.5, the default behavior for WebSocket and SockJS is to accept +only same-origin requests. It is also possible to allow all or a specified list of origins. +This check is mostly designed for browser clients. Nothing prevents other types +of clients from modifying the `Origin` header value (see[RFC 6454: The Web Origin Concept](https://tools.ietf.org/html/rfc6454) for more details). + +The three possible behaviors are: + +* Allow only same-origin requests (default): In this mode, when SockJS is enabled, the + Iframe HTTP response header `X-Frame-Options` is set to `SAMEORIGIN`, and JSONP + transport is disabled, since it does not allow checking the origin of a request. + As a consequence, IE6 and IE7 are not supported when this mode is enabled. + +* Allow a specified list of origins: Each allowed origin must start with `http://`or `https://`. In this mode, when SockJS is enabled, IFrame transport is disabled. + As a consequence, IE6 through IE9 are not supported when this + mode is enabled. 
+ +* Allow all origins: To enable this mode, you should provide `*` as the allowed origin + value. In this mode, all transports are available. + +You can configure WebSocket and SockJS allowed origins, as the following example shows: + +``` +import org.springframework.web.socket.config.annotation.EnableWebSocket; +import org.springframework.web.socket.config.annotation.WebSocketConfigurer; +import org.springframework.web.socket.config.annotation.WebSocketHandlerRegistry; + +@Configuration +@EnableWebSocket +public class WebSocketConfig implements WebSocketConfigurer { + + @Override + public void registerWebSocketHandlers(WebSocketHandlerRegistry registry) { + registry.addHandler(myHandler(), "/myHandler").setAllowedOrigins("https://mydomain.com"); + } + + @Bean + public WebSocketHandler myHandler() { + return new MyHandler(); + } + +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + + +``` + +### 4.3. SockJS Fallback + +Over the public Internet, restrictive proxies outside your control may preclude WebSocket +interactions, either because they are not configured to pass on the `Upgrade` header or +because they close long-lived connections that appear to be idle. + +The solution to this problem is WebSocket emulation — that is, attempting to use WebSocket +first and then falling back on HTTP-based techniques that emulate a WebSocket +interaction and expose the same application-level API. + +On the Servlet stack, the Spring Framework provides both server (and also client) support +for the SockJS protocol. + +#### 4.3.1. Overview + +The goal of SockJS is to let applications use a WebSocket API but fall back to +non-WebSocket alternatives when necessary at runtime, without the need to +change application code. 
+ +SockJS consists of: + +* The [SockJS protocol](https://github.com/sockjs/sockjs-protocol)defined in the form of executable[narrated tests](https://sockjs.github.io/sockjs-protocol/sockjs-protocol-0.3.3.html). + +* The [SockJS JavaScript client](https://github.com/sockjs/sockjs-client/) — a client library for use in browsers. + +* SockJS server implementations, including one in the Spring Framework `spring-websocket` module. + +* A SockJS Java client in the `spring-websocket` module (since version 4.1). + +SockJS is designed for use in browsers. It uses a variety of techniques +to support a wide range of browser versions. +For the full list of SockJS transport types and browsers, see the[SockJS client](https://github.com/sockjs/sockjs-client/) page. Transports +fall in three general categories: WebSocket, HTTP Streaming, and HTTP Long Polling. +For an overview of these categories, see[this blog post](https://spring.io/blog/2012/05/08/spring-mvc-3-2-preview-techniques-for-real-time-updates/). + +The SockJS client begins by sending `GET /info` to +obtain basic information from the server. After that, it must decide what transport +to use. If possible, WebSocket is used. If not, in most browsers, +there is at least one HTTP streaming option. If not, then HTTP (long) +polling is used. + +All transport requests have the following URL structure: + +``` +https://host:port/myApp/myEndpoint/{server-id}/{session-id}/{transport} +``` + +where: + +* `{server-id}` is useful for routing requests in a cluster but is not used otherwise. + +* `{session-id}` correlates HTTP requests belonging to a SockJS session. + +* `{transport}` indicates the transport type (for example, `websocket`, `xhr-streaming`, and others). + +The WebSocket transport needs only a single HTTP request to do the WebSocket handshake. +All messages thereafter are exchanged on that socket. + +HTTP transports require more requests. 
Ajax/XHR streaming, for example, relies on +one long-running request for server-to-client messages and additional HTTP POST +requests for client-to-server messages. Long polling is similar, except that it +ends the current request after each server-to-client send. + +SockJS adds minimal message framing. For example, the server sends the letter `o`(“open” frame) initially, messages are sent as `a["message1","message2"]`(JSON-encoded array), the letter `h` (“heartbeat” frame) if no messages flow +for 25 seconds (by default), and the letter `c` (“close” frame) to close the session. + +To learn more, run an example in a browser and watch the HTTP requests. +The SockJS client allows fixing the list of transports, so it is possible to +see each transport one at a time. The SockJS client also provides a debug flag, +which enables helpful messages in the browser console. On the server side, you can enable`TRACE` logging for `org.springframework.web.socket`. +For even more detail, see the SockJS protocol[narrated test](https://sockjs.github.io/sockjs-protocol/sockjs-protocol-0.3.3.html). + +#### 4.3.2. Enabling SockJS + +You can enable SockJS through Java configuration, as the following example shows: + +``` +@Configuration +@EnableWebSocket +public class WebSocketConfig implements WebSocketConfigurer { + + @Override + public void registerWebSocketHandlers(WebSocketHandlerRegistry registry) { + registry.addHandler(myHandler(), "/myHandler").withSockJS(); + } + + @Bean + public WebSocketHandler myHandler() { + return new MyHandler(); + } + +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + + + +``` + +The preceding example is for use in Spring MVC applications and should be included in the +configuration of a [`DispatcherServlet`](#mvc-servlet). However, Spring’s WebSocket +and SockJS support does not depend on Spring MVC. 
It is relatively simple to +integrate into other HTTP serving environments with the help of[`SockJsHttpRequestHandler`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/socket/sockjs/support/SockJsHttpRequestHandler.html). + +On the browser side, applications can use the[`sockjs-client`](https://github.com/sockjs/sockjs-client/) (version 1.0.x). It +emulates the W3C WebSocket API and communicates with the server to select the best +transport option, depending on the browser in which it runs. See the[sockjs-client](https://github.com/sockjs/sockjs-client/) page and the list of +transport types supported by browser. The client also provides several +configuration options — for example, to specify which transports to include. + +#### 4.3.3. IE 8 and 9 + +Internet Explorer 8 and 9 remain in use. They are +a key reason for having SockJS. This section covers important +considerations about running in those browsers. + +The SockJS client supports Ajax/XHR streaming in IE 8 and 9 by using Microsoft’s[`XDomainRequest`](https://blogs.msdn.com/b/ieinternals/archive/2010/05/13/xdomainrequest-restrictions-limitations-and-workarounds.aspx). +That works across domains but does not support sending cookies. +Cookies are often essential for Java applications. +However, since the SockJS client can be used with many server +types (not just Java ones), it needs to know whether cookies matter. +If so, the SockJS client prefers Ajax/XHR for streaming. Otherwise, it +relies on an iframe-based technique. + +The first `/info` request from the SockJS client is a request for +information that can influence the client’s choice of transports. +One of those details is whether the server application relies on cookies +(for example, for authentication purposes or clustering with sticky sessions). +Spring’s SockJS support includes a property called `sessionCookieNeeded`. +It is enabled by default, since most Java applications rely on the `JSESSIONID`cookie. 
If your application does not need it, you can turn off this option, +and SockJS client should then choose `xdr-streaming` in IE 8 and 9. + +If you do use an iframe-based transport, keep in mind +that browsers can be instructed to block the use of IFrames on a given page by +setting the HTTP response header `X-Frame-Options` to `DENY`,`SAMEORIGIN`, or `ALLOW-FROM `. This is used to prevent[clickjacking](https://www.owasp.org/index.php/Clickjacking). + +| |Spring Security 3.2+ provides support for setting `X-Frame-Options` on every
    response. By default, the Spring Security Java configuration sets it to `DENY`.
    In 3.2, the Spring Security XML namespace does not set that header by default
    but can be configured to do so. In the future, it may set it by default.

    See [Default Security Headers](https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#headers) of the Spring Security documentation for details on how to configure the
    setting of the `X-Frame-Options` header. You can also see[SEC-2501](https://jira.spring.io/browse/SEC-2501) for additional background.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If your application adds the `X-Frame-Options` response header (as it should!) +and relies on an iframe-based transport, you need to set the header value to`SAMEORIGIN` or `ALLOW-FROM `. The Spring SockJS +support also needs to know the location of the SockJS client, because it is loaded +from the iframe. By default, the iframe is set to download the SockJS client +from a CDN location. It is a good idea to configure this option to use +a URL from the same origin as the application. + +The following example shows how to do so in Java configuration: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + @Override + public void registerStompEndpoints(StompEndpointRegistry registry) { + registry.addEndpoint("/portfolio").withSockJS() + .setClientLibraryUrl("http://localhost:8080/myapp/js/sockjs-client.js"); + } + + // ... + +} +``` + +The XML namespace provides a similar option through the `` element. + +| |During initial development, do enable the SockJS client `devel` mode that prevents
    the browser from caching SockJS requests (like the iframe) that would otherwise
    be cached. For details on how to enable it see the[SockJS client](https://github.com/sockjs/sockjs-client/) page.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.3.4. Heartbeats + +The SockJS protocol requires servers to send heartbeat messages to preclude proxies +from concluding that a connection is hung. The Spring SockJS configuration has a property +called `heartbeatTime` that you can use to customize the frequency. By default, a +heartbeat is sent after 25 seconds, assuming no other messages were sent on that +connection. This 25-second value is in line with the following[IETF recommendation](https://tools.ietf.org/html/rfc6202) for public Internet applications. + +| |When using STOMP over WebSocket and SockJS, if the STOMP client and server negotiate
    heartbeats to be exchanged, the SockJS heartbeats are disabled.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------| + +The Spring SockJS support also lets you configure the `TaskScheduler` to +schedule heartbeats tasks. The task scheduler is backed by a thread pool, +with default settings based on the number of available processors. Your +should consider customizing the settings according to your specific needs. + +#### 4.3.5. Client Disconnects + +HTTP streaming and HTTP long polling SockJS transports require a connection to remain +open longer than usual. For an overview of these techniques, see[this blog post](https://spring.io/blog/2012/05/08/spring-mvc-3-2-preview-techniques-for-real-time-updates/). + +In Servlet containers, this is done through Servlet 3 asynchronous support that +allows exiting the Servlet container thread, processing a request, and continuing +to write to the response from another thread. + +A specific issue is that the Servlet API does not provide notifications for a client +that has gone away. See [eclipse-ee4j/servlet-api#44](https://github.com/eclipse-ee4j/servlet-api/issues/44). +However, Servlet containers raise an exception on subsequent attempts to write +to the response. Since Spring’s SockJS Service supports server-sent heartbeats (every +25 seconds by default), that means a client disconnect is usually detected within that +time period (or earlier, if messages are sent more frequently). + +| |As a result, network I/O failures can occur because a client has disconnected, which
    can fill the log with unnecessary stack traces. Spring makes a best effort to identify
    such network failures that represent client disconnects (specific to each server) and log
    a minimal message by using the dedicated log category, `DISCONNECTED_CLIENT_LOG_CATEGORY` (defined in `AbstractSockJsSession`). If you need to see the stack traces, you can set that
    log category to TRACE.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.3.6. SockJS and CORS + +If you allow cross-origin requests (see [Allowed Origins](#websocket-server-allowed-origins)), the SockJS protocol +uses CORS for cross-domain support in the XHR streaming and polling transports. Therefore, +CORS headers are added automatically, unless the presence of CORS headers in the response +is detected. So, if an application is already configured to provide CORS support (for example, +through a Servlet Filter), Spring’s `SockJsService` skips this part. + +It is also possible to disable the addition of these CORS headers by setting the`suppressCors` property in Spring’s SockJsService. + +SockJS expects the following headers and values: + +* `Access-Control-Allow-Origin`: Initialized from the value of the `Origin` request header. + +* `Access-Control-Allow-Credentials`: Always set to `true`. + +* `Access-Control-Request-Headers`: Initialized from values from the equivalent request header. + +* `Access-Control-Allow-Methods`: The HTTP methods a transport supports (see `TransportType` enum). + +* `Access-Control-Max-Age`: Set to 31536000 (1 year). + +For the exact implementation, see `addCorsHeaders` in `AbstractSockJsService` and +the `TransportType` enum in the source code. + +Alternatively, if the CORS configuration allows it, consider excluding URLs with the +SockJS endpoint prefix, thus letting Spring’s `SockJsService` handle it. + +#### 4.3.7. 
`SockJsClient` + +Spring provides a SockJS Java client to connect to remote SockJS endpoints without +using a browser. This can be especially useful when there is a need for bidirectional +communication between two servers over a public network (that is, where network proxies can +preclude the use of the WebSocket protocol). A SockJS Java client is also very useful +for testing purposes (for example, to simulate a large number of concurrent users). + +The SockJS Java client supports the `websocket`, `xhr-streaming`, and `xhr-polling`transports. The remaining ones only make sense for use in a browser. + +You can configure the `WebSocketTransport` with: + +* `StandardWebSocketClient` in a JSR-356 runtime. + +* `JettyWebSocketClient` by using the Jetty 9+ native WebSocket API. + +* Any implementation of Spring’s `WebSocketClient`. + +An `XhrTransport`, by definition, supports both `xhr-streaming` and `xhr-polling`, since, +from a client perspective, there is no difference other than in the URL used to connect +to the server. At present there are two implementations: + +* `RestTemplateXhrTransport` uses Spring’s `RestTemplate` for HTTP requests. + +* `JettyXhrTransport` uses Jetty’s `HttpClient` for HTTP requests. + +The following example shows how to create a SockJS client and connect to a SockJS endpoint: + +``` +List transports = new ArrayList<>(2); +transports.add(new WebSocketTransport(new StandardWebSocketClient())); +transports.add(new RestTemplateXhrTransport()); + +SockJsClient sockJsClient = new SockJsClient(transports); +sockJsClient.doHandshake(new MyWebSocketHandler(), "ws://example.com:8080/sockjs"); +``` + +| |SockJS uses JSON formatted arrays for messages. By default, Jackson 2 is used and needs
    to be on the classpath. Alternatively, you can configure a custom implementation of`SockJsMessageCodec` and configure it on the `SockJsClient`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To use `SockJsClient` to simulate a large number of concurrent users, you +need to configure the underlying HTTP client (for XHR transports) to allow a sufficient +number of connections and threads. The following example shows how to do so with Jetty: + +``` +HttpClient jettyHttpClient = new HttpClient(); +jettyHttpClient.setMaxConnectionsPerDestination(1000); +jettyHttpClient.setExecutor(new QueuedThreadPool(1000)); +``` + +The following example shows the server-side SockJS-related properties (see javadoc for details) +that you should also consider customizing: + +``` +@Configuration +public class WebSocketConfig extends WebSocketMessageBrokerConfigurationSupport { + + @Override + public void registerStompEndpoints(StompEndpointRegistry registry) { + registry.addEndpoint("/sockjs").withSockJS() + .setStreamBytesLimit(512 * 1024) (1) + .setHttpMessageCacheSize(1000) (2) + .setDisconnectDelay(30 * 1000); (3) + } + + // ... +} +``` + +|**1**| Set the `streamBytesLimit` property to 512KB (the default is 128KB — `128 * 1024`). | +|-----|-----------------------------------------------------------------------------------------------------| +|**2**| Set the `httpMessageCacheSize` property to 1,000 (the default is `100`). | +|**3**|Set the `disconnectDelay` property to 30 property seconds (the default is five seconds — `5 * 1000`).| + +### 4.4. STOMP + +The WebSocket protocol defines two types of messages (text and binary), but their +content is undefined. 
The protocol defines a mechanism for client and server to negotiate a +sub-protocol (that is, a higher-level messaging protocol) to use on top of WebSocket to +define what kind of messages each can send, what the format is, the content of each +message, and so on. The use of a sub-protocol is optional but, either way, the client and +the server need to agree on some protocol that defines message content. + +#### 4.4.1. Overview + +[STOMP](https://stomp.github.io/stomp-specification-1.2.html#Abstract) (Simple +Text Oriented Messaging Protocol) was originally created for scripting languages +(such as Ruby, Python, and Perl) to connect to enterprise message brokers. It is +designed to address a minimal subset of commonly used messaging patterns. STOMP can be +used over any reliable two-way streaming network protocol, such as TCP and WebSocket. +Although STOMP is a text-oriented protocol, message payloads can be +either text or binary. + +STOMP is a frame-based protocol whose frames are modeled on HTTP. The following listing shows the structure +of a STOMP frame: + +``` +COMMAND +header1:value1 +header2:value2 + +Body^@ +``` + +Clients can use the `SEND` or `SUBSCRIBE` commands to send or subscribe for +messages, along with a `destination` header that describes what the +message is about and who should receive it. This enables a simple +publish-subscribe mechanism that you can use to send messages through the broker +to other connected clients or to send messages to the server to request that +some work be performed. + +When you use Spring’s STOMP support, the Spring WebSocket application acts +as the STOMP broker to clients. Messages are routed to `@Controller` message-handling +methods or to a simple in-memory broker that keeps track of subscriptions and +broadcasts messages to subscribed users. You can also configure Spring to work +with a dedicated STOMP broker (such as RabbitMQ, ActiveMQ, and others) for the actual +broadcasting of messages. 
In that case, Spring maintains +TCP connections to the broker, relays messages to it, and passes messages +from it down to connected WebSocket clients. Thus, Spring web applications can +rely on unified HTTP-based security, common validation, and a familiar programming +model for message handling. + +The following example shows a client subscribing to receive stock quotes, which +the server may emit periodically (for example, via a scheduled task that sends messages +through a `SimpMessagingTemplate` to the broker): + +``` +SUBSCRIBE +id:sub-1 +destination:/topic/price.stock.* + +^@ +``` + +The following example shows a client that sends a trade request, which the server +can handle through an `@MessageMapping` method: + +``` +SEND +destination:/queue/trade +content-type:application/json +content-length:44 + +{"action":"BUY","ticker":"MMM","shares",44}^@ +``` + +After the execution, the server can +broadcast a trade confirmation message and details down to the client. + +The meaning of a destination is intentionally left opaque in the STOMP spec. It can +be any string, and it is entirely up to STOMP servers to define the semantics and +the syntax of the destinations that they support. It is very common, however, for +destinations to be path-like strings where `/topic/..` implies publish-subscribe +(one-to-many) and `/queue/` implies point-to-point (one-to-one) message +exchanges. + +STOMP servers can use the `MESSAGE` command to broadcast messages to all subscribers. +The following example shows a server sending a stock quote to a subscribed client: + +``` +MESSAGE +message-id:nxahklf6-1 +subscription:sub-1 +destination:/topic/price.stock.MMM + +{"ticker":"MMM","price":129.45}^@ +``` + +A server cannot send unsolicited messages. All messages +from a server must be in response to a specific client subscription, and the`subscription-id` header of the server message must match the `id` header of the +client subscription. 
+ +The preceding overview is intended to provide the most basic understanding of the +STOMP protocol. We recommended reviewing the protocol[specification](https://stomp.github.io/stomp-specification-1.2.html) in full. + +#### 4.4.2. Benefits + +Using STOMP as a sub-protocol lets the Spring Framework and Spring Security +provide a richer programming model versus using raw WebSockets. The same point can be +made about HTTP versus raw TCP and how it lets Spring MVC and other web frameworks +provide rich functionality. The following is a list of benefits: + +* No need to invent a custom messaging protocol and message format. + +* STOMP clients, including a [Java client](#websocket-stomp-client)in the Spring Framework, are available. + +* You can (optionally) use message brokers (such as RabbitMQ, ActiveMQ, and others) to + manage subscriptions and broadcast messages. + +* Application logic can be organized in any number of `@Controller` instances and messages can be + routed to them based on the STOMP destination header versus handling raw WebSocket messages + with a single `WebSocketHandler` for a given connection. + +* You can use Spring Security to secure messages based on STOMP destinations and message types. + +#### 4.4.3. Enable STOMP + +STOMP over WebSocket support is available in the `spring-messaging` and`spring-websocket` modules. 
Once you have those dependencies, you can expose a STOMP +endpoints, over WebSocket with [SockJS Fallback](#websocket-fallback), as the following example shows: + +``` +import org.springframework.web.socket.config.annotation.EnableWebSocketMessageBroker; +import org.springframework.web.socket.config.annotation.StompEndpointRegistry; + +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + @Override + public void registerStompEndpoints(StompEndpointRegistry registry) { + registry.addEndpoint("/portfolio").withSockJS(); (1) + } + + @Override + public void configureMessageBroker(MessageBrokerRegistry config) { + config.setApplicationDestinationPrefixes("/app"); (2) + config.enableSimpleBroker("/topic", "/queue"); (3) + } +} +``` + +|**1**| `/portfolio` is the HTTP URL for the endpoint to which a WebSocket (or SockJS)
    client needs to connect for the WebSocket handshake. | +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| STOMP messages whose destination header begins with `/app` are routed to`@MessageMapping` methods in `@Controller` classes. | +|**3**|Use the built-in message broker for subscriptions and broadcasting and
    route messages whose destination header begins with `/topic `or `/queue` to the broker.| + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + + + +``` + +| |For the built-in simple broker, the `/topic` and `/queue` prefixes do not have any special
    meaning. They are merely a convention to differentiate between pub-sub versus point-to-point
    messaging (that is, many subscribers versus one consumer). When you use an external broker,
    check the STOMP page of the broker to understand what kind of STOMP destinations and
    prefixes it supports.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To connect from a browser, for SockJS, you can use the[`sockjs-client`](https://github.com/sockjs/sockjs-client). For STOMP, many applications have +used the [jmesnil/stomp-websocket](https://github.com/jmesnil/stomp-websocket) library +(also known as stomp.js), which is feature-complete and has been used in production for +years but is no longer maintained. At present the[JSteunou/webstomp-client](https://github.com/JSteunou/webstomp-client) is the most +actively maintained and evolving successor of that library. The following example code +is based on it: + +``` +var socket = new SockJS("/spring-websocket-portfolio/portfolio"); +var stompClient = webstomp.over(socket); + +stompClient.connect({}, function(frame) { +} +``` + +Alternatively, if you connect through WebSocket (without SockJS), you can use the following code: + +``` +var socket = new WebSocket("/spring-websocket-portfolio/portfolio"); +var stompClient = Stomp.over(socket); + +stompClient.connect({}, function(frame) { +} +``` + +Note that `stompClient` in the preceding example does not need to specify `login`and `passcode` headers. Even if it did, they would be ignored (or, rather, +overridden) on the server side. See [Connecting to a Broker](#websocket-stomp-handle-broker-relay-configure)and [Authentication](#websocket-stomp-authentication) for more information on authentication. + +For more example code see: + +* [Using WebSocket to build an + interactive web application](https://spring.io/guides/gs/messaging-stomp-websocket/) — a getting started guide. 
+ +* [Stock Portfolio](https://github.com/rstoyanchev/spring-websocket-portfolio) — a sample + application. + +#### 4.4.4. WebSocket Server + +To configure the underlying WebSocket server, the information in[Server Configuration](#websocket-server-runtime-configuration) applies. For Jetty, however you need to set +the `HandshakeHandler` and `WebSocketPolicy` through the `StompEndpointRegistry`: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + @Override + public void registerStompEndpoints(StompEndpointRegistry registry) { + registry.addEndpoint("/portfolio").setHandshakeHandler(handshakeHandler()); + } + + @Bean + public DefaultHandshakeHandler handshakeHandler() { + + WebSocketPolicy policy = new WebSocketPolicy(WebSocketBehavior.SERVER); + policy.setInputBufferSize(8192); + policy.setIdleTimeout(600000); + + return new DefaultHandshakeHandler( + new JettyRequestUpgradeStrategy(new WebSocketServerFactory(policy))); + } +} +``` + +#### 4.4.5. Flow of Messages + +Once a STOMP endpoint is exposed, the Spring application becomes a STOMP broker for +connected clients. This section describes the flow of messages on the server side. + +The `spring-messaging` module contains foundational support for messaging applications +that originated in [Spring Integration](https://spring.io/spring-integration) and was +later extracted and incorporated into the Spring Framework for broader use across many[Spring projects](https://spring.io/projects) and application scenarios. +The following list briefly describes a few of the available messaging abstractions: + +* [Message](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/messaging/Message.html): + Simple representation for a message, including headers and payload. 
+ +* [MessageHandler](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/messaging/MessageHandler.html): + Contract for handling a message. + +* [MessageChannel](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/messaging/MessageChannel.html): + Contract for sending a message that enables loose coupling between producers and consumers. + +* [SubscribableChannel](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/messaging/SubscribableChannel.html):`MessageChannel` with `MessageHandler` subscribers. + +* [ExecutorSubscribableChannel](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/messaging/support/ExecutorSubscribableChannel.html):`SubscribableChannel` that uses an `Executor` for delivering messages. + +Both the Java configuration (that is, `@EnableWebSocketMessageBroker`) and the XML namespace configuration +(that is, ``) use the preceding components to assemble a message +workflow. The following diagram shows the components used when the simple built-in message +broker is enabled: + +![message flow simple broker](images/message-flow-simple-broker.png) + +The preceding diagram shows three message channels: + +* `clientInboundChannel`: For passing messages received from WebSocket clients. + +* `clientOutboundChannel`: For sending server messages to WebSocket clients. + +* `brokerChannel`: For sending messages to the message broker from within + server-side application code. + +The next diagram shows the components used when an external broker (such as RabbitMQ) +is configured for managing subscriptions and broadcasting messages: + +![message flow broker relay](images/message-flow-broker-relay.png) + +The main difference between the two preceding diagrams is the use of the “broker relay” for passing +messages up to the external STOMP broker over TCP and for passing messages down from the +broker to subscribed clients. 
+ +When messages are received from a WebSocket connection, they are decoded to STOMP frames, +turned into a Spring `Message` representation, and sent to the`clientInboundChannel` for further processing. For example, STOMP messages whose +destination headers start with `/app` may be routed to `@MessageMapping` methods in +annotated controllers, while `/topic` and `/queue` messages may be routed directly +to the message broker. + +An annotated `@Controller` that handles a STOMP message from a client may send a message to +the message broker through the `brokerChannel`, and the broker broadcasts the +message to matching subscribers through the `clientOutboundChannel`. The same +controller can also do the same in response to HTTP requests, so a client can perform an +HTTP POST, and then a `@PostMapping` method can send a message to the message broker +to broadcast to subscribed clients. + +We can trace the flow through a simple example. Consider the following example, which sets up a server: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + @Override + public void registerStompEndpoints(StompEndpointRegistry registry) { + registry.addEndpoint("/portfolio"); + } + + @Override + public void configureMessageBroker(MessageBrokerRegistry registry) { + registry.setApplicationDestinationPrefixes("/app"); + registry.enableSimpleBroker("/topic"); + } +} + +@Controller +public class GreetingController { + + @MessageMapping("/greeting") + public String handle(String greeting) { + return "[" + getTimestamp() + ": " + greeting; + } +} +``` + +The preceding example supports the following flow: + +1. The client connects to `[http://localhost:8080/portfolio](http://localhost:8080/portfolio)` and, once a WebSocket connection + is established, STOMP frames begin to flow on it. + +2. The client sends a SUBSCRIBE frame with a destination header of `/topic/greeting`. 
Once received + and decoded, the message is sent to the `clientInboundChannel` and is then routed to the + message broker, which stores the client subscription. + +3. The client sends a SEND frame to `/app/greeting`. The `/app` prefix helps to route it to + annotated controllers. After the `/app` prefix is stripped, the remaining `/greeting`part of the destination is mapped to the `@MessageMapping` method in `GreetingController`. + +4. The value returned from `GreetingController` is turned into a Spring `Message` with + a payload based on the return value and a default destination header of`/topic/greeting` (derived from the input destination with `/app` replaced by`/topic`). The resulting message is sent to the `brokerChannel` and handled + by the message broker. + +5. The message broker finds all matching subscribers and sends a MESSAGE frame to each one + through the `clientOutboundChannel`, from where messages are encoded as STOMP frames + and sent on the WebSocket connection. + +The next section provides more details on annotated methods, including the +kinds of arguments and return values that are supported. + +#### 4.4.6. Annotated Controllers + +Applications can use annotated `@Controller` classes to handle messages from clients. +Such classes can declare `@MessageMapping`, `@SubscribeMapping`, and `@ExceptionHandler`methods, as described in the following topics: + +* [`@MessageMapping`](#websocket-stomp-message-mapping) + +* [`@SubscribeMapping`](#websocket-stomp-subscribe-mapping) + +* [`@MessageExceptionHandler`](#websocket-stomp-exception-handler) + +##### `@MessageMapping` + +You can use `@MessageMapping` to annotate methods that route messages based on their +destination. It is supported at the method level as well as at the type level. At the type +level, `@MessageMapping` is used to express shared mappings across all methods in a +controller. 
+ +By default, the mapping values are Ant-style path patterns (for example `/thing*`, `/thing/**`), +including support for template variables (for example, `/thing/{id}`). The values can be +referenced through `@DestinationVariable` method arguments. Applications can also switch to +a dot-separated destination convention for mappings, as explained in[Dots as Separators](#websocket-stomp-destination-separator). + +###### Supported Method Arguments + +The following table describes the method arguments: + +| Method argument | Description | +|-------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Message` | For access to the complete message. | +| `MessageHeaders` | For access to the headers within the `Message`. | +|`MessageHeaderAccessor`, `SimpMessageHeaderAccessor`, and `StompHeaderAccessor`| For access to the headers through typed accessor methods. | +| `@Payload` |For access to the payload of the message, converted (for example, from JSON) by a configured`MessageConverter`.

    The presence of this annotation is not required since it is, by default, assumed if no
    other argument is matched.

    You can annotate payload arguments with `@javax.validation.Valid` or Spring’s `@Validated`,
    to have the payload arguments be automatically validated.| +| `@Header` | For access to a specific header value — along with type conversion using an`org.springframework.core.convert.converter.Converter`, if necessary. | +| `@Headers` | For access to all headers in the message. This argument must be assignable to`java.util.Map`. | +| `@DestinationVariable` | For access to template variables extracted from the message destination.
    Values are converted to the declared method argument type as necessary. | +| `java.security.Principal` | Reflects the user logged in at the time of the WebSocket HTTP handshake. | + +###### Return Values + +By default, the return value from a `@MessageMapping` method is serialized to a payload +through a matching `MessageConverter` and sent as a `Message` to the `brokerChannel`, +from where it is broadcast to subscribers. The destination of the outbound message is the +same as that of the inbound message but prefixed with `/topic`. + +You can use the `@SendTo` and `@SendToUser` annotations to customize the destination of +the output message. `@SendTo` is used to customize the target destination or to +specify multiple destinations. `@SendToUser` is used to direct the output message +to only the user associated with the input message. See [User Destinations](#websocket-stomp-user-destination). + +You can use both `@SendTo` and `@SendToUser` at the same time on the same method, and both +are supported at the class level, in which case they act as a default for methods in the +class. However, keep in mind that any method-level `@SendTo` or `@SendToUser` annotations +override any such annotations at the class level. + +Messages can be handled asynchronously and a `@MessageMapping` method can return`ListenableFuture`, `CompletableFuture`, or `CompletionStage`. + +Note that `@SendTo` and `@SendToUser` are merely a convenience that amounts to using the`SimpMessagingTemplate` to send messages. If necessary, for more advanced scenarios,`@MessageMapping` methods can fall back on using the `SimpMessagingTemplate` directly. +This can be done instead of, or possibly in addition to, returning a value. +See [Sending Messages](#websocket-stomp-handle-send). + +##### `@SubscribeMapping` + +`@SubscribeMapping` is similar to `@MessageMapping` but narrows the mapping to +subscription messages only. 
It supports the same[method arguments](#websocket-stomp-message-mapping) as `@MessageMapping`. However +for the return value, by default, a message is sent directly to the client (through`clientOutboundChannel`, in response to the subscription) and not to the broker (through`brokerChannel`, as a broadcast to matching subscriptions). Adding `@SendTo` or`@SendToUser` overrides this behavior and sends to the broker instead. + +When is this useful? Assume that the broker is mapped to `/topic` and `/queue`, while +application controllers are mapped to `/app`. In this setup, the broker stores all +subscriptions to `/topic` and `/queue` that are intended for repeated broadcasts, and +there is no need for the application to get involved. A client could also subscribe to +some `/app` destination, and a controller could return a value in response to that +subscription without involving the broker without storing or using the subscription again +(effectively a one-time request-reply exchange). One use case for this is populating a UI +with initial data on startup. + +When is this not useful? Do not try to map broker and controllers to the same destination +prefix unless you want both to independently process messages, including subscriptions, +for some reason. Inbound messages are handled in parallel. There are no guarantees whether +a broker or a controller processes a given message first. If the goal is to be notified +when a subscription is stored and ready for broadcasts, a client should ask for a +receipt if the server supports it (simple broker does not). For example, with the Java[STOMP client](#websocket-stomp-client), you could do the following to add a receipt: + +``` +@Autowired +private TaskScheduler messageBrokerTaskScheduler; + +// During initialization.. +stompClient.setTaskScheduler(this.messageBrokerTaskScheduler); + +// When subscribing.. 
+StompHeaders headers = new StompHeaders(); +headers.setDestination("/topic/..."); +headers.setReceipt("r1"); +FrameHandler handler = ...; +stompSession.subscribe(headers, handler).addReceiptTask(() -> { + // Subscription ready... +}); +``` + +A server side option is [to register](#websocket-stomp-interceptors) an`ExecutorChannelInterceptor` on the `brokerChannel` and implement the `afterMessageHandled`method that is invoked after messages, including subscriptions, have been handled. + +##### `@MessageExceptionHandler` + +An application can use `@MessageExceptionHandler` methods to handle exceptions from`@MessageMapping` methods. You can declare exceptions in the annotation +itself or through a method argument if you want to get access to the exception instance. +The following example declares an exception through a method argument: + +``` +@Controller +public class MyController { + + // ... + + @MessageExceptionHandler + public ApplicationError handleException(MyException exception) { + // ... + return appError; + } +} +``` + +`@MessageExceptionHandler` methods support flexible method signatures and support +the same method argument types and return values as[`@MessageMapping`](#websocket-stomp-message-mapping) methods. + +Typically, `@MessageExceptionHandler` methods apply within the `@Controller` class +(or class hierarchy) in which they are declared. If you want such methods to apply +more globally (across controllers), you can declare them in a class marked with`@ControllerAdvice`. This is comparable to the[similar support](#mvc-ann-controller-advice) available in Spring MVC. + +#### 4.4.7. Sending Messages + +What if you want to send messages to connected clients from any part of the +application? Any application component can send messages to the `brokerChannel`. +The easiest way to do so is to inject a `SimpMessagingTemplate` and +use it to send messages. 
Typically, you would inject it by +type, as the following example shows: + +``` +@Controller +public class GreetingController { + + private SimpMessagingTemplate template; + + @Autowired + public GreetingController(SimpMessagingTemplate template) { + this.template = template; + } + + @RequestMapping(path="/greetings", method=POST) + public void greet(String greeting) { + String text = "[" + getTimestamp() + "]:" + greeting; + this.template.convertAndSend("/topic/greetings", text); + } + +} +``` + +However, you can also qualify it by its name (`brokerMessagingTemplate`), if another +bean of the same type exists. + +#### 4.4.8. Simple Broker + +The built-in simple message broker handles subscription requests from clients, +stores them in memory, and broadcasts messages to connected clients that have matching +destinations. The broker supports path-like destinations, including subscriptions +to Ant-style destination patterns. + +| |Applications can also use dot-separated (rather than slash-separated) destinations.
    See [Dots as Separators](#websocket-stomp-destination-separator).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------| + +If configured with a task scheduler, the simple broker supports[STOMP heartbeats](https://stomp.github.io/stomp-specification-1.2.html#Heart-beating). +To configure a scheduler, you can declare your own `TaskScheduler` bean and set it through +the `MessageBrokerRegistry`. Alternatively, you can use the one that is automatically +declared in the built-in WebSocket configuration, however, you’ll' need `@Lazy` to avoid +a cycle between the built-in WebSocket configuration and your`WebSocketMessageBrokerConfigurer`. For example: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + private TaskScheduler messageBrokerTaskScheduler; + + @Autowired + public void setMessageBrokerTaskScheduler(@Lazy TaskScheduler taskScheduler) { + this.messageBrokerTaskScheduler = taskScheduler; + } + + @Override + public void configureMessageBroker(MessageBrokerRegistry registry) { + registry.enableSimpleBroker("/queue/", "/topic/") + .setHeartbeatValue(new long[] {10000, 20000}) + .setTaskScheduler(this.messageBrokerTaskScheduler); + + // ... + } +} +``` + +#### 4.4.9. External Broker + +The simple broker is great for getting started but supports only a subset of +STOMP commands (it does not support acks, receipts, and some other features), +relies on a simple message-sending loop, and is not suitable for clustering. +As an alternative, you can upgrade your applications to use a full-featured +message broker. + +See the STOMP documentation for your message broker of choice (such as[RabbitMQ](https://www.rabbitmq.com/stomp.html),[ActiveMQ](https://activemq.apache.org/stomp.html), and others), install the broker, +and run it with STOMP support enabled. 
Then you can enable the STOMP broker relay +(instead of the simple broker) in the Spring configuration. + +The following example configuration enables a full-featured broker: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + @Override + public void registerStompEndpoints(StompEndpointRegistry registry) { + registry.addEndpoint("/portfolio").withSockJS(); + } + + @Override + public void configureMessageBroker(MessageBrokerRegistry registry) { + registry.enableStompBrokerRelay("/topic", "/queue"); + registry.setApplicationDestinationPrefixes("/app"); + } + +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + + + +``` + +The STOMP broker relay in the preceding configuration is a Spring[`MessageHandler`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/messaging/MessageHandler.html)that handles messages by forwarding them to an external message broker. +To do so, it establishes TCP connections to the broker, forwards all messages to it, +and then forwards all messages received from the broker to clients through their +WebSocket sessions. Essentially, it acts as a “relay” that forwards messages +in both directions. + +| |Add `io.projectreactor.netty:reactor-netty` and `io.netty:netty-all`dependencies to your project for TCP connection management.| +|---|-------------------------------------------------------------------------------------------------------------------------------| + +Furthermore, application components (such as HTTP request handling methods, +business services, and others) can also send messages to the broker relay, as described +in [Sending Messages](#websocket-stomp-handle-send), to broadcast messages to subscribed WebSocket clients. + +In effect, the broker relay enables robust and scalable message broadcasting. + +#### 4.4.10. 
Connecting to a Broker + +A STOMP broker relay maintains a single “system” TCP connection to the broker. +This connection is used for messages originating from the server-side application +only, not for receiving messages. You can configure the STOMP credentials (that is, +the STOMP frame `login` and `passcode` headers) for this connection. This is exposed +in both the XML namespace and Java configuration as the `systemLogin` and`systemPasscode` properties with default values of `guest` and `guest`. + +The STOMP broker relay also creates a separate TCP connection for every connected +WebSocket client. You can configure the STOMP credentials that are used for all TCP +connections created on behalf of clients. This is exposed in both the XML namespace +and Java configuration as the `clientLogin` and `clientPasscode` properties with default +values of `guest` and `guest`. + +| |The STOMP broker relay always sets the `login` and `passcode` headers on every `CONNECT`frame that it forwards to the broker on behalf of clients. Therefore, WebSocket clients
    need not set those headers. They are ignored. As the [Authentication](#websocket-stomp-authentication) section explains, WebSocket clients should instead rely on HTTP authentication to protect
    the WebSocket endpoint and establish the client identity.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The STOMP broker relay also sends and receives heartbeats to and from the message +broker over the “system” TCP connection. You can configure the intervals for sending +and receiving heartbeats (10 seconds each by default). If connectivity to the broker +is lost, the broker relay continues to try to reconnect, every 5 seconds, +until it succeeds. + +Any Spring bean can implement `ApplicationListener`to receive notifications when the “system” connection to the broker is lost and +re-established. For example, a Stock Quote service that broadcasts stock quotes can +stop trying to send messages when there is no active “system” connection. + +By default, the STOMP broker relay always connects, and reconnects as needed if +connectivity is lost, to the same host and port. If you wish to supply multiple addresses, +on each attempt to connect, you can configure a supplier of addresses, instead of a +fixed host and port. The following example shows how to do that: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig extends AbstractWebSocketMessageBrokerConfigurer { + + // ... + + @Override + public void configureMessageBroker(MessageBrokerRegistry registry) { + registry.enableStompBrokerRelay("/queue/", "/topic/").setTcpClient(createTcpClient()); + registry.setApplicationDestinationPrefixes("/app"); + } + + private ReactorNettyTcpClient createTcpClient() { + return new ReactorNettyTcpClient<>( + client -> client.addressSupplier(() -> ... 
), + new StompReactorNettyCodec()); + } +} +``` + +You can also configure the STOMP broker relay with a `virtualHost` property. +The value of this property is set as the `host` header of every `CONNECT` frame +and can be useful (for example, in a cloud environment where the actual host to which +the TCP connection is established differs from the host that provides the +cloud-based STOMP service). + +#### 4.4.11. Dots as Separators + +When messages are routed to `@MessageMapping` methods, they are matched with`AntPathMatcher`. By default, patterns are expected to use slash (`/`) as the separator. +This is a good convention in web applications and similar to HTTP URLs. However, if +you are more used to messaging conventions, you can switch to using dot (`.`) as the separator. + +The following example shows how to do so in Java configuration: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + // ... + + @Override + public void configureMessageBroker(MessageBrokerRegistry registry) { + registry.setPathMatcher(new AntPathMatcher(".")); + registry.enableStompBrokerRelay("/queue", "/topic"); + registry.setApplicationDestinationPrefixes("/app"); + } +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + + + + + +``` + +After that, a controller can use a dot (`.`) as the separator in `@MessageMapping` methods, +as the following example shows: + +``` +@Controller +@MessageMapping("red") +public class RedController { + + @MessageMapping("blue.{green}") + public void handleGreen(@DestinationVariable String green) { + // ... + } +} +``` + +The client can now send a message to `/app/red.blue.green123`. + +In the preceding example, we did not change the prefixes on the “broker relay”, because those +depend entirely on the external message broker. 
See the STOMP documentation pages for +the broker you use to see what conventions it supports for the destination header. + +The “simple broker”, on the other hand, does rely on the configured `PathMatcher`, so, if +you switch the separator, that change also applies to the broker and the way the broker matches +destinations from a message to patterns in subscriptions. + +#### 4.4.12. Authentication + +Every STOMP over WebSocket messaging session begins with an HTTP request. +That can be a request to upgrade to WebSockets (that is, a WebSocket handshake) +or, in the case of SockJS fallbacks, a series of SockJS HTTP transport requests. + +Many web applications already have authentication and authorization in place to +secure HTTP requests. Typically, a user is authenticated through Spring Security +by using some mechanism such as a login page, HTTP basic authentication, or another way. +The security context for the authenticated user is saved in the HTTP session +and is associated with subsequent requests in the same cookie-based session. + +Therefore, for a WebSocket handshake or for SockJS HTTP transport requests, +typically, there is already an authenticated user accessible through`HttpServletRequest#getUserPrincipal()`. Spring automatically associates that user +with a WebSocket or SockJS session created for them and, subsequently, with all +STOMP messages transported over that session through a user header. + +In short, a typical web application needs to do nothing +beyond what it already does for security. The user is authenticated at +the HTTP request level with a security context that is maintained through a cookie-based +HTTP session (which is then associated with WebSocket or SockJS sessions created +for that user) and results in a user header being stamped on every `Message` flowing +through the application. + +The STOMP protocol does have `login` and `passcode` headers on the `CONNECT` frame. 
+Those were originally designed for and are needed for STOMP over TCP. However, for STOMP +over WebSocket, by default, Spring ignores authentication headers at the STOMP protocol +level, and assumes that the user is already authenticated at the HTTP transport level. +The expectation is that the WebSocket or SockJS session contain the authenticated user. + +#### 4.4.13. Token Authentication + +[Spring Security OAuth](https://github.com/spring-projects/spring-security-oauth)provides support for token based security, including JSON Web Token (JWT). +You can use this as the authentication mechanism in Web applications, +including STOMP over WebSocket interactions, as described in the previous +section (that is, to maintain identity through a cookie-based session). + +At the same time, cookie-based sessions are not always the best fit (for example, +in applications that do not maintain a server-side session or in +mobile applications where it is common to use headers for authentication). + +The [WebSocket protocol, RFC 6455](https://tools.ietf.org/html/rfc6455#section-10.5)"doesn’t prescribe any particular way that servers can authenticate clients during +the WebSocket handshake." In practice, however, browser clients can use only standard +authentication headers (that is, basic HTTP authentication) or cookies and cannot (for example) +provide custom headers. Likewise, the SockJS JavaScript client does not provide +a way to send HTTP headers with SockJS transport requests. See[sockjs-client issue 196](https://github.com/sockjs/sockjs-client/issues/196). +Instead, it does allow sending query parameters that you can use to send a token, +but that has its own drawbacks (for example, the token may be inadvertently +logged with the URL in server logs). + +| |The preceding limitations are for browser-based clients and do not apply to the
    Spring Java-based STOMP client, which does support sending headers with both
    WebSocket and SockJS requests.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Therefore, applications that wish to avoid the use of cookies may not have any good +alternatives for authentication at the HTTP protocol level. Instead of using cookies, +they may prefer to authenticate with headers at the STOMP messaging protocol level. +Doing so requires two simple steps: + +1. Use the STOMP client to pass authentication headers at connect time. + +2. Process the authentication headers with a `ChannelInterceptor`. + +The next example uses server-side configuration to register a custom authentication +interceptor. Note that an interceptor needs only to authenticate and set +the user header on the CONNECT `Message`. Spring notes and saves the authenticated +user and associate it with subsequent STOMP messages on the same session. The following +example shows how register a custom authentication interceptor: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class MyConfig implements WebSocketMessageBrokerConfigurer { + + @Override + public void configureClientInboundChannel(ChannelRegistration registration) { + registration.interceptors(new ChannelInterceptor() { + @Override + public Message preSend(Message message, MessageChannel channel) { + StompHeaderAccessor accessor = + MessageHeaderAccessor.getAccessor(message, StompHeaderAccessor.class); + if (StompCommand.CONNECT.equals(accessor.getCommand())) { + Authentication user = ... ; // access authentication header(s) + accessor.setUser(user); + } + return message; + } + }); + } +} +``` + +Also, note that, when you use Spring Security’s authorization for messages, at present, +you need to ensure that the authentication `ChannelInterceptor` config is ordered +ahead of Spring Security’s. 
This is best done by declaring the custom interceptor in +its own implementation of `WebSocketMessageBrokerConfigurer` that is marked with`@Order(Ordered.HIGHEST_PRECEDENCE + 99)`. + +#### 4.4.14. Authorization + +Spring Security provides[WebSocket sub-protocol authorization](https://docs.spring.io/spring-security/reference/servlet/integrations/websocket.html#websocket-authorization)that uses a `ChannelInterceptor` to authorize messages based on the user header in them. +Also, Spring Session provides[WebSocket integration](https://docs.spring.io/spring-session/reference/web-socket.html)that ensures the user’s HTTP session does not expire while the WebSocket session is still active. + +#### 4.4.15. User Destinations + +An application can send messages that target a specific user, and Spring’s STOMP support +recognizes destinations prefixed with `/user/` for this purpose. +For example, a client might subscribe to the `/user/queue/position-updates` destination.`UserDestinationMessageHandler` handles this destination and transforms it into a +destination unique to the user session (such as `/queue/position-updates-user123`). +This provides the convenience of subscribing to a generically named destination while, +at the same time, ensuring no collisions with other users who subscribe to the same +destination so that each user can receive unique stock position updates. + +| |When working with user destinations, it is important to configure broker and
    application destination prefixes as shown in [Enable STOMP](#websocket-stomp-enable), or otherwise the
    broker would handle "/user" prefixed messages that should only be handled by`UserDestinationMessageHandler`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +On the sending side, messages can be sent to a destination such as`/user/{username}/queue/position-updates`, which in turn is translated +by the `UserDestinationMessageHandler` into one or more destinations, one for each +session associated with the user. This lets any component within the application +send messages that target a specific user without necessarily knowing anything more +than their name and the generic destination. This is also supported through an +annotation and a messaging template. + +A message-handling method can send messages to the user associated with +the message being handled through the `@SendToUser` annotation (also supported on +the class-level to share a common destination), as the following example shows: + +``` +@Controller +public class PortfolioController { + + @MessageMapping("/trade") + @SendToUser("/queue/position-updates") + public TradeResult executeTrade(Trade trade, Principal principal) { + // ... + return tradeResult; + } +} +``` + +If the user has more than one session, by default, all of the sessions subscribed +to the given destination are targeted. However, sometimes, it may be necessary to +target only the session that sent the message being handled. 
You can do so by +setting the `broadcast` attribute to false, as the following example shows: + +``` +@Controller +public class MyController { + + @MessageMapping("/action") + public void handleAction() throws Exception{ + // raise MyBusinessException here + } + + @MessageExceptionHandler + @SendToUser(destinations="/queue/errors", broadcast=false) + public ApplicationError handleException(MyBusinessException exception) { + // ... + return appError; + } +} +``` + +| |While user destinations generally imply an authenticated user, it is not strictly required.
    A WebSocket session that is not associated with an authenticated user
    can subscribe to a user destination. In such cases, the `@SendToUser` annotation
    behaves exactly the same as with `broadcast=false` (that is, targeting only the
    session that sent the message being handled).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can send a message to user destinations from any application +component by, for example, injecting the `SimpMessagingTemplate` created by the Java configuration or +the XML namespace. (The bean name is `brokerMessagingTemplate` if required +for qualification with `@Qualifier`.) The following example shows how to do so: + +``` +@Service +public class TradeServiceImpl implements TradeService { + + private final SimpMessagingTemplate messagingTemplate; + + @Autowired + public TradeServiceImpl(SimpMessagingTemplate messagingTemplate) { + this.messagingTemplate = messagingTemplate; + } + + // ... + + public void afterTradeExecuted(Trade trade) { + this.messagingTemplate.convertAndSendToUser( + trade.getUserName(), "/queue/position-updates", trade.getResult()); + } +} +``` + +| |When you use user destinations with an external message broker, you should check the broker
    documentation on how to manage inactive queues, so that, when the user session is
    over, all unique user queues are removed. For example, RabbitMQ creates auto-delete
    queues when you use destinations such as `/exchange/amq.direct/position-updates`.
    So, in that case, the client could subscribe to `/user/exchange/amq.direct/position-updates`.
    Similarly, ActiveMQ has[configuration options](https://activemq.apache.org/delete-inactive-destinations.html)for purging inactive destinations.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In a multi-application server scenario, a user destination may remain unresolved because +the user is connected to a different server. In such cases, you can configure a +destination to broadcast unresolved messages so that other servers have a chance to try. +This can be done through the `userDestinationBroadcast` property of the`MessageBrokerRegistry` in Java configuration and the `user-destination-broadcast` attribute +of the `message-broker` element in XML. + +#### 4.4.16. Order of Messages + +Messages from the broker are published to the `clientOutboundChannel`, from where they are +written to WebSocket sessions. As the channel is backed by a `ThreadPoolExecutor`, messages +are processed in different threads, and the resulting sequence received by the client may +not match the exact order of publication. + +If this is an issue, enable the `setPreservePublishOrder` flag, as the following example shows: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class MyConfig implements WebSocketMessageBrokerConfigurer { + + @Override + protected void configureMessageBroker(MessageBrokerRegistry registry) { + // ... 
+ registry.setPreservePublishOrder(true); + } + +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + +``` + +When the flag is set, messages within the same client session are published to the`clientOutboundChannel` one at a time, so that the order of publication is guaranteed. +Note that this incurs a small performance overhead, so you should enable it only if it is required. + +#### 4.4.17. Events + +Several `ApplicationContext` events are published and can be +received by implementing Spring’s `ApplicationListener` interface: + +* `BrokerAvailabilityEvent`: Indicates when the broker becomes available or unavailable. + While the “simple” broker becomes available immediately on startup and remains so while + the application is running, the STOMP “broker relay” can lose its connection + to the full featured broker (for example, if the broker is restarted). The broker relay + has reconnect logic and re-establishes the “system” connection to the broker + when it comes back. As a result, this event is published whenever the state changes from connected + to disconnected and vice-versa. Components that use the `SimpMessagingTemplate` should + subscribe to this event and avoid sending messages at times when the broker is not + available. In any case, they should be prepared to handle `MessageDeliveryException`when sending a message. + +* `SessionConnectEvent`: Published when a new STOMP CONNECT is received to + indicate the start of a new client session. The event contains the message that represents the + connect, including the session ID, user information (if any), and any custom headers the client + sent. This is useful for tracking client sessions. Components subscribed + to this event can wrap the contained message with `SimpMessageHeaderAccessor` or`StompMessageHeaderAccessor`. 
+ +* `SessionConnectedEvent`: Published shortly after a `SessionConnectEvent` when the + broker has sent a STOMP CONNECTED frame in response to the CONNECT. At this point, the + STOMP session can be considered fully established. + +* `SessionSubscribeEvent`: Published when a new STOMP SUBSCRIBE is received. + +* `SessionUnsubscribeEvent`: Published when a new STOMP UNSUBSCRIBE is received. + +* `SessionDisconnectEvent`: Published when a STOMP session ends. The DISCONNECT may + have been sent from the client or it may be automatically generated when the + WebSocket session is closed. In some cases, this event is published more than once + per session. Components should be idempotent with regard to multiple disconnect events. + +| |When you use a full-featured broker, the STOMP “broker relay” automatically reconnects the
    “system” connection if the broker becomes temporarily unavailable. Client connections,
    however, are not automatically reconnected. Assuming heartbeats are enabled, the client
    typically notices the broker is not responding within 10 seconds. Clients need to
    implement their own reconnecting logic.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.4.18. Interception + +[Events](#websocket-stomp-appplication-context-events) provide notifications for the lifecycle +of a STOMP connection but not for every client message. Applications can also register a`ChannelInterceptor` to intercept any message and in any part of the processing chain. +The following example shows how to intercept inbound messages from clients: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + @Override + public void configureClientInboundChannel(ChannelRegistration registration) { + registration.interceptors(new MyChannelInterceptor()); + } +} +``` + +A custom `ChannelInterceptor` can use `StompHeaderAccessor` or `SimpMessageHeaderAccessor`to access information about the message, as the following example shows: + +``` +public class MyChannelInterceptor implements ChannelInterceptor { + + @Override + public Message preSend(Message message, MessageChannel channel) { + StompHeaderAccessor accessor = StompHeaderAccessor.wrap(message); + StompCommand command = accessor.getStompCommand(); + // ... + return message; + } +} +``` + +Applications can also implement `ExecutorChannelInterceptor`, which is a sub-interface +of `ChannelInterceptor` with callbacks in the thread in which the messages are handled. +While a `ChannelInterceptor` is invoked once for each message sent to a channel, the`ExecutorChannelInterceptor` provides hooks in the thread of each `MessageHandler`subscribed to messages from the channel. 
+ +Note that, as with the `SessionDisconnectEvent` described earlier, a DISCONNECT message +can be from the client or it can also be automatically generated when +the WebSocket session is closed. In some cases, an interceptor may intercept this +message more than once for each session. Components should be idempotent with regard to +multiple disconnect events. + +#### 4.4.19. STOMP Client + +Spring provides a STOMP over WebSocket client and a STOMP over TCP client. + +To begin, you can create and configure `WebSocketStompClient`, as the following example shows: + +``` +WebSocketClient webSocketClient = new StandardWebSocketClient(); +WebSocketStompClient stompClient = new WebSocketStompClient(webSocketClient); +stompClient.setMessageConverter(new StringMessageConverter()); +stompClient.setTaskScheduler(taskScheduler); // for heartbeats +``` + +In the preceding example, you could replace `StandardWebSocketClient` with `SockJsClient`, +since that is also an implementation of `WebSocketClient`. The `SockJsClient` can +use WebSocket or HTTP-based transport as a fallback. For more details, see[`SockJsClient`](#websocket-fallback-sockjs-client). + +Next, you can establish a connection and provide a handler for the STOMP session, +as the following example shows: + +``` +String url = "ws://127.0.0.1:8080/endpoint"; +StompSessionHandler sessionHandler = new MyStompSessionHandler(); +stompClient.connect(url, sessionHandler); +``` + +When the session is ready for use, the handler is notified, as the following example shows: + +``` +public class MyStompSessionHandler extends StompSessionHandlerAdapter { + + @Override + public void afterConnected(StompSession session, StompHeaders connectedHeaders) { + // ... + } +} +``` + +Once the session is established, any payload can be sent and is +serialized with the configured `MessageConverter`, as the following example shows: + +``` +session.send("/topic/something", "payload"); +``` + +You can also subscribe to destinations. 
The `subscribe` methods require a handler +for messages on the subscription and return a `Subscription` handle that you can +use to unsubscribe. For each received message, the handler can specify the target`Object` type to which the payload should be deserialized, as the following example shows: + +``` +session.subscribe("/topic/something", new StompFrameHandler() { + + @Override + public Type getPayloadType(StompHeaders headers) { + return String.class; + } + + @Override + public void handleFrame(StompHeaders headers, Object payload) { + // ... + } + +}); +``` + +To enable STOMP heartbeat, you can configure `WebSocketStompClient` with a `TaskScheduler`and optionally customize the heartbeat intervals (10 seconds for write inactivity, +which causes a heartbeat to be sent, and 10 seconds for read inactivity, which +closes the connection). + +`WebSocketStompClient` sends a heartbeat only in case of inactivity, i.e. when no +other messages are sent. This can present a challenge when using an external broker +since messages with a non-broker destination represent activity but aren’t actually +forwarded to the broker. In that case you can configure a `TaskScheduler`when initializing the [External Broker](#websocket-stomp-handle-broker-relay) which ensures a +heartbeat is forwarded to the broker also when only messages with a non-broker +destination are sent. + +| |When you use `WebSocketStompClient` for performance tests to simulate thousands
    of clients from the same machine, consider turning off heartbeats, since each
    connection schedules its own heartbeat tasks and that is not optimized for
    a large number of clients running on the same machine.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The STOMP protocol also supports receipts, where the client must add a `receipt`header to which the server responds with a RECEIPT frame after the send or +subscribe are processed. To support this, the `StompSession` offers`setAutoReceipt(boolean)` that causes a `receipt` header to be +added on every subsequent send or subscribe event. +Alternatively, you can also manually add a receipt header to the `StompHeaders`. +Both send and subscribe return an instance of `Receiptable`that you can use to register for receipt success and failure callbacks. +For this feature, you must configure the client with a `TaskScheduler`and the amount of time before a receipt expires (15 seconds by default). + +Note that `StompSessionHandler` itself is a `StompFrameHandler`, which lets +it handle ERROR frames in addition to the `handleException` callback for +exceptions from the handling of messages and `handleTransportError` for +transport-level errors including `ConnectionLostException`. + +#### 4.4.20. WebSocket Scope + +Each WebSocket session has a map of attributes. The map is attached as a header to +inbound client messages and may be accessed from a controller method, as the following example shows: + +``` +@Controller +public class MyController { + + @MessageMapping("/action") + public void handle(SimpMessageHeaderAccessor headerAccessor) { + Map attrs = headerAccessor.getSessionAttributes(); + // ... + } +} +``` + +You can declare a Spring-managed bean in the `websocket` scope. +You can inject WebSocket-scoped beans into controllers and any channel interceptors +registered on the `clientInboundChannel`. 
Those are typically singletons and live +longer than any individual WebSocket session. Therefore, you need to use a +scope proxy mode for WebSocket-scoped beans, as the following example shows: + +``` +@Component +@Scope(scopeName = "websocket", proxyMode = ScopedProxyMode.TARGET_CLASS) +public class MyBean { + + @PostConstruct + public void init() { + // Invoked after dependencies injected + } + + // ... + + @PreDestroy + public void destroy() { + // Invoked when the WebSocket session ends + } +} + +@Controller +public class MyController { + + private final MyBean myBean; + + @Autowired + public MyController(MyBean myBean) { + this.myBean = myBean; + } + + @MessageMapping("/action") + public void handle() { + // this.myBean from the current WebSocket session + } +} +``` + +As with any custom scope, Spring initializes a new `MyBean` instance the first +time it is accessed from the controller and stores the instance in the WebSocket +session attributes. The same instance is subsequently returned until the session +ends. WebSocket-scoped beans have all Spring lifecycle methods invoked, as +shown in the preceding examples. + +#### 4.4.21. Performance + +There is no silver bullet when it comes to performance. Many factors +affect it, including the size and volume of messages, whether application +methods perform work that requires blocking, and external factors +(such as network speed and other issues). The goal of this section is to provide +an overview of the available configuration options along with some thoughts +on how to reason about scaling. + +In a messaging application, messages are passed through channels for asynchronous +executions that are backed by thread pools. Configuring such an application requires +good knowledge of the channels and the flow of messages. Therefore, it is +recommended to review [Flow of Messages](#websocket-stomp-message-flow). 
+ +The obvious place to start is to configure the thread pools that back the`clientInboundChannel` and the `clientOutboundChannel`. By default, both +are configured at twice the number of available processors. + +If the handling of messages in annotated methods is mainly CPU-bound, the +number of threads for the `clientInboundChannel` should remain close to the +number of processors. If the work they do is more IO-bound and requires blocking +or waiting on a database or other external system, the thread pool size +probably needs to be increased. + +| |`ThreadPoolExecutor` has three important properties: the core thread pool size,
    the max thread pool size, and the capacity for the queue to store
    tasks for which there are no available threads.

    A common point of confusion is that configuring the core pool size (for example, 10)
    and max pool size (for example, 20) results in a thread pool with 10 to 20 threads.
    In fact, if the capacity is left at its default value of Integer.MAX\_VALUE,
    the thread pool never increases beyond the core pool size, since
    all additional tasks are queued.

    See the javadoc of `ThreadPoolExecutor` to learn how these properties work and
    understand the various queuing strategies.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +On the `clientOutboundChannel` side, it is all about sending messages to WebSocket +clients. If clients are on a fast network, the number of threads should +remain close to the number of available processors. If they are slow or on +low bandwidth, they take longer to consume messages and put a burden on the +thread pool. Therefore, increasing the thread pool size becomes necessary. + +While the workload for the `clientInboundChannel` is possible to predict — after all, it is based on what the application does — how to configure the +"clientOutboundChannel" is harder, as it is based on factors beyond +the control of the application. For this reason, two additional +properties relate to the sending of messages: `sendTimeLimit`and `sendBufferSizeLimit`. You can use those methods to configure how long a +send is allowed to take and how much data can be buffered when sending +messages to a client. + +The general idea is that, at any given time, only a single thread can be used +to send to a client. All additional messages, meanwhile, get buffered, and you +can use these properties to decide how long sending a message is allowed to +take and how much data can be buffered in the meantime. 
See the javadoc and +documentation of the XML schema for important additional details. + +The following example shows a possible configuration: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + @Override + public void configureWebSocketTransport(WebSocketTransportRegistration registration) { + registration.setSendTimeLimit(15 * 1000).setSendBufferSizeLimit(512 * 1024); + } + + // ... + +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + +``` + +You can also use the WebSocket transport configuration shown earlier to configure the +maximum allowed size for incoming STOMP messages. In theory, a WebSocket +message can be almost unlimited in size. In practice, WebSocket servers impose +limits — for example, 8K on Tomcat and 64K on Jetty. For this reason, STOMP clients +(such as the JavaScript [webstomp-client](https://github.com/JSteunou/webstomp-client)and others) split larger STOMP messages at 16K boundaries and send them as multiple +WebSocket messages, which requires the server to buffer and re-assemble. + +Spring’s STOMP-over-WebSocket support does this, so applications can configure the +maximum size for STOMP messages irrespective of WebSocket server-specific message +sizes. Keep in mind that the WebSocket message size is automatically +adjusted, if necessary, to ensure they can carry 16K WebSocket messages at a +minimum. + +The following example shows one possible configuration: + +``` +@Configuration +@EnableWebSocketMessageBroker +public class WebSocketConfig implements WebSocketMessageBrokerConfigurer { + + @Override + public void configureWebSocketTransport(WebSocketTransportRegistration registration) { + registration.setMessageSizeLimit(128 * 1024); + } + + // ... 
+ +} +``` + +The following example shows the XML configuration equivalent of the preceding example: + +``` + + + + + + + + +``` + +An important point about scaling involves using multiple application instances. +Currently, you cannot do that with the simple broker. +However, when you use a full-featured broker (such as RabbitMQ), each application +instance connects to the broker, and messages broadcast from one application +instance can be broadcast through the broker to WebSocket clients connected +through any other application instances. + +#### 4.4.22. Monitoring + +When you use `@EnableWebSocketMessageBroker` or `<websocket:message-broker>`, key +infrastructure components automatically gather statistics and counters that provide +important insight into the internal state of the application. The configuration +also declares a bean of type `WebSocketMessageBrokerStats` that gathers all +available information in one place and by default logs it at the `INFO` level once +every 30 minutes. This bean can be exported to JMX through Spring’s`MBeanExporter` for viewing at runtime (for example, through JDK’s `jconsole`). +The following list summarizes the available information: + +Client WebSocket Sessions + +Current + +Indicates how many client sessions there are +currently, with the count further broken down by WebSocket versus HTTP +streaming and polling SockJS sessions. + +Total + +Indicates how many total sessions have been established. + +Abnormally Closed + +Connect Failures + +Sessions that got established but were +closed after not having received any messages within 60 seconds. This is +usually an indication of proxy or network issues. + +Send Limit Exceeded + +Sessions closed after exceeding the configured send +timeout or the send buffer limits, which can occur with slow clients +(see previous section). + +Transport Errors + +Sessions closed after a transport error, such as +failure to read or write to a WebSocket connection or +HTTP request or response. 
+ +STOMP Frames + +The total number of CONNECT, CONNECTED, and DISCONNECT frames +processed, indicating how many clients connected on the STOMP level. Note that +the DISCONNECT count may be lower when sessions get closed abnormally or when +clients close without sending a DISCONNECT frame. + +STOMP Broker Relay + +TCP Connections + +Indicates how many TCP connections on behalf of client +WebSocket sessions are established to the broker. This should be equal to the +number of client WebSocket sessions + 1 additional shared “system” connection +for sending messages from within the application. + +STOMP Frames + +The total number of CONNECT, CONNECTED, and DISCONNECT frames +forwarded to or received from the broker on behalf of clients. Note that a +DISCONNECT frame is sent to the broker regardless of how the client WebSocket +session was closed. Therefore, a lower DISCONNECT frame count is an indication +that the broker is pro-actively closing connections (maybe because of a +heartbeat that did not arrive in time, an invalid input frame, or other issue). + +Client Inbound Channel + +Statistics from the thread pool that backs the `clientInboundChannel`that provide insight into the health of incoming message processing. Tasks queueing +up here is an indication that the application may be too slow to handle messages. +If there are I/O-bound tasks (for example, slow database queries, HTTP requests to third party +REST API, and so on), consider increasing the thread pool size. + +Client Outbound Channel + +Statistics from the thread pool that backs the `clientOutboundChannel`that provide insight into the health of broadcasting messages to clients. Tasks +queueing up here is an indication clients are too slow to consume messages. +One way to address this is to increase the thread pool size to accommodate the +expected number of concurrent slow clients. Another option is to reduce the +send timeout and send buffer size limits (see the previous section). 
+ +SockJS Task Scheduler + +Statistics from the thread pool of the SockJS task scheduler that +is used to send heartbeats. Note that, when heartbeats are negotiated on the +STOMP level, the SockJS heartbeats are disabled. + +#### 4.4.23. Testing + +There are two main approaches to testing applications when you use Spring’s STOMP-over-WebSocket +support. The first is to write server-side tests to verify the functionality +of controllers and their annotated message-handling methods. The second is to write +full end-to-end tests that involve running a client and a server. + +The two approaches are not mutually exclusive. On the contrary, each has a place +in an overall test strategy. Server-side tests are more focused and easier to write +and maintain. End-to-end integration tests, on the other hand, are more complete and +test much more, but they are also more involved to write and maintain. + +The simplest form of server-side tests is to write controller unit tests. However, +this is not useful enough, since much of what a controller does depends on its +annotations. Pure unit tests simply cannot test that. + +Ideally, controllers under test should be invoked as they are at runtime, much like +the approach to testing controllers that handle HTTP requests by using the Spring MVC Test +framework — that is, without running a Servlet container but relying on the Spring Framework +to invoke the annotated controllers. As with Spring MVC Test, you have two +possible alternatives here, either use a “context-based” or use a “standalone” setup: + +* Load the actual Spring configuration with the help of the + Spring TestContext framework, inject `clientInboundChannel` as a test field, and + use it to send messages to be handled by controller methods. + +* Manually set up the minimum Spring framework infrastructure required to invoke + controllers (namely the `SimpAnnotationMethodMessageHandler`) and pass messages for + controllers directly to it. 
+ +Both of these setup scenarios are demonstrated in the[tests for the stock portfolio](https://github.com/rstoyanchev/spring-websocket-portfolio/tree/master/src/test/java/org/springframework/samples/portfolio/web)sample application. + +The second approach is to create end-to-end integration tests. For that, you need +to run a WebSocket server in embedded mode and connect to it as a WebSocket client +that sends WebSocket messages containing STOMP frames. +The [tests for the stock portfolio](https://github.com/rstoyanchev/spring-websocket-portfolio/tree/master/src/test/java/org/springframework/samples/portfolio/web)sample application also demonstrate this approach by using Tomcat as the embedded +WebSocket server and a simple STOMP client for test purposes. + +## 5. Other Web Frameworks + +This chapter details Spring’s integration with third-party web frameworks. + +One of the core value propositions of the Spring Framework is that of enabling*choice*. In a general sense, Spring does not force you to use or buy into any +particular architecture, technology, or methodology (although it certainly recommends +some over others). This freedom to pick and choose the architecture, technology, or +methodology that is most relevant to a developer and their development team is +arguably most evident in the web area, where Spring provides its own web frameworks +([Spring MVC](#mvc) and [Spring WebFlux](webflux.html#webflux)) while, at the same time, +supporting integration with a number of popular third-party web frameworks. + +### 5.1. Common Configuration + +Before diving into the integration specifics of each supported web framework, let us +first take a look at common Spring configuration that is not specific to any one web +framework. (This section is equally applicable to Spring’s own web framework variants.) + +One of the concepts (for want of a better word) espoused by Spring’s lightweight +application model is that of a layered architecture. 
Remember that in a “classic” +layered architecture, the web layer is but one of many layers. It serves as one of the +entry points into a server-side application, and it delegates to service objects +(facades) that are defined in a service layer to satisfy business-specific (and +presentation-technology agnostic) use cases. In Spring, these service objects, any other +business-specific objects, data-access objects, and others exist in a distinct “business +context”, which contains no web or presentation layer objects (presentation objects, +such as Spring MVC controllers, are typically configured in a distinct “presentation +context”). This section details how you can configure a Spring container (a`WebApplicationContext`) that contains all of the 'business beans' in your application. + +Moving on to specifics, all you need to do is declare a[`ContextLoaderListener`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/context/ContextLoaderListener.html)in the standard Java EE servlet `web.xml` file of your web application and add a`contextConfigLocation`\ section (in the same file) that defines which +set of Spring XML configuration files to load. + +Consider the following `` configuration: + +``` + + org.springframework.web.context.ContextLoaderListener + +``` + +Further consider the following `` configuration: + +``` + + contextConfigLocation + /WEB-INF/applicationContext*.xml + +``` + +If you do not specify the `contextConfigLocation` context parameter, the`ContextLoaderListener` looks for a file called `/WEB-INF/applicationContext.xml` to +load. Once the context files are loaded, Spring creates a[`WebApplicationContext`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/context/WebApplicationContext.html)object based on the bean definitions and stores it in the `ServletContext` of the web +application. 
+ +All Java web frameworks are built on top of the Servlet API, so you can use the +following code snippet to get access to this “business context” `ApplicationContext`created by the `ContextLoaderListener`. + +The following example shows how to get the `WebApplicationContext`: + +``` +WebApplicationContext ctx = WebApplicationContextUtils.getWebApplicationContext(servletContext); +``` + +The[`WebApplicationContextUtils`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/context/support/WebApplicationContextUtils.html)class is for convenience, so you need not remember the name of the `ServletContext`attribute. Its `getWebApplicationContext()` method returns `null` if an object +does not exist under the `WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE`key. Rather than risk getting `NullPointerExceptions` in your application, it is better +to use the `getRequiredWebApplicationContext()` method. This method throws an exception +when the `ApplicationContext` is missing. + +Once you have a reference to the `WebApplicationContext`, you can retrieve beans by their +name or type. Most developers retrieve beans by name and then cast them to one of their +implemented interfaces. + +Fortunately, most of the frameworks in this section have simpler ways of looking up beans. +Not only do they make it easy to get beans from a Spring container, but they also let you +use dependency injection on their controllers. Each web framework section has more detail +on its specific integration strategies. + +### 5.2. JSF + +JavaServer Faces (JSF) is the JCP’s standard component-based, event-driven web +user interface framework. It is an official part of the Java EE umbrella but also +individually usable, e.g. through embedding Mojarra or MyFaces within Tomcat. + +Please note that recent versions of JSF became closely tied to CDI infrastructure +in application servers, with some new JSF functionality only working in such an +environment. 
Spring’s JSF support is not actively evolved anymore and primarily +exists for migration purposes when modernizing older JSF-based applications. + +The key element in Spring’s JSF integration is the JSF `ELResolver` mechanism. + +#### 5.2.1. Spring Bean Resolver + +`SpringBeanFacesELResolver` is a JSF compliant `ELResolver` implementation, +integrating with the standard Unified EL as used by JSF and JSP. It delegates to +Spring’s “business context” `WebApplicationContext` first and then to the +default resolver of the underlying JSF implementation. + +Configuration-wise, you can define `SpringBeanFacesELResolver` in your JSF`faces-context.xml` file, as the following example shows: + +``` + + + org.springframework.web.jsf.el.SpringBeanFacesELResolver + ... + + +``` + +#### 5.2.2. Using `FacesContextUtils` + +A custom `ELResolver` works well when mapping your properties to beans in`faces-config.xml`, but, at times, you may need to explicitly grab a bean. +The [`FacesContextUtils`](https://docs.spring.io/spring-framework/docs/5.3.16/javadoc-api/org/springframework/web/jsf/FacesContextUtils.html)class makes this easy. It is similar to `WebApplicationContextUtils`, except that +it takes a `FacesContext` parameter rather than a `ServletContext` parameter. + +The following example shows how to use `FacesContextUtils`: + +``` +ApplicationContext ctx = FacesContextUtils.getWebApplicationContext(FacesContext.getCurrentInstance()); +``` + +### 5.3. Apache Struts 2.x + +Invented by Craig McClanahan, [Struts](https://struts.apache.org) is an open-source project +hosted by the Apache Software Foundation. At the time, it greatly simplified the +JSP/Servlet programming paradigm and won over many developers who were using proprietary +frameworks. It simplified the programming model, it was open source (and thus free as in +beer), and it had a large community, which let the project grow and become popular among +Java web developers. 
+ +As a successor to the original Struts 1.x, check out Struts 2.x and the Struts-provided[Spring Plugin](https://struts.apache.org/release/2.3.x/docs/spring-plugin.html) for the +built-in Spring integration. + +### 5.4. Apache Tapestry 5.x + +[Tapestry](https://tapestry.apache.org/) is a ""Component oriented framework for creating +dynamic, robust, highly scalable web applications in Java."" + +While Spring has its own [powerful web layer](#mvc), there are a number of unique +advantages to building an enterprise Java application by using a combination of Tapestry +for the web user interface and the Spring container for the lower layers. + +For more information, see Tapestry’s dedicated[integration module for Spring](https://tapestry.apache.org/integrating-with-spring-framework.html). + +### 5.5. Further Resources + +The following links go to further resources about the various web frameworks described in +this chapter. + +* The [JSF](https://www.oracle.com/technetwork/java/javaee/javaserverfaces-139869.html) homepage + +* The [Struts](https://struts.apache.org/) homepage + +* The [Tapestry](https://tapestry.apache.org/) homepage + diff --git a/docs/en/spring-hateoas/README.md b/docs/en/spring-hateoas/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-hateoas/spring-hateoas.md b/docs/en/spring-hateoas/spring-hateoas.md new file mode 100644 index 0000000000000000000000000000000000000000..1d9e2a0a34b3dd2b2f131c13a937c9dcd268234c --- /dev/null +++ b/docs/en/spring-hateoas/spring-hateoas.md @@ -0,0 +1,2146 @@ +# Spring HATEOAS - Reference Documentation + +## 1. Preface + +### 1.1. Migrating to Spring HATEOAS 1.0 + +For 1.0 we took the chance to re-evaluate some of the design and package structure choices we had made for the 0.x branch. +There had been an incredible amount of feedback on it and the major version bump seemed to be the most natural place to refactor those. 
+ +#### 1.1.1. The changes + +The biggest changes in package structure were driven by the introduction of a hypermedia type registration API to support additional media types in Spring HATEOAS. +This lead to the clear separation of client and server APIs (packages named respectively) as well as media type implementations in the package `mediatype`. + +The easiest way to get your code base upgraded to the new API is by using the [migration script](#migrate-to-1.0.script). +Before we jump to that, here are the changes at a quick glance. + +##### Representation models + +The `ResourceSupport`/`Resource`/`Resources`/`PagedResources` group of classes never really felt appropriately named. +After all, these types do not actually manifest resources but rather representation models that can be enriched with hypermedia information and affordances. +Here’s how new names map to the old ones: + +* `ResourceSupport` is now `RepresentationModel` + +* `Resource` is now `EntityModel` + +* `Resources` is now `CollectionModel` + +* `PagedResources` is now `PagedModel` + +Consequently, `ResourceAssembler` has been renamed to `RepresentationModelAssembler` and its methods `toResource(…)` and `toResources(…)` have been renamed to `toModel(…)` and `toCollectionModel(…)` respectively. +Also the name changes have been reflected in the classes contained in `TypeReferences`. + +* `RepresentationModel.getLinks()` now exposes a `Links` instance (over a `List`) as that exposes additional API to concatenate and merge different `Links` instances using various strategies. + Also it has been turned into a self-bound generic type to allow the methods that add links to the instance return the instance itself. + +* The `LinkDiscoverer` API has been moved to the `client` package. + +* The `LinkBuilder` and `EntityLinks` APIs have been moved to the `server` package. + +* `ControllerLinkBuilder` has been moved into `server.mvc` and deprecated to be replaced by `WebMvcLinkBuilder`. 
+ +* `RelProvider` has been renamed to `LinkRelationProvider` and returns `LinkRelation` instances instead of `String`s. + +* `VndError` has been moved to the `mediatype.vnderror` package. + +#### 1.1.2. The migration script + +You can find [a script](https://github.com/spring-projects/spring-hateoas/tree/master/etc) to run from your application root that will update all import statements and static method references to Spring HATEOAS types that moved in our source code repository. +Simply download that, run it from your project root. +By default it will inspect all Java source files and replace the legacy Spring HATEOAS type references with the new ones. + +Example 1. Sample application of the migration script + +``` +$ ./migrate-to-1.0.sh + +Migrating Spring HATEOAS references to 1.0 for files : *.java + +Adapting ./src/main/java/… +… + +Done! +``` + +Note that the script will not necessarily be able to entirely fix all changes, but it should cover the most important refactorings. + +Now verify the changes made to the files in your favorite Git client and commit as appropriate. +In case you find method or type references unmigrated, please open a ticket in out issue tracker. + +#### 1.1.3. Migrating from 1.0 M3 to 1.0 RC1 + +* `Link.andAffordance(…)` taking Affordance details have been moved to `Affordances`. To manually build up `Affordance` instances now use `Affordances.of(link).afford(…)`. Also note the new `AffordanceBuilder` type exposed from `Affordances` for fluent usage. See [Affordances](#server.affordances) for details. + +* `AffordanceModelFactory.getAffordanceModel(…)` now receives `InputPayloadMetadata` and `PayloadMetadata` instances instead of `ResolvableType`s to allow non-type-based implementations. Custom media type implementations have to be adapted to that accordingly. + +* HAL Forms now does not render property attributes if their value adheres to what’s defined as default in the spec. I.e. 
if previously `required` was explicitly set to `false`, we now just omit the entry for `required`. + We also now only force them to be non-required for templates that use `PATCH` as the HTTP method. + +## 2. Fundamentals + +This section covers the basics of Spring HATEOAS and its fundamental domain abstractions. + +### 2.1. Links + +The fundamental idea of hypermedia is to enrich the representation of a resource with hypermedia elements. +The simplest form of that are links. +They indicate a client that it can navigate to a certain resource. +The semantics of a related resource are defined in a so-called link relation. +You might have seen this in the header of an HTML file already: + +Example 2. A link in an HTML document + +``` + +``` + +As you can see the link points to a resource `theme.css` and indicates that it is a style sheet. +Links often carry additional information, like the media type that the resource pointed to will return. +However, the fundamental building blocks of a link are its reference and relation. + +Spring HATEOAS lets you work with links through its immutable `Link` value type. +Its constructor takes both a hypertext reference and a link relation, the latter being defaulted to the IANA link relation `self`. +Read more on the latter in [Link relations](#fundamentals.link-relations). + +Example 3. Using links + +``` +Link link = Link.of("/something"); +assertThat(link.getHref()).isEqualTo("/something"); +assertThat(link.getRel()).isEqualTo(IanaLinkRelations.SELF); + +link = Link.of("/something", "my-rel"); +assertThat(link.getHref()).isEqualTo("/something"); +assertThat(link.getRel()).isEqualTo(LinkRelation.of("my-rel")); +``` + +`Link` exposes other attributes as defined in [RFC-8288](https://tools.ietf.org/html/rfc8288). +You can set them by calling the corresponding wither method on a `Link` instance. 
+ +Find more information on how to create links pointing to Spring MVC and Spring WebFlux controllers in [ Building links in Spring MVC](#server.link-builder.webmvc) and [Building links in Spring WebFlux](#server.link-builder.webflux). + +### 2.2. URI templates + +For a Spring HATEOAS `Link`, the hypertext reference can not only be a URI, but also a URI template according to [RFC-6570](https://tools.ietf.org/html/rfc6570). +A URI template contains so-called template variables and allows expansion of these parameters. +This allows clients to turn parameterized templates into URIs without having to know about the structure of the final URI, it only needs to know about the names of the variables. + +Example 4. Using links with templated URIs + +``` +Link link = Link.of("/{segment}/something{?parameter}"); +assertThat(link.isTemplated()).isTrue(); (1) +assertThat(link.getVariableNames()).contains("segment", "parameter"); (2) + +Map values = new HashMap<>(); +values.put("segment", "path"); +values.put("parameter", 42); + +assertThat(link.expand(values).getHref()) (3) + .isEqualTo("/path/something?parameter=42"); +``` + +|**1**|The `Link` instance indicates that is templated, i.e. it contains a URI template.| +|-----|---------------------------------------------------------------------------------| +|**2**| It exposes the parameters contained in the template. | +|**3**| It allows expansion of the parameters. | + +URI templates can be constructed manually and template variables added later on. + +Example 5. Working with URI templates + +``` +UriTemplate template = UriTemplate.of("/{segment}/something") + .with(new TemplateVariable("parameter", VariableType.REQUEST_PARAM); + +assertThat(template.toString()).isEqualTo("/{segment}/something{?parameter}"); +``` + +### 2.3. Link relations + +To indicate the relationship of the target resource to the current one so-called link relations are used. 
+Spring HATEOAS provides a `LinkRelation` type to easily create `String`-based instances of it. + +#### 2.3.1. IANA link relations + +The Internet Assigned Numbers Authority contains a set of [predefined link relations](https://www.iana.org/assignments/link-relations/link-relations.xhtml). +They can be referred to via `IanaLinkRelations`. + +Example 6. Using IANA link relations + +``` +Link link = Link.of("/some-resource"), IanaLinkRelations.NEXT); + +assertThat(link.getRel()).isEqualTo(LinkRelation.of("next")); +assertThat(IanaLinkRelation.isIanaRel(link.getRel())).isTrue(); +``` + +### Representation models + +To easily create hypermedia enriched representations, Spring HATEOAS provides a set of classes with `RepresentationModel` at their root. +It’s basically a container for a collection of `Link`s and has convenient methods to add those to the model. +The models can later be rendered into various media type formats that will define how the hypermedia elements look in the representation. +For more information on this, have a look at [Media types](#mediatypes). + +Example 7. The `RepresentationModel` class hierarchy + +diagram classes + +The default way to work with a `RepresentationModel` is to create a subclass of it to contain all the properties the representation is supposed to contain, create instances of that class, populate the properties and enrich it with links. + +Example 8. A sample representation model type + +``` +class PersonModel extends RepresentationModel { + + String firstname, lastname; +} +``` + +The generic self-typing is necessary to let `RepresentationModel.add(…)` return instances of itself. +The model type can now be used like this: + +Example 9. 
Using the person representation model + +``` +PersonModel model = new PersonModel(); +model.firstname = "Dave"; +model.lastname = "Matthews"; +model.add(Link.of("https://myhost/people/42")); +``` + +If you returned such an instance from a Spring MVC or WebFlux controller and the client sent an `Accept` header set to `application/hal+json`, the response would look as follows: + +Example 10. The HAL representation generated for the person representation model + +``` +{ + "_links" : { + "self" : { + "href" : "https://myhost/people/42" + } + }, + "firstname" : "Dave", + "lastname" : "Matthews" +} +``` + +#### 2.4.1. Item resource representation model + +For a resource that’s backed by a singular object or concept, a convenience `EntityModel` type exists. +Instead of creating a custom model type for each concept, you can just reuse an already existing type and wrap instances of it into the `EntityModel`. + +Example 11. Using `EntityModel` to wrap existing objects + +``` +Person person = new Person("Dave", "Matthews"); +EntityModel model = EntityModel.of(person); +``` + +#### 2.4.2. Collection resource representation model + +For resources that are conceptually collections, a `CollectionModel` is available. +Its elements can either be simple objects or `RepresentationModel` instances in turn. + +Example 12. Using `CollectionModel` to wrap a collection of existing objects + +``` +Collection people = Collections.singleton(new Person("Dave", "Matthews")); +CollectionModel model = CollectionModel.of(people); +``` + +## 3. Server-side support + +### Building links in Spring MVC + +Now we have the domain vocabulary in place, but the main challenge remains: how to create the actual URIs to be wrapped into `Link` instances in a less fragile way. Right now, we would have to duplicate URI strings all over the place. Doing so is brittle and unmaintainable. 
+ +Assume you have your Spring MVC controllers implemented as follows: + +``` +@Controller +class PersonController { + + @GetMapping("/people") + HttpEntity showAll() { … } + + @GetMapping(value = "/{person}", method = RequestMethod.GET) + HttpEntity show(@PathVariable Long person) { … } +} +``` + +We see two conventions here. The first is a collection resource that is exposed through `@GetMapping` annotation of the controller method, with individual elements of that collection exposed as direct sub resources. The collection resource might be exposed at a simple URI (as just shown) or more complex ones (such as `/people/{id}/addresses`). Suppose you would like to link to the collection resource of all people. Following the approach from above would cause two problems: + +* To create an absolute URI, you would need to look up the protocol, hostname, port, servlet base, and other values. This is cumbersome and requires ugly manual string concatenation code. + +* You probably do not want to concatenate the `/people` on top of your base URI, because you would then have to maintain the information in multiple places. If you change the mapping, you then have to change all the clients pointing to it. + +Spring HATEOAS now provides a `WebMvcLinkBuilder` that lets you create links by pointing to controller classes. +The following example shows how to do so: + +``` +import static org.sfw.hateoas.server.mvc.WebMvcLinkBuilder.*; + +Link link = linkTo(PersonController.class).withRel("people"); + +assertThat(link.getRel()).isEqualTo(LinkRelation.of("people")); +assertThat(link.getHref()).endsWith("/people"); +``` + +The `WebMvcLinkBuilder` uses Spring’s `ServletUriComponentsBuilder` under the hood to obtain the basic URI information from the current request. Assuming your application runs at `[localhost:8080/your-app](http://localhost:8080/your-app)`, this is exactly the URI on top of which you are constructing additional parts. 
The builder now inspects the given controller class for its root mapping and thus ends up with `[localhost:8080/your-app/people](http://localhost:8080/your-app/people)`. You can also build more nested links as well. +The following example shows how to do so: + +``` +Person person = new Person(1L, "Dave", "Matthews"); +// /person / 1 +Link link = linkTo(PersonController.class).slash(person.getId()).withSelfRel(); +assertThat(link.getRel(), is(IanaLinkRelation.SELF.value())); +assertThat(link.getHref(), endsWith("/people/1")); +``` + +The builder also allows creating URI instances to build up (for example, response header values): + +``` +HttpHeaders headers = new HttpHeaders(); +headers.setLocation(linkTo(PersonController.class).slash(person).toUri()); + +return new ResponseEntity(headers, HttpStatus.CREATED); +``` + +#### 3.1.1. Building links that point to methods + +You can even build links that point to methods or create dummy controller method invocations. +The first approach is to hand a `Method` instance to the `WebMvcLinkBuilder`. +The following example shows how to do so: + +``` +Method method = PersonController.class.getMethod("show", Long.class); +Link link = linkTo(method, 2L).withSelfRel(); + +assertThat(link.getHref()).endsWith("/people/2")); +``` + +This is still a bit dissatisfying, as we have to first get a `Method` instance, which throws an exception and is generally quite cumbersome. At least we do not repeat the mapping. An even better approach is to have a dummy method invocation of the target method on a controller proxy, which we can create by using the `methodOn(…)` helper. +The following example shows how to do so: + +``` +Link link = linkTo(methodOn(PersonController.class).show(2L)).withSelfRel(); + +assertThat(link.getHref()).endsWith("/people/2"); +``` + +`methodOn(…)` creates a proxy of the controller class that records the method invocation and exposes it in a proxy created for the return type of the method. 
This allows the fluent expression of the method for which we want to obtain the mapping. However, there are a few constraints on the methods that can be obtained by using this technique: + +* The return type has to be capable of proxying, as we need to expose the method invocation on it. + +* The parameters handed into the methods are generally neglected (except the ones referred to through `@PathVariable`, because they make up the URI). + +### 3.2. Building links in Spring WebFlux + +TODO + +### 3.3. Affordances + +> +> +> +> +> The affordances of the environment are what it offers …​ what it provides or furnishes, either for good or ill. The verb 'to afford' is found in the dictionary, but the noun 'affordance' is not. I have made it up. +> +> +> +> + +— James J. Gibson + +> The Ecological Approach to Visual Perception (page 126) + +REST-based resources provide not just data but controls. +The last ingredient to form a flexible service are detailed **affordances** on how to use the various controls. +Because affordances are associated with links, Spring HATEOAS provides an API to attach as many related methods as needed to a link. +Just as you can create links by pointing to Spring MVC controller methods (see [ Building links in Spring MVC](#server.link-builder.webmvc) for details) you …​ + +The following code shows how to take a **self** link and associate two more affordances: + +Example 13. Connecting affordances to `GET /employees/{id}` + +``` +@GetMapping("/employees/{id}") +public EntityModel findOne(@PathVariable Integer id) { + + Class controllerClass = EmployeeController.class; + + // Start the affordance with the "self" link, i.e. this method. + Link findOneLink = linkTo(methodOn(controllerClass).findOne(id)).withSelfRel(); (1) + + // Return the affordance + a link back to the entire collection resource. 
+ return EntityModel.of(EMPLOYEES.get(id), // + findOneLink // + .andAffordance(afford(methodOn(controllerClass).updateEmployee(null, id))) (2) + .andAffordance(afford(methodOn(controllerClass).partiallyUpdateEmployee(null, id)))); (3) +} +``` + +|**1**| Create the **self** link. | +|-----|--------------------------------------------------------------------| +|**2**| Associate the `updateEmployee` method with the `self` link. | +|**3**|Associate the `partiallyUpdateEmployee` method with the `self` link.| + +Using `.andAffordance(afford(…​))`, you can use the controller’s methods to connect a `PUT` and a `PATCH` operation to a `GET` operation. +Imagine that the related methods **afforded** above look like this: + +Example 14. `updateEmpoyee` method that responds to `PUT /employees/{id}` + +``` +@PutMapping("/employees/{id}") +public ResponseEntity updateEmployee( // + @RequestBody EntityModel employee, @PathVariable Integer id) +``` + +Example 15. `partiallyUpdateEmployee` method that responds to `PATCH /employees/{id}` + +``` +@PatchMapping("/employees/{id}") +public ResponseEntity partiallyUpdateEmployee( // + @RequestBody EntityModel employee, @PathVariable Integer id) +``` + +Pointing to those methods using the `afford(…)` methods will cause Spring HATEOAS to analyze the request body and response types and capture metadata to allow different media type implementations to use that information to translate that into descriptions of the input and outputs. + +#### 3.3.1. Building affordances manually + +While the primary way to register affordances for a link, it might be necessary to build some of them manually. +This can be achieved by using the `Affordances` API: + +Example 16. 
Using the `Affordances` API to manually register affordances + +``` +var methodInvocation = methodOn(EmployeeController.class).all(); + +var link = Affordances.of(linkTo(methodInvocation).withSelfRel()) (1) + + .afford(HttpMethod.POST) (2) + .withInputAndOutput(Employee.class) // + .withName("createEmployee") // + + .andAfford(HttpMethod.GET) (3) + .withOutput(Employee.class) // + .addParameters(// + QueryParameter.optional("name"), // + QueryParameter.optional("role")) // + .withName("search") // + + .toLink(); +``` + +|**1**| You start by creating an instance of `Affordances` from a `Link` instance creating the context for describing the affordances. | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Each affordance starts with the HTTP method it’s supposed to support. We then register a type as payload description and name the affordance explicitly. The latter can be omitted and a default name will be derived from the HTTP method and input type name. This effectively creates the same affordance as the pointer to `EmployeeController.newEmployee(…)` created.| +|**3**| The next affordance is built to reflect what’s happening for the pointer to `EmployeeController.search(…)`. Here we define `Employee` to be the model for the response created and explicitly register `QueryParameter`s. | + +Affordances are backed by media type specific affordance models that translate the general affordance metadata into specific representations. +Please make sure to check the section on affordances in the [Media types](#mediatypes) section to find more details about how to control the exposure of that metadata. + +### 3.4. 
Forwarded header handling + +[RFC-7239 forwarding headers](https://tools.ietf.org/html/rfc7239) are most commonly used when your application is behind a proxy, behind a load balancer, or in the cloud. +The node that actually receives the web request is part of the infrastructure, and *forwards* the request to your application. + +Your application may be running on `localhost:8080`, but to the outside world you’re expected to be at `reallycoolsite.com` (and on the web’s standard port 80). +By having the proxy include extra headers (which many already do), Spring HATEOAS can generate links properly as it uses Spring Framework functionality to obtain the base URI of the original request. + +| |Anything that can change the root URI based on external inputs must be properly guarded.
    That’s why, by default, forwarded header handling is **disabled**.
    You MUST enable it to be operational.
    If you are deploying to the cloud or into a configuration where you control the proxies and load balancers, then you’ll certainly want to use this feature.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To enable forwarded header handling you need to register Spring’s `ForwardedHeaderFilter` for Spring MVC (details [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html#filters-forwarded-headers)) or `ForwardedHeaderTransformer` for Spring WebFlux (details [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/web-reactive.html#webflux-forwarded-headers)) in your application. +In a Spring Boot application those components can be simply declared as Spring beans as described [here](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-developing-web-applications.html#boot-features-embedded-container-servlets-filters-listeners-beans). + +Example 17. Registering a `ForwardedHeaderFilter` + +``` +@Bean +ForwardedHeaderFilter forwardedHeaderFilter() { + return new ForwardedHeaderFilter(); +} +``` + +This will create a servlet filter that processes all the `X-Forwarded-…` headers. +And it will register it properly with the servlet handlers. + +For a Spring WebFlux application, the reactive counterpart is `ForwardedHeaderTransformer`: + +Example 18. Registering a `ForwardedHeaderTransformer` + +``` +@Bean +ForwardedHeaderTransformer forwardedHeaderTransformer() { + return new ForwardedHeaderTransformer(); +} +``` + +This will create a function that transforms reactive web requests, processing `X-Forwarded-…` headers. +And it will register it properly with WebFlux. 
+ +With configuration as shown above in place, a request passing `X-Forwarded-…` headers will see those reflected in the links generated: + +Example 19. A request using `X-Forwarded-…` headers + +``` +curl -v localhost:8080/employees \ + -H 'X-Forwarded-Proto: https' \ + -H 'X-Forwarded-Host: example.com' \ + -H 'X-Forwarded-Port: 9001' +``` + +Example 20. The corresponding response with the links generated to consider those headers + +``` +{ + "_embedded": { + "employees": [ + { + "id": 1, + "name": "Bilbo Baggins", + "role": "burglar", + "_links": { + "self": { + "href": "https://example.com:9001/employees/1" + }, + "employees": { + "href": "https://example.com:9001/employees" + } + } + } + ] + }, + "_links": { + "self": { + "href": "https://example.com:9001/employees" + }, + "root": { + "href": "https://example.com:9001" + } + } +} +``` + +### Using the EntityLinks interface + +| |`EntityLinks` and its various implementations are NOT currently provided out-of-the-box for Spring WebFlux applications.
    The contract defined in the `EntityLinks` SPI was originally aimed at Spring Web MVC and doesn’t consider Reactor types.
    Developing a comparable contract that supports reactive programming is still in progress.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +So far, we have created links by pointing to the web framework implementations (that is, the Spring MVC controllers) and inspected the mapping. +In many cases, these classes essentially read and write representations backed by a model class. + +The `EntityLinks` interface now exposes an API to look up a `Link` or `LinkBuilder` based on the model types. +The methods essentially return links that point either to the collection resource (such as `/people`) or to an item resource (such as `/people/1`). +The following example shows how to use `EntityLinks`: + +``` +EntityLinks links = …; +LinkBuilder builder = links.linkFor(Customer.class); +Link link = links.linkToItemResource(Customer.class, 1L); +``` + +`EntityLinks` is available via dependency injection by activating `@EnableHypermediaSupport` in your Spring MVC configuration. +This will cause a variety of default implementations of `EntityLinks` being registered. +The most fundamental one is `ControllerEntityLinks` that inspects SpringMVC controller classes. +If you want to register your own implementation of `EntityLinks`, check out [this section](#server.entity-links.spi). + +#### 3.5.1. EntityLinks based on Spring MVC controllers + +Activating entity links functionality causes all the Spring MVC controllers available in the current `ApplicationContext` to be inspected for the `@ExposesResourceFor(…)` annotation. +The annotation exposes which model type the controller manages. 
+Beyond that, we assume that you adhere to the following URI mapping setup and conventions: + +* A type level `@ExposesResourceFor(…)` declaring which entity type the controller exposes collection and item resources for. + +* A class level base mapping that represents the collection resource. + +* An additional method level mapping that extends the mapping to append an identifier as additional path segment. + +The following example shows an implementation of an `EntityLinks`-capable controller: + +``` +@Controller +@ExposesResourceFor(Order.class) (1) +@RequestMapping("/orders") (2) +class OrderController { + + @GetMapping (3) + ResponseEntity orders(…) { … } + + @GetMapping("{id}") (4) + ResponseEntity order(@PathVariable("id") … ) { … } +} +``` + +|**1**| The controller indicates it’s exposing collection and item resources for the entity `Order`. | +|-----|------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Its collection resource is exposed under `/orders` | +|**3**| That collection resource can handle `GET` requests. Add more methods for other HTTP methods at your convenience. | +|**4**|An additional controller method to handle a subordinate resource taking a path variable to expose an item resource, i.e. 
a single `Order`.| + +With this in place, when you enable `EntityLinks` `@EnableHypermediaSupport` in your Spring MVC configuration, you can create links to the controller as follows: + +``` +@Controller +class PaymentController { + + private final EntityLinks entityLinks; + + PaymentController(EntityLinks entityLinks) { (1) + this.entityLinks = entityLinks; + } + + @PutMapping(…) + ResponseEntity payment(@PathVariable Long orderId) { + + Link link = entityLinks.linkToItemResource(Order.class, orderId); (2) + … + } +} +``` + +|**1**|Inject `EntityLinks` made available by `@EnableHypermediaSupport` in your configuration.| +|-----|----------------------------------------------------------------------------------------| +|**2**| Use the APIs to build links by using the entity types instead of controller classes. | + +As you can see, you can refer to resources managing `Order` instances without referring to `OrderController` explicitly. + +#### 3.5.2. EntityLinks API in detail + +Fundamentally, `EntityLinks` allows to build `LinkBuilder`s and `Link` instances to collection and item resources of an entity type. +Methods starting with `linkFor…` will produce `LinkBuilder` instances for you to extend and augment with additional path segments, parameters, etc. +Methods starting with `linkTo` produce fully prepared `Link` instances. + +While for collection resources providing an entity type is sufficient, links to item resources will need an identifier provided. +This usually looks like this: + +Example 21. 
Obtaining a link to an item resource + +``` +entityLinks.linkToItemResource(order, order.getId()); +``` + +If you find yourself repeating those method calls the identifier extraction step can be pulled out into a reusable `Function` to be reused throughout different invocations: + +``` +Function idExtractor = Order::getId; (1) + +entityLinks.linkToItemResource(order, idExtractor); (2) +``` + +|**1**|The identifier extraction is externalized so that it can be held in a field or constant.| +|-----|----------------------------------------------------------------------------------------| +|**2**| The link lookup using the extractor. | + +##### TypedEntityLinks + +As controller implementations are often grouped around entity types, you’ll very often find yourself using the same extractor function (see [EntityLinks API in detail](#server.entity-links.api) for details) all over the controller class. +We can centralize the identifier extraction logic even more by obtaining a `TypedEntityLinks` instance providing the extractor once, so that the actual lookups don’t have to deal with the extraction anymore at all. + +Example 22. Using TypedEntityLinks + +``` +class OrderController { + + private final TypedEntityLinks links; + + OrderController(EntityLinks entityLinks) { (1) + this.links = entityLinks.forType(Order::getId); (2) + } + + @GetMapping + ResponseEntity someMethod(…) { + + Order order = … // lookup order + + Link link = links.linkToItemResource(order); (3) + } +} +``` + +|**1**| Inject an `EntityLinks` instance. | +|-----|------------------------------------------------------------------------------------------------| +|**2**|Indicate you’re going to look up `Order` instances with a certain identifier extractor function.| +|**3**| Look up item resource links based on a sole `Order` instance. | + +#### 3.5.3. 
EntityLinks as SPI + +The `EntityLinks` instance created by `@EnableHypermediaSupport` is of type `DelegatingEntityLinks` which will in turn pick up all other `EntityLinks` implementations available as beans in the `ApplicationContext`. +It’s registered as a primary bean so that it’s always the sole injection candidate when you inject `EntityLinks` in general. `ControllerEntityLinks` is the default implementation that will be included in the setup, but users are free to implement and register their own implementations. +Making those implementations available to the `EntityLinks` instance for injection is a matter of registering your implementation as a Spring bean. + +Example 23. Declaring a custom EntityLinks implementation + +``` +@Configuration +class CustomEntityLinksConfiguration { + + @Bean + MyEntityLinks myEntityLinks(…) { + return new MyEntityLinks(…); + } +} +``` + +An example for the extensibility of this mechanism is Spring Data REST’s [`RepositoryEntityLinks`](https://github.com/spring-projects/spring-data-rest/blob/3a0cba94a2cc8739375ecf24086da2f7c3bbf038/spring-data-rest-webmvc/src/main/java/org/springframework/data/rest/webmvc/support/RepositoryEntityLinks.java), which uses the repository mapping information to create links pointing to resources backed by Spring Data repositories. +At the same time, it even exposes additional lookup methods for other types of resources. +If you want to make use of these, simply inject `RepositoryEntityLinks` explicitly. + +### 3.6. Representation model assembler + +As the mapping from an entity to a representation model must be used in multiple places, it makes sense to create a dedicated class responsible for doing so. The conversion contains very custom steps but also a few boilerplate steps: + +1. Instantiation of the model class + +2. Adding a link with a `rel` of `self` pointing to the resource that gets rendered. 
+ +Spring HATEOAS now provides a `RepresentationModelAssemblerSupport` base class that helps reduce the amount of code you need to write. +The following example shows how to use it: + +``` +class PersonModelAssembler extends RepresentationModelAssemblerSupport { + + public PersonModelAssembler() { + super(PersonController.class, PersonModel.class); + } + + @Override + public PersonModel toModel(Person person) { + + PersonModel resource = createResource(person); + // … do further mapping + return resource; + } +} +``` + +| |`createResource(…​)` is code you write to instantiate a `PersonModel` object given a `Person` object. It should only focus on setting attributes, not populating `Links`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Setting the class up as we did in the preceding example gives you the following benefits: + +* There are a handful of `createModelWithId(…)` methods that let you create an instance of the resource and have a `Link` with a rel of `self` added to it. The href of that link is determined by the configured controller’s request mapping plus the ID of the entity (for example, `/people/1`). + +* The resource type gets instantiated by reflection and expects a no-arg constructor. If you want to use a dedicated constructor or avoid the reflection performance overhead, you can override `instantiateModel(…)`. + +You can then use the assembler to either assemble a `RepresentationModel` or a `CollectionModel`. +The following example creates a `CollectionModel` of `PersonModel` instances: + +``` +Person person = new Person(…); +Iterable people = Collections.singletonList(person); + +PersonModelAssembler assembler = new PersonModelAssembler(); +PersonModel model = assembler.toModel(person); +CollectionModel model = assembler.toCollectionModel(people); +``` + +### 3.7. 
Representation Model Processors + +Sometimes you need to tweak and adjust hypermedia representations after they have been [assembled](#server.representation-model-assembler). + +A perfect example is when you have a controller that deals with order fulfillment, but you need to add links related to making payments. + +Imagine having your ordering system producing this type of hypermedia: + +``` +{ + "orderId" : "42", + "state" : "AWAITING_PAYMENT", + "_links" : { + "self" : { + "href" : "http://localhost/orders/999" + } + } +} +``` + +You wish to add a link so the client can make payment, but don’t want to mix details about your `PaymentController` into +the `OrderController`. + +Instead of polluting the details of your ordering system, you can write a `RepresentationModelProcessor` like this: + +``` +public class PaymentProcessor implements RepresentationModelProcessor> { (1) + + @Override + public EntityModel process(EntityModel model) { + + model.add( (2) + Link.of("/payments/{orderId}").withRel(LinkRelation.of("payments")) // + .expand(model.getContent().getOrderId())); + + return model; (3) + } +} +``` + +|**1**| This processor will only be applied to `EntityModel` objects. | +|-----|-------------------------------------------------------------------------------| +|**2**| Manipulate the existing `EntityModel` object by adding an unconditional link. 
| +|**3**|Return the `EntityModel` so it can be serialized into the requested media type.| + +Register the processor with your application: + +``` +@Configuration +public class PaymentProcessingApp { + + @Bean + PaymentProcessor paymentProcessor() { + return new PaymentProcessor(); + } +} +``` + +Now when you issue a hypermedia representation of an `Order`, the client receives this: + +``` +{ + "orderId" : "42", + "state" : "AWAITING_PAYMENT", + "_links" : { + "self" : { + "href" : "http://localhost/orders/999" + }, + "payments" : { (1) + "href" : "/payments/42" (2) + } + } +} +``` + +|**1**|You see the `LinkRelation.of("payments")` plugged in as this link’s relation.| +|-----|-----------------------------------------------------------------------------| +|**2**| The URI was provided by the processor. | + +This example is quite simple, but you can easily: + +* Use `WebMvcLinkBuilder` or `WebFluxLinkBuilder` to construct a dynamic link to your `PaymentController`. + +* Inject any services needed to conditionally add other links (e.g. `cancel`, `amend`) that are driven by state. + +* Leverage cross-cutting services like Spring Security to add, remove, or revise links based upon the current user’s context. + +Also, in this example, the `PaymentProcessor` alters the provided `EntityModel`. You also have the power to *replace* it with another object. Just be advised the API requires the return type to equal the input type. + +### 3.8. Using the `LinkRelationProvider` API + +When building links, you usually need to determine the relation type to be used for the link. In most cases, the relation type is directly associated with a (domain) type. We encapsulate the detailed algorithm to look up the relation types behind a `LinkRelationProvider` API that lets you determine the relation types for single and collection resources. The algorithm for looking up the relation type follows: + +1. If the type is annotated with `@Relation`, we use the values configured in the annotation. 
+ +2. If not, we default to the uncapitalized simple class name plus an appended `List` for the collection `rel`. + +3. If the [EVO inflector](https://github.com/atteo/evo-inflector) JAR is in the classpath, we use the plural of the single resource `rel` provided by the pluralizing algorithm. + +4. `@Controller` classes annotated with `@ExposesResourceFor` (see [ Using the EntityLinks interface](#server.entity-links) for details) transparently look up the relation types for the type configured in the annotation, so that you can use `LinkRelationProvider.getItemResourceRelFor(MyController.class)` and get the relation type of the domain type exposed. + +A `LinkRelationProvider` is automatically exposed as a Spring bean when you use `@EnableHypermediaSupport`. You can plug in custom providers by implementing the interface and exposing them as Spring beans in turn. + +## 4. Media types + +### 4.1. HAL – Hypertext Application Language + +[JSON Hypertext Application Language](https://tools.ietf.org/html/draft-kelly-json-hal-08) or HAL is one of the simplest +and most widely adopted hypermedia media types adopted when not discussing specific web stacks. + +It was the first spec-based media type adopted by Spring HATEOAS. + +#### 4.1.1. Building HAL representation models + +As of Spring HATEOAS 1.1, we ship a dedicated `HalModelBuilder` that allows to create `RepresentationModel` instances through a HAL-idiomatic API. +These are its fundamental assumptions: + +1. A HAL representation can be backed by an arbitrary object (an entity) that builds up the domain fields contained in the representation. + +2. The representation can be enriched by a variety of embedded documents, which can be either arbitrary objects or HAL representations themselves (i.e. containing nested embeddeds and links). + +3. Certain HAL specific patterns (e.g. 
previews) can be directly used in the API so that the code setting up the representation reads like you’d describe a HAL representation following those idioms. + +Here’s an example of the API used: + +``` +// An order +var order = new Order(…); (1) + +// The customer who placed the order +var customer = customer.findById(order.getCustomerId()); + +var customerLink = Link.of("/orders/{id}/customer") (2) + .expand(order.getId()) + .withRel("customer"); + +var additional = … + +var model = HalModelBuilder.halModelOf(order) + .preview(new CustomerSummary(customer)) (3) + .forLink(customerLink) (4) + .embed(additional) (5) + .link(Link.of(…, IanaLinkRelations.SELF)); + .build(); +``` + +|**1**| We set up some domain type. In this case, an order that has a relationship to the customer that placed it. | +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| We prepare a link pointing to a resource that will expose customer details | +|**3**| We start building a preview by providing the payload that’s supposed to be rendered inside the `_embeddable` clause. | +|**4**| We conclude that preview by providing the target link. It transparently gets added to the `_links` object and its link relation is used as the key for the object provided in the previous step. | +|**5**|Other objects can be added to show up under `_embedded`.
    The key under which they’re listed is derived from the object’s relation settings. They’re customizable via `@Relation` or a dedicated `LinkRelationProvider` (see [ Using the `LinkRelationProvider` API](#server.rel-provider) for details).| + +``` +{ + "_links" : { + "self" : { "href" : "…" }, (1) + "customer" : { "href" : "/orders/4711/customer" } (2) + }, + "_embedded" : { + "customer" : { … }, (3) + "additional" : { … } (4) + } +} +``` + +|**1**| The `self` link as explicitly provided. | +|-----|--------------------------------------------------------------------------| +|**2**|The `customer` link transparently added through `….preview(…).forLink(…)`.| +|**3**| The preview object provided. | +|**4**| Additional elements added via explicit `….embed(…)`. | + +In HAL `_embedded` is also used to represent top collections. +They’re usually grouped under the link relation derived from the object’s type. +I.e. a list of orders would look like this in HAL: + +``` +{ + "_embedded" : { + "orders" : [ + … (1) + ] + } +} +``` + +|**1**|Individual order documents go here.| +|-----|-----------------------------------| + +Creating such a representation is as easy as this: + +``` +Collection orders = …; + +HalModelBuilder.emptyHalModel() + .embed(orders); +``` + +That said, if the collection of orders is empty, there’s no way to derive the link relation to appear inside `_embedded`, so that the document will stay empty if the collection is empty. + +If you prefer to explicitly communicate an empty collection, a type can be handed into the overload of the `….embed(…)` method taking a `Collection`. +If the collection handed into the method is empty, this will cause a field rendered with its link relation derived from the given type. + +``` +HalModelBuilder.emptyHalModel() + .embed(Collections.emptyList(), Order.class); + // or + .embed(Collections.emptyList(), LinkRelation.of("orders")); +``` + +will create the following, more explicit representation. 
+ +``` +{ + "_embedded" : { + "orders" : [] + } +} +``` + +#### 4.1.2. Configuring link rendering + +In HAL, the `_links` entry is a JSON object. The property names are [link relations](#fundamentals.link-relations) and +each value is either [a link object or an array of link objects](https://tools.ietf.org/html/draft-kelly-json-hal-07#section-4.1.1). + +For a given link relation that has two or more links, the spec is clear on representation: + +Example 24. HAL document with two links associated with one relation + +``` +{ + "_links": { + "item": [ + { "href": "https://myhost/cart/42" }, + { "href": "https://myhost/inventory/12" } + ] + }, + "customer": "Dave Matthews" +} +``` + +But if there is only one link for a given relation, the spec is ambiguous. You could render that as either a single object +or as a single-item array. + +By default, Spring HATEOAS uses the most terse approach and renders a single-link relation like this: + +Example 25. HAL document with single link rendered as an object + +``` +{ + "_links": { + "item": { "href": "https://myhost/inventory/12" } + }, + "customer": "Dave Matthews" +} +``` + +Some users prefer to not switch between arrays and objects when consuming HAL. They would prefer this type of rendering: + +Example 26. HAL with single link rendered as an array + +``` +{ + "_links": { + "item": [{ "href": "https://myhost/inventory/12" }] + }, + "customer": "Dave Matthews" +} +``` + +If you wish to customize this policy, all you have to do is inject a `HalConfiguration` bean into your application configuration. +There are multiple choices. + +Example 27. 
Global HAL single-link rendering policy + +``` +@Bean +public HalConfiguration globalPolicy() { + return new HalConfiguration() // + .withRenderSingleLinks(RenderSingleLinks.AS_ARRAY); (1) +} +``` + +|**1**|Override Spring HATEOAS’s default by rendering ALL single-link relations as arrays.| +|-----|-----------------------------------------------------------------------------------| + +If you prefer to only override some particular link relations, you can create a `HalConfiguration`bean like this: + +Example 28. Link relation-based HAL single-link rendering policy + +``` +@Bean +public HalConfiguration linkRelationBasedPolicy() { + return new HalConfiguration() // + .withRenderSingleLinksFor( // + IanaLinkRelations.ITEM, RenderSingleLinks.AS_ARRAY) (1) + .withRenderSingleLinksFor( // + LinkRelation.of("prev"), RenderSingleLinks.AS_SINGLE); (2) +} +``` + +|**1**| Always render `item` link relations as an array. | +|-----|----------------------------------------------------------------------| +|**2**|Render `prev` link relations as an object when there is only one link.| + +If neither of these match your needs, you can use an Ant-style path pattern: + +Example 29. Pattern-based HAL single-link rendering policy + +``` +@Bean +public HalConfiguration patternBasedPolicy() { + return new HalConfiguration() // + .withRenderSingleLinksFor( // + "http*", RenderSingleLinks.AS_ARRAY); (1) +} +``` + +|**1**|Render all link relations that start with `http` as an array.| +|-----|-------------------------------------------------------------| + +| |The pattern-based approach uses Spring’s `AntPathMatcher`.| +|---|----------------------------------------------------------| + +All of these `HalConfiguration` withers can be combined to form one comprehensive policy. Be sure to test your API +extensively to avoid surprises. + +#### 4.1.3. Link title internationalization + +HAL defines a `title` attribute for its link objects. 
+These titles can be populated by using Spring’s resource bundle abstraction and a resource bundle named `rest-messages` so that clients can use them in their UIs directly. +This bundle will be set up automatically and is used during HAL link serialization. + +To define a title for a link, use the key template `_links.$relationName.title` as follows: + +Example 30. A sample `rest-messages.properties` + +``` +_links.cancel.title=Cancel order +_links.payment.title=Proceed to checkout +``` + +This will result in the following HAL representation: + +Example 31. A sample HAL document with link titles defined + +``` +{ + "_links" : { + "cancel" : { + "href" : "…" + "title" : "Cancel order" + }, + "payment" : { + "href" : "…" + "title" : "Proceed to checkout" + } + } +} +``` + +#### Using the `CurieProvider` API + +The [Web Linking RFC](https://tools.ietf.org/html/rfc8288#section-2.1) describes registered and extension link relation types. Registered rels are well-known strings registered with the [IANA registry of link relation types](https://www.iana.org/assignments/link-relations/link-relations.xhtml). Extension `rel` URIs can be used by applications that do not wish to register a relation type. Each one is a URI that uniquely identifies the relation type. The `rel` URI can be serialized as a compact URI or [Curie](https://www.w3.org/TR/curie). For example, a curie of `ex:persons` stands for the link relation type `[example.com/rels/persons](https://example.com/rels/persons)` if `ex` is defined as `[example.com/rels/{rel}](https://example.com/rels/{rel})`. If curies are used, the base URI must be present in the response scope. + +The `rel` values created by the default `RelProvider` are extension relation types and, as a result, must be URIs, which can cause a lot of overhead. The `CurieProvider` API takes care of that: It lets you define a base URI as a URI template and a prefix that stands for that base URI. 
If a `CurieProvider` is present, the `RelProvider` prepends all `rel` values with the curie prefix. Furthermore a `curies` link is automatically added to the HAL resource. + +The following configuration defines a default curie provider: + +``` +@Configuration +@EnableWebMvc +@EnableHypermediaSupport(type= {HypermediaType.HAL}) +public class Config { + + @Bean + public CurieProvider curieProvider() { + return new DefaultCurieProvider("ex", new UriTemplate("https://www.example.com/rels/{rel}")); + } +} +``` + +Note that now the `ex:` prefix automatically appears before all rel values that are not registered with IANA, as in `ex:orders`. Clients can use the `curies` link to resolve a curie to its full form. +The following example shows how to do so: + +``` +{ + "_links": { + "self": { + "href": "https://myhost/person/1" + }, + "curies": { + "name": "ex", + "href": "https://example.com/rels/{rel}", + "templated": true + }, + "ex:orders": { + "href": "https://myhost/person/1/orders" + } + }, + "firstname": "Dave", + "lastname": "Matthews" +} +``` + +Since the purpose of the `CurieProvider` API is to allow for automatic curie creation, you can define only one `CurieProvider` bean per application scope. + +### 4.2. HAL-FORMS + +[HAL-FORMS](https://rwcbook.github.io/hal-forms/) is designed to add runtime FORM support to the [HAL media type](#mediatypes.hal). + +> +> +> +> +> HAL-FORMS "looks like HAL." However, it is important to keep in mind that HAL-FORMS is not the same as HAL — the two +> should not be thought of as interchangeable in any way. +> +> +> +> + +— Mike Amundsen + +> HAL-FORMS spec + +To enable this media type, put the following configuration in your code: + +Example 32. 
HAL-FORMS enabled application + +``` +@Configuration +@EnableHypermediaSupport(type = HypermediaType.HAL_FORMS) +public class HalFormsApplication { + +} +``` + +Anytime a client supplies an `Accept` header with `application/prs.hal-forms+json`, you can expect something like this: + +Example 33. HAL-FORMS sample document + +``` +{ + "firstName" : "Frodo", + "lastName" : "Baggins", + "role" : "ring bearer", + "_links" : { + "self" : { + "href" : "http://localhost:8080/employees/1" + } + }, + "_templates" : { + "default" : { + "method" : "put", + "contentType" : "", + "properties" : [ { + "name" : "firstName", + "required" : true + }, { + "name" : "lastName", + "required" : true + }, { + "name" : "role", + "required" : true + } ] + }, + "partiallyUpdateEmployee" : { + "method" : "patch", + "contentType" : "", + "properties" : [ { + "name" : "firstName", + "required" : false + }, { + "name" : "lastName", + "required" : false + }, { + "name" : "role", + "required" : false + } ] + } + } +} +``` + +Check out the [HAL-FORMS spec](https://rwcbook.github.io/hal-forms/) to understand the details of the **\_templates** attribute. +Read about the [Affordances API](#server.affordances) to augment your controllers with this extra metadata. + +As for single-item (`EntityModel`) and aggregate root collections (`CollectionModel`), Spring HATEOAS renders them +identically to [HAL documents](#mediatypes.hal). + +#### 4.2.1. Defining HAL-FORMS metadata + +HAL-FORMS allows to describe criterias for each form field. +Spring HATEOAS allows to customize those by shaping the model type for the input and output types and using annotations on them. + +|Attribute | Description | +|----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|`readOnly`|Set to `true` if there’s no setter method for the property. 
If that is present, use Jackson’s `@JsonProperty(Access.READ_ONLY)` on the accessors or field explicitly. Not rendered by default, thus defaulting to `false`.| +| `regex` | Can be customized by using JSR-303’s `@Pattern` annotation either on the field or a type. In case of the latter the pattern will be used for every property declared as that particular type. Not rendered by default. | +|`required`| Can be customized by using JSR-303’s `@NotNull`. Not rendered by default and thus defaulting to `false`. Templates using `PATCH` as method will automatically have all properties set to not required. | + +For types that you cannot annotate manually, you can register a custom pattern via a `HalFormsConfiguration` bean present in the application context. + +``` +@Configuration +class CustomConfiguration { + + @Bean + HalFormsConfiguration halFormsConfiguration() { + + HalFormsConfiguration configuration = new HalFormsConfiguration(); + return configuration.registerPatternFor(CreditCardNumber.class, "[0-9]{16}"); + } +} +``` + +This setup will cause the HAL-FORMS template properties for representation model properties of type `CreditCardNumber` to declare a `regex` field with value `[0-9]{16}`. + +#### 4.2.2. Internationalization of form attributes + +HAL-FORMS contains attributes that are intended for human interpretation, like a template’s title or property prompts. +These can be defined and internationalized using Spring’s resource bundle support and the `rest-messages` resource bundle configured by Spring HATEOAS by default. + +##### Template titles + +To define a template title use the following pattern: `_templates.$affordanceName.title`. Note that in HAL-FORMS, the name of a template is `default` if it is the only one. +This means that you’ll usually have to qualify the key with the local or fully qualified input type name that affordance describes. + +Example 34. 
Defining HAL-FORMS template titles + +``` +_templates.default.title=Some title (1) +_templates.putEmployee.title=Create employee (2) +Employee._templates.default.title=Create employee (3) +com.acme.Employee._templates.default.title=Create employee (4) +``` + +|**1**| A global definition for the title using `default` as key. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|A global definition for the title using the actual affordance name as key. Unless defined explicitly when creating the affordance, this defaults to `$httpMethod + $simpleInputTypeName`.| +|**3**| A locally defined title to be applied to all types named `Employee`. | +|**4**| A title definition using the fully-qualified type name. | + +| |Keys using the actual affordance name enjoy preference over the defaulted ones.| +|---|-------------------------------------------------------------------------------| + +##### Property prompts + +Property prompts can also be resolved via the `rest-messages` resource bundle automatically configured by Spring HATEOAS. +The keys can be defined globally, locally or fully-qualified and need an `._prompt` concatenated to the actual property key: + +Example 35. Defining prompts for an `email` property + +``` +firstName._prompt=Firstname (1) +Employee.firstName._prompt=Firstname (2) +com.acme.Employee.firstName._prompt=Firstname (3) +``` + +|**1**|All properties named `firstName` will get "Firstname" rendered, independent of the type they’re declared in.| +|-----|------------------------------------------------------------------------------------------------------------| +|**2**| The `firstName` property in types named `Employee` will be prompted "Firstname". | +|**3**| The `firstName` property of `com.acme.Employee` will get a prompt of "Firstname" assigned. 
| + +A sample document with both template titles and property prompts defined would then look something like this: + +Example 36. A sample HAL-FORMS document with internationalized template titles and property prompts + +``` +{ + …, + "_templates" : { + "default" : { + "title" : "Create employee", + "method" : "put", + "contentType" : "", + "properties" : [ { + "name" : "firstName", + "prompt" : "Firstname", + "required" : true + }, { + "name" : "lastName", + "prompt" : "Lastname", + "required" : true + }, { + "name" : "role", + "prompt" : "Role", + "required" : true + } ] + } + } +} +``` + +### 4.3. HTTP Problem Details + +[Problem Details for HTTP APIs](https://tools.ietf.org/html/rfc7807) is a media type to carry machine-readable details of errors in a HTTP response to avoid the need to define new error response formats for HTTP APIs. + +HTTP Problem Details defines a set of JSON properties that carry additional information to describe error details to HTTP clients. +Find more details about those properties in particular in the relevant section of the [RFC document](https://tools.ietf.org/html/rfc7807#section-3.1). 
+ +You can create such a JSON response by using the `Problem` media type domain type in your Spring MVC Controller: + +Reporting problem details using Spring HATEOAS' `Problem` type + +``` +@RestController +class PaymentController { + + @PutMapping + ResponseEntity issuePayment(@RequestBody PaymentRequest request) { + + PaymentResult result = payments.issuePayment(request.orderId, request.amount); + + if (result.isSuccess()) { + return ResponseEntity.ok(result); + } + + String title = messages.getMessage("payment.out-of-credit"); + String detail = messages.getMessage("payment.out-of-credit.details", // + new Object[] { result.getBalance(), result.getCost() }); + + Problem problem = Problem.create() (1) + .withType(OUT_OF_CREDIT_URI) // + .withTitle(title) (2) + .withDetail(detail) // + .withInstance(PAYMENT_ERROR_INSTANCE.expand(result.getPaymentId())) // + .withProperties(map -> { (3) + map.put("balance", result.getBalance()); + map.put("accounts", Arrays.asList( // + ACCOUNTS.expand(result.getSourceAccountId()), // + ACCOUNTS.expand(result.getTargetAccountId()) // + )); + }); + + return ResponseEntity.status(HttpStatus.FORBIDDEN) // + .body(problem); + } +} +``` + +|**1**| You start by creating an instance of `Problem` using the factory methods exposed. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|You can define the values for the default properties defined by the media type, e.g. the type URI, the title and details using internationalization features of Spring (see above).| +|**3**| Custom properties can be added via a `Map` or an explicit object (see below). | + +To use a dedicated object for custom properties, declare a type, create and populate an instance of it and hand this into the `Problem` instance either via `….withProperties(…)` or on instance creation via `Problem.create(…)`. 
+ +Using a dedicated type to capture extended problem properties + +``` +class AccountDetails { + int balance; + List accounts; +} + +problem.withProperties(result.getDetails()); + +// or + +Problem.create(result.getDetails()); +``` + +This will result in a response looking like this: + +A sample HTTP Problem Details response + +``` +{ + "type": "https://example.com/probs/out-of-credit", + "title": "You do not have enough credit.", + "detail": "Your current balance is 30, but that costs 50.", + "instance": "/account/12345/msgs/abc", + "balance": 30, + "accounts": ["/account/12345", + "/account/67890"] +} +``` + +### 4.4. Collection+JSON + +[Collection+JSON](http://amundsen.com/media-types/collection/format/) is a JSON spec registered with IANA-approved media type `application/vnd.collection+json`. + +> +> +> +> +> [Collection+JSON](http://amundsen.com/media-types/collection/) is a JSON-based read/write hypermedia-type designed to support +> management and querying of simple collections. +> +> +> +> + +— Mike Amundsen + +> Collection+JSON spec + +Collection+JSON provides a uniform way to represent both single item resources as well as collections. +To enable this media type, put the following configuration in your code: + +Example 37. Collection+JSON enabled application + +``` +@Configuration +@EnableHypermediaSupport(type = HypermediaType.COLLECTION_JSON) +public class CollectionJsonApplication { + +} +``` + +This configuration will make your application respond to requests that have an `Accept` header of `application/vnd.collection+json`as shown below. + +The following example from the spec shows a single item: + +Example 38. 
Collection+JSON single item example + +``` +{ + "collection": { + "version": "1.0", + "href": "https://example.org/friends/", (1) + "links": [ (2) + { + "rel": "feed", + "href": "https://example.org/friends/rss" + }, + { + "rel": "queries", + "href": "https://example.org/friends/?queries" + }, + { + "rel": "template", + "href": "https://example.org/friends/?template" + } + ], + "items": [ (3) + { + "href": "https://example.org/friends/jdoe", + "data": [ (4) + { + "name": "fullname", + "value": "J. Doe", + "prompt": "Full Name" + }, + { + "name": "email", + "value": "[email protected]", + "prompt": "Email" + } + ], + "links": [ (5) + { + "rel": "blog", + "href": "https://examples.org/blogs/jdoe", + "prompt": "Blog" + }, + { + "rel": "avatar", + "href": "https://examples.org/images/jdoe", + "prompt": "Avatar", + "render": "image" + } + ] + } + ] + } +} +``` + +|**1**| The `self` link is stored in the document’s `href` attribute. | +|-----|---------------------------------------------------------------------------------------------------------------| +|**2**| The document’s top `links` section contains collection-level links (minus the `self` link). | +|**3**|The `items` section contains a collection of data. Since this is a single-item document, it only has one entry.| +|**4**| The `data` section contains actual content. It’s made up of properties. | +|**5**| The item’s individual `links`. | + +| |The previous fragment was lifted from the spec. When Spring HATEOAS renders an `EntityModel`, it will:

    * Put the `self` link into both the document’s `href` attribute and the item-level `href` attribute.

    * Put the rest of the model’s links into both the top-level `links` as well as the item-level `links`.

    * Extract the properties from the `EntityModel` and turn them into …​| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When rendering a collection of resources, the document is almost the same, except there will be multiple entries inside +the `items` JSON array, one for each entry. + +Spring HATEOAS more specifically will: + +* Put the entire collection’s `self` link into the top-level `href` attribute. + +* The `CollectionModel` links (minus `self`) will be put into the top-level `links`. + +* Each item-level `href` will contain the corresponding `self` link for each entry from the `CollectionModel.content` collection. + +* Each item-level `links` will contain all other links for each entry from `CollectionModel.content`. + +### 4.5. UBER - Uniform Basis for Exchanging Representations + +[UBER](https://rawgit.com/uber-hypermedia/specification/master/uber-hypermedia.html) is an experimental JSON spec + +> +> +> +> +> The UBER document format is a minimal read/write hypermedia type designed to support simple state transfers and ad-hoc +> hypermedia-based transitions. +> +> +> +> + +— Mike Amundsen + +> UBER spec + +UBER provides a uniform way to represent both single item resources as well as collections. To enable this media type, put the following configuration in your code: + +Example 39. UBER+JSON enabled application + +``` +@Configuration +@EnableHypermediaSupport(type = HypermediaType.UBER) +public class UberApplication { + +} +``` + +This configuration will make your application respond to requests using the `Accept` header `application/vnd.amundsen-uber+json`as show below: + +Example 40. 
UBER sample document + +``` +{ + "uber" : { + "version" : "1.0", + "data" : [ { + "rel" : [ "self" ], + "url" : "/employees/1" + }, { + "name" : "employee", + "data" : [ { + "name" : "role", + "value" : "ring bearer" + }, { + "name" : "name", + "value" : "Frodo" + } ] + } ] + } +} +``` + +This media type is still under development as is the spec itself. Feel free to[open a ticket](https://github.com/spring-projects/spring-hateoas/issues) if you run into issues using it. + +| |**UBER media type** is not associated in any way with **Uber Technologies Inc.**, the ride sharing company.| +|---|-----------------------------------------------------------------------------------------------------------| + +### 4.6. ALPS - Application-Level Profile Semantics + +[ALPS](https://tools.ietf.org/html/draft-amundsen-richardson-foster-alps-01) is a media type for providing +profile-based metadata about another resource. + +> +> +> +> +> An ALPS document can be used as a profile to +> explain the application semantics of a document with an application- +> agnostic media type (such as HTML, HAL, Collection+JSON, Siren, +> etc.). This increases the reusability of profile documents across +> media types. +> +> +> +> + +— Mike Amundsen + +> ALPS spec + +ALPS requires no special activation. Instead you "build" an `Alps` record and return it from either a Spring MVC or a Spring WebFlux web method as shown below: + +Example 41. 
Building an `Alps` record + +``` +@GetMapping(value = "/profile", produces = ALPS_JSON_VALUE) +Alps profile() { + + return Alps.alps() // + .doc(doc() // + .href("https://example.org/samples/full/doc.html") // + .value("value goes here") // + .format(Format.TEXT) // + .build()) // + .descriptor(getExposedProperties(Employee.class).stream() // + .map(property -> Descriptor.builder() // + .id("class field [" + property.getName() + "]") // + .name(property.getName()) // + .type(Type.SEMANTIC) // + .ext(Ext.builder() // + .id("ext [" + property.getName() + "]") // + .href("https://example.org/samples/ext/" + property.getName()) // + .value("value goes here") // + .build()) // + .rt("rt for [" + property.getName() + "]") // + .descriptor(Collections.singletonList(Descriptor.builder().id("embedded").build())) // + .build()) // + .collect(Collectors.toList())) + .build(); +} +``` + +* This example leverages `PropertyUtils.getExposedProperties()` to extract metadata about the domain object’s attributes. + +This fragment has test data plugged in. It yields JSON like this: + +Example 42. ALPS JSON + +``` +{ + "version": "1.0", + "doc": { + "format": "TEXT", + "href": "https://example.org/samples/full/doc.html", + "value": "value goes here" + }, + "descriptor": [ + { + "id": "class field [name]", + "name": "name", + "type": "SEMANTIC", + "descriptor": [ + { + "id": "embedded" + } + ], + "ext": { + "id": "ext [name]", + "href": "https://example.org/samples/ext/name", + "value": "value goes here" + }, + "rt": "rt for [name]" + }, + { + "id": "class field [role]", + "name": "role", + "type": "SEMANTIC", + "descriptor": [ + { + "id": "embedded" + } + ], + "ext": { + "id": "ext [role]", + "href": "https://example.org/samples/ext/role", + "value": "value goes here" + }, + "rt": "rt for [role]" + } + ] +} +``` + +Instead of linking each field "automatically" to a domain object’s fields, you can write them by hand if you like. 
It’s also possible +to use Spring Framework’s message bundles and the `MessageSource` interface. This gives you the ability to delegate these values to +locale-specific message bundles and even internationalize the metadata. + +### 4.7. Community-based media types + +Thanks to the [ability to create your own media type](#mediatypes.custom), there are several community-led efforts to build additional media types. + +#### 4.7.1. JSON:API + +* [Specification](https://jsonapi.org) + +* Media type designation: `application/vnd.api+json` + +* Latest Release + + * [Reference documentation](https://toedter.github.io/spring-hateoas-jsonapi/release/reference/index.html) + + * [API documentation](https://toedter.github.io/spring-hateoas-jsonapi/release/api/index.html) + +* Current Snapshot + + * [Reference documentation](https://toedter.github.io/spring-hateoas-jsonapi/snapshot/reference/index.html) + + * [API documentation](https://toedter.github.io/spring-hateoas-jsonapi/snapshot/api/index.html) + +* [Source](https://github.com/toedter/spring-hateoas-jsonapi) + +* Project Lead: [Kai Toedter](https://github.com/toedter) + +Maven coordinates + +``` + + com.toedter + spring-hateoas-jsonapi + {see project page for current version} + +``` + +Gradle coordinates + +``` +implementation 'com.toedter:spring-hateoas-jsonapi:{see project page for current version}' +``` + +Visit the project page for more details if you want snapshot releases. + +#### 4.7.2. 
Siren + +* [Specification](https://github.com/kevinswiber/siren) + +* Media type designation: `application/vnd.siren+json` + +* [Reference documentation](https://spring-hateoas-siren.ingogriebsch.de) + +* [javadocs](https://spring-hateoas-siren.ingogriebsch.de/apidocs) + +* [Source](https://github.com/ingogriebsch/spring-hateoas-siren) + +* Project Lead: [Ingo Griebsch](https://github.com/ingogriebsch) + +Maven coordinates + +``` + + de.ingogriebsch.hateoas + spring-hateoas-siren + {see project page for current version} + compile + +``` + +Gradle coordinates + +``` +implementation 'de.ingogriebsch.hateoas:spring-hateoas-siren:{see project page for current version}' +``` + +### 4.8. Registering a custom media type + +Spring HATEOAS allows you to integrate custom media types through an SPI. +The building blocks of such an implementation are: + +1. Some form of Jackson `ObjectMapper` customization. In its most simple case that’s a Jackson `Module` implementation. + +2. A `LinkDiscoverer` implementation so that the client-side support is able to detect links in representations. + +3. A small bit of infrastructure configuration that will allow Spring HATEOAS to find the custom implementation and pick it up. + +#### 4.8.1. Custom media type configuration + +Custom media type implementations are picked up by Spring HATEOAS by scanning the application context for any implementations of the `HypermediaMappingInformation` interface. +Each media type must implement this interface in order to: + +* Be applied to [`WebClient`](#client.web-client), [`WebTestClient`](#client.web-test-client), or [`RestTemplate`](#client.rest-template) instances. + +* Support serving that media type from Spring Web MVC and Spring WebFlux controllers. 
+ +Defining your own media type can look as simple as this: + +``` +@Configuration +public class MyMediaTypeConfiguration implements HypermediaMappingInformation { + + @Override + public List getMediaTypes() { + return MediaType.parse("application/vnd-acme-media-type") (1) + } + + @Override + public Module getJacksonModule() { + return new Jackson2MyMediaTypeModule(); (2) + } + + @Bean + MyLinkDiscoverer myLinkDiscoverer() { + return new MyLinkDiscoverer(); (3) + } +} +``` + +|**1**|The configuration class returns the media type it supports. This applies to both server-side and client-side scenarios.| +|-----|-----------------------------------------------------------------------------------------------------------------------| +|**2**| It overrides `getJacksonModule()` to provide custom serializers to create the media type specific representations. | +|**3**| It also declares a custom `LinkDiscoverer` implementation for further client-side support. | + +The Jackson module usually declares `Serializer` and `Deserializer` implementations for the representation model types `RepresentationModel`, `EntityModel`, `CollectionModel` and `PagedModel`. +In case you need further customization of the Jackson `ObjectMapper` (like a custom `HandlerInstantiator`), you can alternatively override `configureObjectMapper(…)`. + +| |Prior versions of the reference documentation mentioned implementing the `MediaTypeConfigurationProvider` interface and registering it with `spring.factories`.
    This is NOT necessary.
    This SPI is ONLY used for out-of-the-box media types provided by Spring HATEOAS.
    Merely implementing the `HypermediaMappingInformation` interface and registering it as a Spring bean is all that’s needed.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.8.2. Recommendations + +The preferred way to implement media type representations is by providing a type hierarchy that matches the expected format and can be serialized by Jackson as is. +In the `Serializer` and `Deserializer` implementations registered for `RepresentationModel`, convert the instances into the media type-specific model types and then look up the Jackson serializer for those. + +The media types supported by default use the same configuration mechanism as third-party implementations would do. +So it’s worth studying the implementations in [the `mediatype` package](https://github.com/spring-projects/spring-hateoas/tree/master/src/main/java/org/springframework/hateoas/mediatype). +Note that the built-in media type implementations keep their configuration classes package-private, as they’re activated via `@EnableHypermediaSupport`. +Custom implementations should probably make those public instead to make sure users can import those configuration classes from their application packages. + +## 5. Configuration + +This section describes how to configure Spring HATEOAS. + +### 5.1. Using `@EnableHypermediaSupport` + +To let the `RepresentationModel` subtypes be rendered according to the specification of various hypermedia representation types, you can activate support for a particular hypermedia representation format through `@EnableHypermediaSupport`. The annotation takes a `HypermediaType` enumeration as its argument. 
Currently, we support [HAL](https://tools.ietf.org/html/draft-kelly-json-hal) as well as a default rendering. Using the annotation triggers the following: + +* It registers necessary Jackson modules to render `EntityModel` and `CollectionModel` in the hypermedia specific format. + +* If JSONPath is on the classpath, it automatically registers a `LinkDiscoverer` instance to look up links by their `rel` in plain JSON representations (see [Using `LinkDiscoverer` Instances](#client.link-discoverer)). + +* By default, it enables [entity links](#fundamentals.obtaining-links.entity-links) and automatically picks up `EntityLinks` implementations and bundles them into a `DelegatingEntityLinks` instance that you can autowire. + +* It automatically picks up all `RelProvider` implementations in the `ApplicationContext` and bundles them into a `DelegatingRelProvider` that you can autowire. It registers providers to consider `@Relation` on domain types as well as Spring MVC controllers. If the [EVO inflector](https://github.com/atteo/evo-inflector) is on the classpath, collection `rel` values are derived by using the pluralizing algorithm implemented in the library (see [[spis.rel-provider]](#spis.rel-provider)). + +#### 5.1.1. Explicitly enabling support for dedicated web stacks + +By default, `@EnableHypermediaSupport` will reflectively detect the web application stack you’re using and hook into the Spring components registered for those to enable support for hypermedia representations. +However, there are situations in which you’d only explicitly want to activate support for a particular stack. +E.g. if your Spring WebMVC based application uses WebFlux' `WebClient` to make outgoing requests and that one is not supposed to work with hypermedia elements, you can restrict the functionality to be enabled by explicitly declaring WebMVC in the configuration: + +Example 43. 
Explicitly activating hypermedia support for a particular web stack + +``` +@EnableHypermediaSupport(…, stacks = WebStack.WEBMVC) +class MyHypermediaConfiguration { … } +``` + +## 6. Client-side Support + +This section describes Spring HATEOAS’s support for clients. + +### 6.1. Traverson + +Spring HATEOAS provides an API for client-side service traversal. It is inspired by the [Traverson JavaScript library](https://blog.codecentric.de/en/2013/11/traverson/). +The following example shows how to use it: + +``` +Map parameters = new HashMap<>(); +parameters.put("user", 27); + +Traverson traverson = new Traverson(URI.create("http://localhost:8080/api/"), MediaTypes.HAL_JSON); +String name = traverson + .follow("movies", "movie", "actor").withTemplateParameters(parameters) + .toObject("$.name"); +``` + +You can set up a `Traverson` instance by pointing it to a REST server and configuring the media types you want to set as `Accept` headers. You can then define the relation names you want to discover and follow. Relation names can either be simple names or JSONPath expressions (starting with an `$`). + +The sample then hands a parameter map into the `Traverson` instance. The parameters are used to expand URIs (which are templated) found during the traversal. The traversal is concluded by accessing the representation of the final traversal. In the preceding example, we evaluate a JSONPath expression to access the actor’s name. + +The preceding example is the simplest version of traversal, where the `rel` values are strings and, at each hop, the same template parameters are applied. + +There are more options to customize template parameters at each level. +The following example shows these options. 
+ +``` +ParameterizedTypeReference> resourceParameterizedTypeReference = new ParameterizedTypeReference>() {}; + +EntityModel itemResource = traverson.// + follow(rel("items").withParameter("projection", "noImages")).// + follow("$._embedded.items[0]._links.self.href").// + toObject(resourceParameterizedTypeReference); +``` + +The static `rel(…​)` function is a convenient way to define a single `Hop`. Using `.withParameter(key, value)` makes it simple to specify URI template variables. + +| |`.withParameter()` returns a new `Hop` object that is chainable. You can string together as many `.withParameter` as you like. The result is a single `Hop` definition.
    The following example shows one way to do so:| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +ParameterizedTypeReference> resourceParameterizedTypeReference = new ParameterizedTypeReference>() {}; + +Map params = Collections.singletonMap("projection", "noImages"); + +EntityModel itemResource = traverson.// + follow(rel("items").withParameters(params)).// + follow("$._embedded.items[0]._links.self.href").// + toObject(resourceParameterizedTypeReference); +``` + +You can also load an entire `Map` of parameters by using `.withParameters(Map)`. + +| |`follow()` is chainable, meaning you can string together multiple hops, as shown in the preceding examples. You can either put multiple string-based `rel` values (`follow("items", "item")`) or a single hop with specific parameters.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 6.1.1. `EntityModel` vs. `CollectionModel` + +The examples shown so far demonstrate how to sidestep Java’s type erasure and convert a single JSON-formatted resource into a `EntityModel` object. However, what if you get a collection like an `\_embedded` HAL collection? +You can do so with only one slight tweak, as the following example shows: + +``` +CollectionModelType collectionModelType = + TypeReferences.CollectionModelType() {}; + +CollectionModel itemResource = traverson.// + follow(rel("items")).// + toObject(collectionModelType); +``` + +Instead of fetching a single resource, this one deserializes a collection into `CollectionModel`. + +### 6.2. 
Using `LinkDiscoverer` Instances + +When working with hypermedia-enabled representations, a common task is to find a link with a particular relation type in it. Spring HATEOAS provides [JSONPath](https://code.google.com/p/json-path)-based implementations of the `LinkDiscoverer` interface for either the default representation rendering or HAL out of the box. When using `@EnableHypermediaSupport`, we automatically expose an instance supporting the configured hypermedia type as a Spring bean. + +Alternatively, you can set up and use an instance as follows: + +``` +String content = "{'_links' : { 'foo' : { 'href' : '/foo/bar' }}}"; +LinkDiscoverer discoverer = new HalLinkDiscoverer(); +Link link = discoverer.findLinkWithRel("foo", content); + +assertThat(link.getRel(), is("foo")); +assertThat(link.getHref(), is("/foo/bar")); +``` + +### 6.3. Configuring WebClient instances + +If you need to configure a `WebClient` to speak hypermedia, it’s easy. Get a hold of the `HypermediaWebClientConfigurer` as shown below: + +Example 44. Configuring a `WebClient` yourself + +``` +@Bean +WebClient.Builder hypermediaWebClient(HypermediaWebClientConfigurer configurer) { (1) + return configurer.registerHypermediaTypes(WebClient.builder()); (2) +} +``` + +|**1**|Inside your `@Configuration` class, get a copy of the `HypermediaWebClientConfigurer` bean Spring HATEOAS registers.| +|-----|--------------------------------------------------------------------------------------------------------------------| +|**2**| After creating a `WebClient.Builder`, use the configurer to register hypermedia types. | + +| |What `HypermediaWebClientConfigurer` does is register all the right encoders and decoders with a `WebClient.Builder`. To make use of it,
    you need to inject the builder somewhere into your application, and run the `build()` method to produce a `WebClient`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you’re using Spring Boot, there is another way: the `WebClientCustomizer`. + +Example 45. Letting Spring Boot configure things + +``` +@Bean (4) +WebClientCustomizer hypermediaWebClientCustomizer(HypermediaWebClientConfigurer configurer) { (1) + return webClientBuilder -> { (2) + configurer.registerHypermediaTypes(webClientBuilder); (3) + }; +} +``` + +|**1**| When creating a Spring bean, request a copy of Spring HATEOAS’s `HypermediaWebClientConfigurer` bean. | +|-----|----------------------------------------------------------------------------------------------------------------------------------| +|**2**| Use a Java 8 lambda expression to define a `WebClientCustomizer`. | +|**3**| Inside the function call, apply the `registerHypermediaTypes` method. | +|**4**|Return the whole thing as a Spring bean so Spring Boot can pick it up and apply it to its autoconfigured `WebClient.Builder` bean.| + +At this stage, whenever you need a concrete `WebClient`, simply inject `WebClient.Builder` into your code, and use `build()`. The `WebClient` instance +will be able to interact using hypermedia. + +### 6.4. Configuring `WebTestClient` Instances + +When working with hypermedia-enabled representations, a common task is to run various tests by using `WebTestClient`. + +To configure an instance of `WebTestClient` in a test case, check out this example: + +Example 46. Configuring `WebTestClient` when using Spring HATEOAS + +``` +@Test // #1225 +void webTestClientShouldSupportHypermediaDeserialization() { + + // Configure an application context programmatically. 
+ AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); + context.register(HalConfig.class); (1) + context.refresh(); + + // Create an instance of a controller for testing + WebFluxEmployeeController controller = context.getBean(WebFluxEmployeeController.class); + controller.reset(); + + // Extract the WebTestClientConfigurer from the app context. + HypermediaWebTestClientConfigurer configurer = context.getBean(HypermediaWebTestClientConfigurer.class); + + // Create a WebTestClient by binding to the controller and applying the hypermedia configurer. + WebTestClient client = WebTestClient.bindToApplicationContext(context).build().mutateWith(configurer); (2) + + // Exercise the controller. + client.get().uri("http://localhost/employees").accept(HAL_JSON) // + .exchange() // + .expectStatus().isOk() // + .expectBody(new TypeReferences.CollectionModelType>() {}) (3) + .consumeWith(result -> { + CollectionModel> model = result.getResponseBody(); (4) + + // Assert against the hypermedia model. + assertThat(model.getRequiredLink(IanaLinkRelations.SELF)).isEqualTo(Link.of("http://localhost/employees")); + assertThat(model.getContent()).hasSize(2); + }); +} +``` + +|**1**| Register your configuration class that uses `@EnableHypermediaSupport` to enable HAL support. | +|-----|----------------------------------------------------------------------------------------------------------------------------------| +|**2**| Use `HypermediaWebTestClientConfigurer` to apply hypermedia support. | +|**3**|Ask for a response of `CollectionModel>` using Spring HATEOAS’s `TypeReferences.CollectionModelType` helper.| +|**4**| After getting the "body" in Spring HATEOAS format, assert against it! | + +| |`WebTestClient` is an immutable value type, so you can’t alter it in place. `HypermediaWebClientConfigurer` returns a mutated
    variant that you must then capture to use it.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +If you are using Spring Boot, there are additional options, like this: + +Example 47. Configuring `WebTestClient` when using Spring Boot + +``` +@SpringBootTest +@AutoConfigureWebTestClient (1) +class WebClientBasedTests { + + @Test + void exampleTest(@Autowired WebTestClient.Builder builder, @Autowired HypermediaWebTestClientConfigurer configurer) { (2) + client = builder.apply(configurer).build(); (3) + + client.get().uri("/") // + .exchange() // + .expectBody(new TypeReferences.EntityModelType() {}) (4) + .consumeWith(result -> { + // assert against this EntityModel! + }); + } +} +``` + +|**1**| This is Spring Boot’s test annotation that will configure a `WebTestClient.Builder` for this test class. | +|-----|-------------------------------------------------------------------------------------------------------------------| +|**2**|Autowire Spring Boot’s `WebTestClient.Builder` into `builder` and Spring HATEOAS’s configurer as method parameters.| +|**3**| Use `HypermediaWebTestClientConfigurer` to register support for hypermedia. | +|**4**| Signal you want an `EntityModel` returned using `TypeReferences`. | + +Again, you can use similar assertions as the earlier example. + +There are many other ways to fashion test cases. `WebTestClient` can be bound to controllers, functions, and URLs. This section isn’t meant to show all that. Instead, this gives you some examples to get started. The important thing is that by applying `HypermediaWebTestClientConfigurer`, any instance of `WebTestClient` can be altered to handle hypermedia. + +### 6.5. Configuring RestTemplate instances + +If you want to create your own copy of `RestTemplate`, configured to speak hypermedia, you can use the `HypermediaRestTemplateConfigurer`: + +Example 48. 
Configuring `RestTemplate` yourself + +``` +/** + * Use the {@link HypermediaRestTemplateConfigurer} to configure a {@link RestTemplate}. + */ +@Bean +RestTemplate hypermediaRestTemplate(HypermediaRestTemplateConfigurer configurer) { (1) + return configurer.registerHypermediaTypes(new RestTemplate()); (2) +} +``` + +|**1**|Inside your `@Configuration` class, get a copy of the `HypermediaRestTemplateConfigurer` bean Spring HATEOAS registers.| +|-----|-----------------------------------------------------------------------------------------------------------------------| +|**2**| After creating a `RestTemplate`, use the configurer to apply hypermedia types. | + +You are free to apply this pattern to any instance of `RestTemplate` that you need, whether is to create a registered bean, or inside a service you define. + +If you’re using Spring Boot, there is another approach. + +In general, Spring Boot has moved away from the concept of registering a `RestTemplate` bean in the application context. + +* When talking to different services, you often need different credentials. + +* When `RestTemplate` uses an underlying connection pool, you run into additional issues. + +* Users often need different instances rather than a single bean. + +To compensate for this, Spring Boot provides a `RestTemplateBuilder`. This autoconfigured bean lets you define various beans used to fashion +a `RestTemplate` instance. You ask for a `RestTemplateBuilder` bean, call its `build()` method, and then apply final settings (such as credentials and other details). + +To register hypermedia-based message converters, add the following to your code: + +Example 49. 
Letting Spring Boot configure things + +``` +@Bean (4) +RestTemplateCustomizer hypermediaRestTemplateCustomizer(HypermediaRestTemplateConfigurer configurer) { (1) + return restTemplate -> { (2) + configurer.registerHypermediaTypes(restTemplate); (3) + }; +} +``` + +|**1**| When creating a Spring bean, request a copy of Spring HATEOAS’s `HypermediaRestTemplateConfigurer` bean. | +|-----|-------------------------------------------------------------------------------------------------------------------------------| +|**2**| Use a Java 8 lambda expression to define a `RestTemplateCustomizer`. | +|**3**| Inside the function call, apply the `registerHypermediaTypes` method. | +|**4**|Return the whole thing as a Spring bean so Spring Boot can pick it up and apply it to its autoconfigured `RestTemplateBuilder`.| + +At this stage, whenever you need a concrete `RestTemplate`, simply inject `RestTemplateBuilder` into your code, and use `build()`. The `RestTemplate` instance +will be able to interact using hypermedia. \ No newline at end of file diff --git a/docs/en/spring-integration/README.md b/docs/en/spring-integration/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-integration/amqp.md b/docs/en/spring-integration/amqp.md new file mode 100644 index 0000000000000000000000000000000000000000..0a42b372c8d5a2ad24e3217402d1eeadae3f607a --- /dev/null +++ b/docs/en/spring-integration/amqp.md @@ -0,0 +1,1079 @@ +# AMQP Support + +## AMQP Support + +Spring Integration provides channel adapters for receiving and sending messages by using the Advanced Message Queuing Protocol (AMQP). 
+ +You need to include this dependency into your project: + +Maven + +``` + + org.springframework.integration + spring-integration-amqp + 5.5.9 + +``` + +Gradle + +``` +compile "org.springframework.integration:spring-integration-amqp:5.5.9" +``` + +The following adapters are available: + +* [Inbound Channel Adapter](#amqp-inbound-channel-adapter) + +* [Inbound Gateway](#amqp-inbound-gateway) + +* [Outbound Channel Adapter](#amqp-outbound-channel-adapter) + +* [Outbound Gateway](#amqp-outbound-gateway) + +* [Async Outbound Gateway](#amqp-async-outbound-gateway) + +Spring Integration also provides a point-to-point message channel and a publish-subscribe message channel backed by AMQP Exchanges and Queues. + +To provide AMQP support, Spring Integration relies on ([Spring AMQP](https://projects.spring.io/spring-amqp)), which applies core Spring concepts to the development of AMQP-based messaging solutions. +Spring AMQP provides similar semantics to ([Spring JMS](https://docs.spring.io/spring/docs/current/spring-framework-reference/integration.html#jms)). + +Whereas the provided AMQP Channel Adapters are intended for unidirectional messaging (send or receive) only, Spring Integration also provides inbound and outbound AMQP gateways for request-reply operations. + +TIP: +You should familiarize yourself with the [reference documentation of the Spring AMQP project](https://docs.spring.io/spring-amqp/reference/html/). +It provides much more in-depth information about Spring’s integration with AMQP in general and RabbitMQ in particular. 
+ +### Inbound Channel Adapter + +The following listing shows the possible configuration options for an AMQP Inbound Channel Adapter: + +Java DSL + +``` +@Bean +public IntegrationFlow amqpInbound(ConnectionFactory connectionFactory) { + return IntegrationFlows.from(Amqp.inboundAdapter(connectionFactory, "aName")) + .handle(m -> System.out.println(m.getPayload())) + .get(); +} +``` + +Java + +``` +@Bean +public MessageChannel amqpInputChannel() { + return new DirectChannel(); +} + +@Bean +public AmqpInboundChannelAdapter inbound(SimpleMessageListenerContainer listenerContainer, + @Qualifier("amqpInputChannel") MessageChannel channel) { + AmqpInboundChannelAdapter adapter = new AmqpInboundChannelAdapter(listenerContainer); + adapter.setOutputChannel(channel); + return adapter; +} + +@Bean +public SimpleMessageListenerContainer container(ConnectionFactory connectionFactory) { + SimpleMessageListenerContainer container = + new SimpleMessageListenerContainer(connectionFactory); + container.setQueueNames("aName"); + container.setConcurrentConsumers(2); + // ... + return container; +} + +@Bean +@ServiceActivator(inputChannel = "amqpInputChannel") +public MessageHandler handler() { + return new MessageHandler() { + + @Override + public void handleMessage(Message message) throws MessagingException { + System.out.println(message.getPayload()); + } + + }; +} +``` + +XML + +``` + (27) +``` + +|**1** | The unique ID for this adapter.
    Optional. | +|------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2** | Message channel to which converted messages should be sent.
    Required. | +|**3** | Names of the AMQP queues (comma-separated list) from which messages should be consumed.
    Required. | +|**4** | Acknowledge mode for the `MessageListenerContainer`.
    When set to `MANUAL`, the delivery tag and channel are provided in message headers `amqp_deliveryTag` and `amqp_channel`, respectively.
    The user application is responsible for acknowledgement. `NONE` means no acknowledgements (`autoAck`). `AUTO` means the adapter’s container acknowledges when the downstream flow completes.<br/>
    Optional (defaults to AUTO).
    See [Inbound Endpoint Acknowledge Mode](#amqp-inbound-ack). | +|**5** | Extra AOP Advices to handle cross-cutting behavior associated with this inbound channel adapter.
    Optional. | +|**6** | Flag to indicate that channels created by this component are transactional.
    If true, it tells the framework to use a transactional channel and to end all operations (send or receive) with a commit or rollback, depending on the outcome, with an exception that signals a rollback.
    Optional (Defaults to false). | +|**7** | Specify the number of concurrent consumers to create.
    The default is `1`.
    We recommend raising the number of concurrent consumers to scale the consumption of messages coming in from a queue.
    However, note that any ordering guarantees are lost once multiple consumers are registered.
    In general, use one consumer for low-volume queues.
    Not allowed when 'consumers-per-queue' is set.
    Optional. | +|**8** | Bean reference to the RabbitMQ `ConnectionFactory`.
    Optional (defaults to `connectionFactory`). | +|**9** | Message channel to which error messages should be sent.
    Optional. | +|**10**| Whether the listener channel (com.rabbitmq.client.Channel) is exposed to a registered `ChannelAwareMessageListener`.
    Optional (defaults to true). | +|**11**| A reference to an `AmqpHeaderMapper` to use when receiving AMQP Messages.
    Optional.
    By default, only standard AMQP properties (such as `contentType`) are copied to Spring Integration `MessageHeaders`.
    Any user-defined headers within the AMQP `MessageProperties` are NOT copied to the message by the default `DefaultAmqpHeaderMapper`.
    Not allowed if 'request-header-names' is provided. | +|**12**| Comma-separated list of the names of AMQP Headers to be mapped from the AMQP request into the `MessageHeaders`.
    This can only be provided if the 'header-mapper' reference is not provided.
    The values in this list can also be simple patterns to be matched against the header names (such as "\*" or "thing1\*, thing2" or "\*something"). | +|**13**| Reference to the `AbstractMessageListenerContainer` to use for receiving AMQP Messages.
    If this attribute is provided, no other attribute related to the listener container configuration should be provided.
    In other words, by setting this reference, you must take full responsibility for the listener container configuration.
    The only exception is the `MessageListener` itself.
    Since that is actually the core responsibility of this channel adapter implementation, the referenced listener container must not already have its own `MessageListener`.
    Optional. | +|**14**| The `MessageConverter` to use when receiving AMQP messages.
    Optional. | +|**15**| The `MessagePropertiesConverter` to use when receiving AMQP messages.
    Optional. | +|**16**| Specifies the phase in which the underlying `AbstractMessageListenerContainer` should be started and stopped.
    The startup order proceeds from lowest to highest, and the shutdown order is the reverse of that.
    By default, this value is `Integer.MAX_VALUE`, meaning that this container starts as late as possible and stops as soon as possible.
    Optional. | +|**17**| Tells the AMQP broker how many messages to send to each consumer in a single request.
    Often, you can set this value high to improve throughput.
    It should be greater than or equal to the transaction size (see the `tx-size` attribute, later in this list).
    Optional (defaults to `1`). | +|**18**| Receive timeout in milliseconds.
    Optional (defaults to `1000`). | +|**19**| Specifies the interval between recovery attempts of the underlying `AbstractMessageListenerContainer` (in milliseconds).
    Optional (defaults to `5000`). | +|**20**| If 'true' and none of the queues are available on the broker, the container throws a fatal exception during startup and stops if the queues are deleted when the container is running (after making three attempts to passively declare the queues).
    If `false`, the container does not throw an exception and goes into recovery mode, attempting to restart according to the `recovery-interval`.
    Optional (defaults to `true`). | +|**21**| The time to wait for workers (in milliseconds) after the underlying `AbstractMessageListenerContainer` is stopped and before the AMQP connection is forced closed.
    If any workers are active when the shutdown signal comes, they are allowed to finish processing as long as they can finish within this timeout.
    Otherwise, the connection is closed and messages remain unacknowledged (if the channel is transactional).
    Optional (defaults to `5000`). | |**22**| By default, the underlying `AbstractMessageListenerContainer` uses a `SimpleAsyncTaskExecutor` implementation that fires up a new thread for each task, running it asynchronously.<br/>
    By default, the number of concurrent threads is unlimited.
    Note that this implementation does not reuse threads.
    Consider using a thread-pooling `TaskExecutor` implementation as an alternative.
    Optional (defaults to `SimpleAsyncTaskExecutor`). | +|**23**| By default, the underlying `AbstractMessageListenerContainer` creates a new instance of the `DefaultTransactionAttribute` (it takes the EJB approach to rolling back on runtime but not checked exceptions).
    Optional (defaults to `DefaultTransactionAttribute`). | +|**24**|Sets a bean reference to an external `PlatformTransactionManager` on the underlying `AbstractMessageListenerContainer`.
    The transaction manager works in conjunction with the `channel-transacted` attribute.
    If there is already a transaction in progress when the framework is sending or receiving a message and the `channelTransacted` flag is `true`, the commit or rollback of the messaging transaction is deferred until the end of the current transaction.
    If the `channelTransacted` flag is `false`, no transaction semantics apply to the messaging operation (it is auto-acked).
    For further information, see [Transactions with Spring AMQP](https://docs.spring.io/spring-amqp/reference/html/%255Freference.html#%5Ftransactions).<br/>
    Optional.| +|**25**| Tells the `SimpleMessageListenerContainer` how many messages to process in a single transaction (if the channel is transactional).
    For best results, it should be less than or equal to the value set in `prefetch-count`.
    Not allowed when 'consumers-per-queue' is set.
    Optional (defaults to `1`). | +|**26**| Indicates that the underlying listener container should be a `DirectMessageListenerContainer` instead of the default `SimpleMessageListenerContainer`.
    See the [Spring AMQP Reference Manual](https://docs.spring.io/spring-amqp/reference/html/) for more information. | +|**27**| When the container’s `consumerBatchEnabled` is `true`, determines how the adapter presents the batch of messages in the message payload.
    When set to `MESSAGES` (default), the payload is a `List<Message<?>>` where each message has headers mapped from the incoming AMQP `Message` and the payload is the converted `body`.<br/>
    When set to `EXTRACT_PAYLOADS`, the payload is a `List<?>` where the elements are converted from the AMQP `Message` body. `EXTRACT_PAYLOADS_WITH_HEADERS` is similar to `EXTRACT_PAYLOADS` but, in addition, the headers from each message are mapped from the `MessageProperties` into a `List<Map<String, Object>>` at the corresponding index; the header name is `AmqpInboundChannelAdapter.CONSOLIDATED_HEADERS`. | + +| |container<br/>

    Note that when configuring an external container with XML, you cannot use the Spring AMQP namespace to define the container.
    This is because the namespace requires at least one `` element.
    In this environment, the listener is internal to the adapter.
    For this reason, you must define the container by using a normal Spring `` definition, as the following example shows:

    ```
    class="org.springframework.amqp.rabbit.listener.SimpleMessageListenerContainer">




    ```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Even though the Spring Integration JMS and AMQP support is similar, important differences exist.
    The JMS inbound channel adapter is using a `JmsDestinationPollingSource` under the covers and expects a configured poller.
    The AMQP inbound channel adapter uses an `AbstractMessageListenerContainer` and is message driven.
    In that regard, it is more similar to the JMS message-driven channel adapter.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 5.5, the `AmqpInboundChannelAdapter` can be configured with an `org.springframework.amqp.rabbit.retry.MessageRecoverer` strategy which is used in the `RecoveryCallback` when the retry operation is called internally. +See `setMessageRecoverer()` JavaDocs for more information. + +#### Batched Messages + +See [the Spring AMQP Documentation](https://docs.spring.io/spring-amqp/docs/current/reference/html/#template-batching) for more information about batched messages. + +To produce batched messages with Spring Integration, simply configure the outbound endpoint with a `BatchingRabbitTemplate`. + +When receiving batched messages, by default, the listener containers extract each fragment message and the adapter will produce a `Message` for each fragment. +Starting with version 5.2, if the container’s `deBatchingEnabled` property is set to `false`, the de-batching is performed by the adapter instead, and a single `Message>` is produced with the payload being a list of the fragment payloads (after conversion if appropriate). + +The default `BatchingStrategy` is the `SimpleBatchingStrategy`, but this can be overridden on the adapter. 
+ +| |The `org.springframework.amqp.rabbit.retry.MessageBatchRecoverer` must be used with batches when recovery is required for retry operations.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------| + +### Polled Inbound Channel Adapter + +#### Overview + +Version 5.0.1 introduced a polled channel adapter, letting you fetch individual messages on demand — for example, with a `MessageSourcePollingTemplate` or a poller. +See [Deferred Acknowledgment Pollable Message Source](./polling-consumer.html#deferred-acks-message-source) for more information. + +It does not currently support XML configuration. + +The following example shows how to configure an `AmqpMessageSource`: + +Java DSL + +``` +@Bean +public IntegrationFlow flow() { + return IntegrationFlows.from(Amqp.inboundPolledAdapter(connectionFactory(), DSL_QUEUE), + e -> e.poller(Pollers.fixedDelay(1_000)).autoStartup(false)) + .handle(p -> { + ... + }) + .get(); +} +``` + +Java + +``` +@Bean +public AmqpMessageSource source(ConnectionFactory connectionFactory) { + return new AmqpMessageSource(connectionFactory, "someQueue"); +} +``` + +See the [Javadoc](https://docs.spring.io/spring-integration/api/org/springframework/integration/amqp/inbound/AmqpMessageSource.html) for configuration properties. + +XML + +``` +This adapter currently does not have XML configuration support. +``` + +#### Batched Messages + +See [Batched Messages](#amqp-debatching). + +For the polled adapter, there is no listener container, batched messages are always debatched (if the `BatchingStrategy` supports doing so). + +### Inbound Gateway + +The inbound gateway supports all the attributes on the inbound channel adapter (except that 'channel' is replaced by 'request-channel'), plus some additional attributes. 
+The following listing shows the available attributes: + +Java DSL + +``` +@Bean // return the upper cased payload +public IntegrationFlow amqpInboundGateway(ConnectionFactory connectionFactory) { + return IntegrationFlows.from(Amqp.inboundGateway(connectionFactory, "foo")) + .transform(String.class, String::toUpperCase) + .get(); +} +``` + +Java + +``` +@Bean +public MessageChannel amqpInputChannel() { + return new DirectChannel(); +} + +@Bean +public AmqpInboundGateway inbound(SimpleMessageListenerContainer listenerContainer, + @Qualifier("amqpInputChannel") MessageChannel channel) { + AmqpInboundGateway gateway = new AmqpInboundGateway(listenerContainer); + gateway.setRequestChannel(channel); + gateway.setDefaultReplyTo("bar"); + return gateway; +} + +@Bean +public SimpleMessageListenerContainer container(ConnectionFactory connectionFactory) { + SimpleMessageListenerContainer container = + new SimpleMessageListenerContainer(connectionFactory); + container.setQueueNames("foo"); + container.setConcurrentConsumers(2); + // ... + return container; +} + +@Bean +@ServiceActivator(inputChannel = "amqpInputChannel") +public MessageHandler handler() { + return new AbstractReplyProducingMessageHandler() { + + @Override + protected Object handleRequestMessage(Message requestMessage) { + return "reply to " + requestMessage.getPayload(); + } + + }; +} +``` + +XML + +``` + (9) +``` + +|**1**| The Unique ID for this adapter.
    Optional. | +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Message channel to which converted messages are sent.
    Required. | +|**3**| A reference to an `AmqpHeaderMapper` to use when receiving AMQP Messages.
    Optional.
    By default, only standard AMQP properties (such as `contentType`) are copied to and from Spring Integration `MessageHeaders`.
    Any user-defined headers within the AMQP `MessageProperties` are not copied to or from an AMQP message by the default `DefaultAmqpHeaderMapper`.
    Not allowed if 'request-header-names' or 'reply-header-names' is provided. | +|**4**| Comma-separated list of names of AMQP Headers to be mapped from the AMQP request into the `MessageHeaders`.
    This attribute can be provided only if the 'header-mapper' reference is not provided.
    The values in this list can also be simple patterns to be matched against the header names (e.g. `"*"` or `"thing1*, thing2"` or `"*thing1"`). | +|**5**| Comma-separated list of names of `MessageHeaders` to be mapped into the AMQP message properties of the AMQP reply message.
    All standard Headers (such as `contentType`) are mapped to AMQP Message Properties, while user-defined headers are mapped to the 'headers' property.
    This attribute can only be provided if the 'header-mapper' reference is not provided.
    The values in this list can also be simple patterns to be matched against the header names (for example, `"*"` or `"foo*, bar"` or `"*foo"`). | +|**6**| Message Channel where reply Messages are expected.
    Optional. | +|**7**| Sets the `receiveTimeout` on the underlying `o.s.i.core.MessagingTemplate` for receiving messages from the reply channel.
    If not specified, this property defaults to `1000` (1 second).
    Only applies if the container thread hands off to another thread before the reply is sent. | +|**8**| The customized `AmqpTemplate` bean reference (to have more control over the reply messages to send).
    You can provide an alternative implementation to the `RabbitTemplate`. | |**9**|The `replyTo` `o.s.amqp.core.Address` to be used when the `requestMessage` does not have a `replyTo` property.<br/>
    If this option is not specified, no `amqp-template` is provided, no `replyTo` property exists in the request message, and
    an `IllegalStateException` is thrown because the reply cannot be routed.
    If this option is not specified and an external `amqp-template` is provided, no exception is thrown.
    You must either specify this option or configure a default `exchange` and `routingKey` on that template,
    if you anticipate cases when no `replyTo` property exists in the request message.| + +See the note in [Inbound Channel Adapter](#amqp-inbound-channel-adapter) about configuring the `listener-container` attribute. + +Starting with version 5.5, the `AmqpInboundChannelAdapter` can be configured with an `org.springframework.amqp.rabbit.retry.MessageRecoverer` strategy which is used in the `RecoveryCallback` when the retry operation is called internally. +See `setMessageRecoverer()` JavaDocs for more information. + +#### Batched Messages + +See [Batched Messages](#amqp-debatching). + +### Inbound Endpoint Acknowledge Mode + +By default, the inbound endpoints use the `AUTO` acknowledge mode, which means the container automatically acknowledges the message when the downstream integration flow completes (or a message is handed off to another thread by using a `QueueChannel` or `ExecutorChannel`). +Setting the mode to `NONE` configures the consumer such that acknowledgments are not used at all (the broker automatically acknowledges the message as soon as it is sent). +Setting the mode to `MANUAL` lets user code acknowledge the message at some other point during processing. +To support this, with this mode, the endpoints provide the `Channel` and `deliveryTag` in the `amqp_channel` and `amqp_deliveryTag` headers, respectively. + +You can perform any valid Rabbit command on the `Channel` but, generally, only `basicAck` and `basicNack` (or `basicReject`) are used. +In order to not interfere with the operation of the container, you should not retain a reference to the channel and use it only in the context of the current message. 
+ +| |Since the `Channel` is a reference to a “live” object, it cannot be serialized and is lost if a message is persisted.| +|---|---------------------------------------------------------------------------------------------------------------------| + +The following example shows how you might use `MANUAL` acknowledgement: + +``` +@ServiceActivator(inputChannel = "foo", outputChannel = "bar") +public Object handle(@Payload String payload, @Header(AmqpHeaders.CHANNEL) Channel channel, + @Header(AmqpHeaders.DELIVERY_TAG) Long deliveryTag) throws Exception { + + // Do some processing + + if (allOK) { + channel.basicAck(deliveryTag, false); + + // perhaps do some more processing + + } + else { + channel.basicNack(deliveryTag, false, true); + } + return someResultForDownStreamProcessing; +} +``` + +### Outbound Endpoints + +The following outbound endpoints have many similar configuration options. +Starting with version 5.2, the `confirm-timeout` has been added. +Normally, when publisher confirms are enabled, the broker will quickly return an ack (or nack) which will be sent to the appropriate channel. +If a channel is closed before the confirm is received, the Spring AMQP framework will synthesize a nack. +"Missing" acks should never occur but, if you set this property, the endpoint will periodically check for them and synthesize a nack if the time elapses without a confirm being received. 
+ +### Outbound Channel Adapter + +The following example shows the available properties for an AMQP outbound channel adapter: + +Java DSL + +``` +@Bean +public IntegrationFlow amqpOutbound(AmqpTemplate amqpTemplate, + MessageChannel amqpOutboundChannel) { + return IntegrationFlows.from(amqpOutboundChannel) + .handle(Amqp.outboundAdapter(amqpTemplate) + .routingKey("queue1")) // default exchange - route to queue 'queue1' + .get(); +} +``` + +Java + +``` +@Bean +@ServiceActivator(inputChannel = "amqpOutboundChannel") +public AmqpOutboundEndpoint amqpOutbound(AmqpTemplate amqpTemplate) { + AmqpOutboundEndpoint outbound = new AmqpOutboundEndpoint(amqpTemplate); + outbound.setRoutingKey("queue1"); // default exchange - route to queue 'queue1' + return outbound; +} + +@Bean +public MessageChannel amqpOutboundChannel() { + return new DirectChannel(); +} +``` + +XML + +``` + (20) +``` + +|**1** | The unique ID for this adapter.
    Optional. | +|------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2** | Message channel to which messages should be sent to have them converted and published to an AMQP exchange.
    Required. | +|**3** | Bean reference to the configured AMQP template.
    Optional (defaults to `amqpTemplate`). | +|**4** | The name of the AMQP exchange to which messages are sent.
    If not provided, messages are sent to the default, no-name exchange.
    Mutually exclusive with 'exchange-name-expression'.
    Optional. | +|**5** | A SpEL expression that is evaluated to determine the name of the AMQP exchange to which messages are sent, with the message as the root object.
    If not provided, messages are sent to the default, no-name exchange.
    Mutually exclusive with 'exchange-name'.
    Optional. | +|**6** | The order for this consumer when multiple consumers are registered, thereby enabling load-balancing and failover.
    Optional (defaults to `Ordered.LOWEST_PRECEDENCE [=Integer.MAX_VALUE]`). | +|**7** | The fixed routing-key to use when sending messages.
    By default, this is an empty `String`.
    Mutually exclusive with 'routing-key-expression'.
    Optional. | +|**8** | A SpEL expression that is evaluated to determine the routing key to use when sending messages, with the message as the root object (for example, 'payload.key').
    By default, this is an empty `String`.
    Mutually exclusive with 'routing-key'.
    Optional. | +|**9** | The default delivery mode for messages: `PERSISTENT` or `NON_PERSISTENT`.
    Overridden if the `header-mapper` sets the delivery mode.
    If the Spring Integration message header `amqp_deliveryMode` is present, the `DefaultHeaderMapper` sets the value.
    If this attribute is not supplied and the header mapper does not set it, the default depends on the underlying Spring AMQP `MessagePropertiesConverter` used by the `RabbitTemplate`.
    If that is not customized at all, the default is `PERSISTENT`.
    Optional. | +|**10**|An expression that defines correlation data.
    When provided, this configures the underlying AMQP template to receive publisher confirmations.
    Requires a dedicated `RabbitTemplate` and a `CachingConnectionFactory` with the `publisherConfirms` property set to `true`.
    When a publisher confirmation is received and correlation data is supplied, it is written to either the `confirm-ack-channel` or the `confirm-nack-channel`, depending on the confirmation type.
    The payload of the confirmation is the correlation data, as defined by this expression.
    The message has an 'amqp\_publishConfirm' header set to `true` (`ack`) or `false` (`nack`).
    Examples: `headers['myCorrelationData']` and `payload`.
    Version 4.1 introduced the `amqp_publishConfirmNackCause` message header.
    It contains the `cause` of a 'nack' for a publisher confirmation.
    Starting with version 4.2, if the expression resolves to a `Message` instance (such as `#this`), the message emitted on the `ack`/`nack` channel is based on that message, with the additional header(s) added.
    Previously, a new message was created with the correlation data as its payload, regardless of type.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional.| +|**11**| The channel to which positive (`ack`) publisher confirms are sent.
    The payload is the correlation data defined by the `confirm-correlation-expression`.
    If the expression is `#root` or `#this`, the message is built from the original message, with the `amqp_publishConfirm` header set to `true`.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional (the default is `nullChannel`). | +|**12**| The channel to which negative (`nack`) publisher confirmations are sent.
    The payload is the correlation data defined by the `confirm-correlation-expression` (if there is no `ErrorMessageStrategy` configured).
    If the expression is `#root` or `#this`, the message is built from the original message, with the `amqp_publishConfirm` header set to `false`.
    When there is an `ErrorMessageStrategy`, the message is an `ErrorMessage` with a `NackedAmqpMessageException` payload.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional (the default is `nullChannel`). | +|**13**| When set, the adapter will synthesize a negative acknowledgment (nack) if a publisher confirm is not received within this time in milliseconds.
    Pending confirms are checked every 50% of this value, so the actual time a nack is sent will be between 1x and 1.5x this value.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Default none (nacks will not be generated). | +|**14**| When set to true, the calling thread will block, waiting for a publisher confirmation.
    This requires a `RabbitTemplate` configured for confirms as well as a `confirm-correlation-expression`.
    The thread will block for up to `confirm-timeout` (or 5 seconds by default).
    If a timeout occurs, a `MessageTimeoutException` will be thrown.
    If returns are enabled and a message is returned, or any other exception occurs while awaiting the confirm, a `MessageHandlingException` will be thrown, with an appropriate message. | +|**15**| The channel to which returned messages are sent.
    When provided, the underlying AMQP template is configured to return undeliverable messages to the adapter.
    When there is no `ErrorMessageStrategy` configured, the message is constructed from the data received from AMQP, with the following additional headers: `amqp_returnReplyCode`, `amqp_returnReplyText`, `amqp_returnExchange`, `amqp_returnRoutingKey`.
    When there is an `ErrorMessageStrategy`, the message is an `ErrorMessage` with a `ReturnedAmqpMessageException` payload.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional. | +|**16**| A reference to an `ErrorMessageStrategy` implementation used to build `ErrorMessage` instances when sending returned or negatively acknowledged messages. | +|**17**| A reference to an `AmqpHeaderMapper` to use when sending AMQP Messages.
    By default, only standard AMQP properties (such as `contentType`) are copied to the Spring Integration `MessageHeaders`.
    Any user-defined headers are not copied to the message by the default `DefaultAmqpHeaderMapper`.
    Not allowed if 'request-header-names' is provided.
    Optional. | +|**18**| Comma-separated list of names of AMQP Headers to be mapped from the `MessageHeaders` to the AMQP Message.
    Not allowed if the 'header-mapper' reference is provided.
    The values in this list can also be simple patterns to be matched against the header names (e.g. `"*"` or `"thing1*, thing2"` or `"*thing1"`). | +|**19**| When set to `false`, the endpoint attempts to connect to the broker during application context initialization.
    This allows “fail fast” detection of bad configuration but also causes initialization to fail if the broker is down.
    When `true` (the default), the connection is established (if it does not already exist because some other component established it) when the first message is sent. | +|**20**| When set to `true`, payloads of type `Iterable<?>` will be sent as discrete messages on the same channel within the scope of a single `RabbitTemplate` invocation.
    Requires a `RabbitTemplate`.
    When `wait-for-confirms` is true, `RabbitTemplate.waitForConfirmsOrDie()` is invoked after the messages have been sent.
    With a transactional template, the sends will be performed in either a new transaction or one that has already been started (if present). | + +| |return-channel

    Using a `return-channel` requires a `RabbitTemplate` with the `mandatory` property set to `true` and a `CachingConnectionFactory` with the `publisherReturns` property set to `true`.
    When using multiple outbound endpoints with returns, a separate `RabbitTemplate` is needed for each endpoint.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### Outbound Gateway + +The following listing shows the possible properties for an AMQP Outbound Gateway: + +Java DSL + +``` +@Bean +public IntegrationFlow amqpOutbound(AmqpTemplate amqpTemplate) { + return f -> f.handle(Amqp.outboundGateway(amqpTemplate) + .routingKey("foo")) // default exchange - route to queue 'foo' + .get(); +} + +@MessagingGateway(defaultRequestChannel = "amqpOutbound.input") +public interface MyGateway { + + String sendToRabbit(String data); + +} +``` + +Java + +``` +@Bean +@ServiceActivator(inputChannel = "amqpOutboundChannel") +public AmqpOutboundEndpoint amqpOutbound(AmqpTemplate amqpTemplate) { + AmqpOutboundEndpoint outbound = new AmqpOutboundEndpoint(amqpTemplate); + outbound.setExpectReply(true); + outbound.setRoutingKey("foo"); // default exchange - route to queue 'foo' + return outbound; +} + +@Bean +public MessageChannel amqpOutboundChannel() { + return new DirectChannel(); +} + +@MessagingGateway(defaultRequestChannel = "amqpOutboundChannel") +public interface MyGateway { + + String sendToRabbit(String data); + +} +``` + +XML + +``` + (19) +``` + +|**1** | The unique ID for this adapter.
    Optional. | +|------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2** | Message channel to which messages are sent to have them converted and published to an AMQP exchange.
    Required. | +|**3** | Bean reference to the configured AMQP template.
    Optional (defaults to `amqpTemplate`). | +|**4** | The name of the AMQP exchange to which messages should be sent.
    If not provided, messages are sent to the default, no-name exchange.
    Mutually exclusive with 'exchange-name-expression'.
    Optional. | +|**5** | A SpEL expression that is evaluated to determine the name of the AMQP exchange to which messages should be sent, with the message as the root object.
    If not provided, messages are sent to the default, no-name exchange.
    Mutually exclusive with 'exchange-name'.
    Optional. | +|**6** | The order for this consumer when multiple consumers are registered, thereby enabling load-balancing and failover.
    Optional (defaults to `Ordered.LOWEST_PRECEDENCE [=Integer.MAX_VALUE]`). | +|**7** | Message channel to which replies should be sent after being received from an AMQP queue and converted.
    Optional. | +|**8** | The time the gateway waits when sending the reply message to the `reply-channel`.
    This only applies if the `reply-channel` can block — such as a `QueueChannel` with a capacity limit that is currently full.
    Defaults to infinity. | +|**9** | When `true`, the gateway throws an exception if no reply message is received within the `AmqpTemplate`’s `replyTimeout` property.
    Defaults to `true`. | +|**10**| The `routing-key` to use when sending messages.
    By default, this is an empty `String`.
    Mutually exclusive with 'routing-key-expression'.
    Optional. | +|**11**| A SpEL expression that is evaluated to determine the `routing-key` to use when sending messages, with the message as the root object (for example, 'payload.key').
    By default, this is an empty `String`.
    Mutually exclusive with 'routing-key'.
    Optional. | +|**12**| The default delivery mode for messages: `PERSISTENT` or `NON_PERSISTENT`.
    Overridden if the `header-mapper` sets the delivery mode.
    If the Spring Integration message header `amqp_deliveryMode` is present, the `DefaultHeaderMapper` sets the value.
    If this attribute is not supplied and the header mapper does not set it, the default depends on the underlying Spring AMQP `MessagePropertiesConverter` used by the `RabbitTemplate`.
    If that is not customized at all, the default is `PERSISTENT`.
    Optional. | +|**13**|Since version 4.2.
    An expression defining correlation data.
    When provided, this configures the underlying AMQP template to receive publisher confirms.
    Requires a dedicated `RabbitTemplate` and a `CachingConnectionFactory` with the `publisherConfirms` property set to `true`.
    When a publisher confirm is received and correlation data is supplied, it is written to either the `confirm-ack-channel` or the `confirm-nack-channel`, depending on the confirmation type.
    The payload of the confirm is the correlation data, as defined by this expression.
    The message has a header 'amqp\_publishConfirm' set to `true` (`ack`) or `false` (`nack`).
    For `nack` confirmations, Spring Integration provides an additional header `amqp_publishConfirmNackCause`.
    Examples: `headers['myCorrelationData']` and `payload`.
    If the expression resolves to a `Message` instance (such as `#this`), the message
    emitted on the `ack`/`nack` channel is based on that message, with the additional headers added.
    Previously, a new message was created with the correlation data as its payload, regardless of type.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional.| +|**14**| The channel to which positive (`ack`) publisher confirmations are sent.
    The payload is the correlation data defined by `confirm-correlation-expression`.
    If the expression is `#root` or `#this`, the message is built from the original message, with the `amqp_publishConfirm` header set to `true`.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional (the default is `nullChannel`). | +|**15**| The channel to which negative (`nack`) publisher confirmations are sent.
    The payload is the correlation data defined by `confirm-correlation-expression` (if there is no `ErrorMessageStrategy` configured).
    If the expression is `#root` or `#this`, the message is built from the original message, with the `amqp_publishConfirm` header set to `false`.
    When there is an `ErrorMessageStrategy`, the message is an `ErrorMessage` with a `NackedAmqpMessageException` payload.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional (the default is `nullChannel`). | +|**16**| When set, the gateway will synthesize a negative acknowledgment (nack) if a publisher confirm is not received within this time in milliseconds.
    Pending confirms are checked every 50% of this value, so the actual time a nack is sent will be between 1x and 1.5x this value.
    Default none (nacks will not be generated). | +|**17**| The channel to which returned messages are sent.
    When provided, the underlying AMQP template is configured to return undeliverable messages to the adapter.
    When there is no `ErrorMessageStrategy` configured, the message is constructed from the data received from AMQP, with the following additional headers: `amqp_returnReplyCode`, `amqp_returnReplyText`, `amqp_returnExchange`, and `amqp_returnRoutingKey`.
    When there is an `ErrorMessageStrategy`, the message is an `ErrorMessage` with a `ReturnedAmqpMessageException` payload.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional. | +|**18**| A reference to an `ErrorMessageStrategy` implementation used to build `ErrorMessage` instances when sending returned or negatively acknowledged messages. | +|**19**| When set to `false`, the endpoint attempts to connect to the broker during application context initialization.
    This allows “fail fast” detection of bad configuration by logging an error message if the broker is down.
    When `true` (the default), the connection is established (if it does not already exist because some other component established it) when the first message is sent. | + +| |return-channel

    Using a `return-channel` requires a `RabbitTemplate` with the `mandatory` property set to `true` and a `CachingConnectionFactory` with the `publisherReturns` property set to `true`.
    When using multiple outbound endpoints with returns, a separate `RabbitTemplate` is needed for each endpoint.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The underlying `AmqpTemplate` has a default `replyTimeout` of five seconds.
    If you require a longer timeout, you must configure it on the `template`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------| + +Note that the only difference between the outbound adapter and outbound gateway configuration is the setting of the`expectReply` property. + +### Asynchronous Outbound Gateway + +The gateway discussed in the previous section is synchronous, in that the sending thread is suspended until a +reply is received (or a timeout occurs). +Spring Integration version 4.3 added an asynchronous gateway, which uses the `AsyncRabbitTemplate` from Spring AMQP. +When a message is sent, the thread returns immediately after the send operation completes, and, when the message is received, the reply is sent on the template’s listener container thread. +This can be useful when the gateway is invoked on a poller thread. +The thread is released and is available for other tasks in the framework. 
+ +The following listing shows the possible configuration options for an AMQP asynchronous outbound gateway: + +Java DSL + +``` +@Configuration +public class AmqpAsyncApplication { + + @Bean + public IntegrationFlow asyncAmqpOutbound(AsyncRabbitTemplate asyncRabbitTemplate) { + return f -> f + .handle(Amqp.asyncOutboundGateway(asyncRabbitTemplate) + .routingKey("queue1")); // default exchange - route to queue 'queue1' + } + + @MessagingGateway(defaultRequestChannel = "asyncAmqpOutbound.input") + public interface MyGateway { + + String sendToRabbit(String data); + + } + +} +``` + +Java + +``` +@Configuration +public class AmqpAsyncConfig { + + @Bean + @ServiceActivator(inputChannel = "amqpOutboundChannel") + public AsyncAmqpOutboundGateway amqpOutbound(AsyncRabbitTemplate asyncTemplate) { + AsyncAmqpOutboundGateway outbound = new AsyncAmqpOutboundGateway(asyncTemplate); + outbound.setRoutingKey("foo"); // default exchange - route to queue 'foo' + return outbound; + } + + @Bean + public AsyncRabbitTemplate asyncTemplate(RabbitTemplate rabbitTemplate, + SimpleMessageListenerContainer replyContainer) { + + return new AsyncRabbitTemplate(rabbitTemplate, replyContainer); + } + + @Bean + public SimpleMessageListenerContainer replyContainer() { + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(ccf); + container.setQueueNames("asyncRQ1"); + return container; + } + + @Bean + public MessageChannel amqpOutboundChannel() { + return new DirectChannel(); + } + +} +``` + +XML + +``` + (18) +``` + +|**1** | The unique ID for this adapter.
    Optional. | +|------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2** | Message channel to which messages should be sent in order to have them converted and published to an AMQP exchange.
    Required. | +|**3** | Bean reference to the configured `AsyncRabbitTemplate`.
    Optional (it defaults to `asyncRabbitTemplate`). | +|**4** | The name of the AMQP exchange to which messages should be sent.
    If not provided, messages are sent to the default, no-name exchange.
    Mutually exclusive with 'exchange-name-expression'.
    Optional. | +|**5** | A SpEL expression that is evaluated to determine the name of the AMQP exchange to which messages are sent, with the message as the root object.
    If not provided, messages are sent to the default, no-name exchange.
    Mutually exclusive with 'exchange-name'.
    Optional. | +|**6** | The order for this consumer when multiple consumers are registered, thereby enabling load-balancing and failover.
    Optional (it defaults to `Ordered.LOWEST_PRECEDENCE [=Integer.MAX_VALUE]`). | +|**7** | Message channel to which replies should be sent after being received from an AMQP queue and converted.
    Optional. | +|**8** | The time the gateway waits when sending the reply message to the `reply-channel`.
    This only applies if the `reply-channel` can block — such as a `QueueChannel` with a capacity limit that is currently full.
    The default is infinity. | +|**9** | When no reply message is received within the `AsyncRabbitTemplate`’s `receiveTimeout` property and this setting is `true`, the gateway sends an error message to the inbound message’s `errorChannel` header.
    When no reply message is received within the `AsyncRabbitTemplate`’s `receiveTimeout` property and this setting is `false`, the gateway sends an error message to the default `errorChannel` (if available).
    It defaults to `true`. | +|**10**| The routing-key to use when sending Messages.
    By default, this is an empty `String`.
    Mutually exclusive with 'routing-key-expression'.
    Optional. | +|**11**| A SpEL expression that is evaluated to determine the routing-key to use when sending messages,
    with the message as the root object (for example, 'payload.key').
    By default, this is an empty `String`.
    Mutually exclusive with 'routing-key'.
    Optional. | +|**12**| The default delivery mode for messages: `PERSISTENT` or `NON_PERSISTENT`.
    Overridden if the `header-mapper` sets the delivery mode.
    If the Spring Integration message header (`amqp_deliveryMode`) is present, the `DefaultHeaderMapper` sets the value.
    If this attribute is not supplied and the header mapper does not set it, the default depends on the underlying Spring AMQP `MessagePropertiesConverter` used by the `RabbitTemplate`.
    If that is not customized, the default is `PERSISTENT`.
    Optional. | +|**13**|An expression that defines correlation data.
    When provided, this configures the underlying AMQP template to receive publisher confirmations.
    Requires a dedicated `RabbitTemplate` and a `CachingConnectionFactory` with its `publisherConfirms` property set to `true`.
    When a publisher confirmation is received and correlation data is supplied, the confirmation is written to either the `confirm-ack-channel` or the `confirm-nack-channel`, depending on the confirmation type.
    The payload of the confirmation is the correlation data as defined by this expression, and the message has its 'amqp\_publishConfirm' header set to `true` (`ack`) or `false` (`nack`).
    For `nack` instances, an additional header (`amqp_publishConfirmNackCause`) is provided.
    Examples: `headers['myCorrelationData']`, `payload`.
    If the expression resolves to a `Message` instance (such as `#this`), the message emitted on the `ack`/`nack` channel is based on that message, with the additional headers added.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional.| +|**14**| The channel to which positive (`ack`) publisher confirmations are sent.
    The payload is the correlation data defined by the `confirm-correlation-expression`.
    Requires the underlying `AsyncRabbitTemplate` to have its `enableConfirms` property set to `true`.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional (the default is `nullChannel`). | +|**15**| Since version 4.2.
    The channel to which negative (`nack`) publisher confirmations are sent.
    The payload is the correlation data defined by the `confirm-correlation-expression`.
    Requires the underlying `AsyncRabbitTemplate` to have its `enableConfirms` property set to `true`.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional (the default is `nullChannel`). | +|**16**| When set, the gateway will synthesize a negative acknowledgment (nack) if a publisher confirm is not received within this time in milliseconds.
    Pending confirms are checked every 50% of this value, so the actual time a nack is sent will be between 1x and 1.5x this value.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Default none (nacks will not be generated). | +|**17**| The channel to which returned messages are sent.
    When provided, the underlying AMQP template is configured to return undeliverable messages to the gateway.
    The message is constructed from the data received from AMQP, with the following additional headers: `amqp_returnReplyCode`, `amqp_returnReplyText`, `amqp_returnExchange`, and `amqp_returnRoutingKey`.
    Requires the underlying `AsyncRabbitTemplate` to have its `mandatory` property set to `true`.
    Also see [Alternative Mechanism for Publisher Confirms and Returns](#alternative-confirms-returns).
    Optional. | +|**18**| When set to `false`, the endpoint tries to connect to the broker during application context initialization.
    Doing so allows “fail fast” detection of bad configuration, by logging an error message if the broker is down.
    When `true` (the default), the connection is established (if it does not already exist because some other component established
    it) when the first message is sent. | + +See also [Asynchronous Service Activator](./service-activator.html#async-service-activator) for more information. + +| |RabbitTemplate

    When you use confirmations and returns, we recommend that the `RabbitTemplate` wired into the `AsyncRabbitTemplate` be dedicated.
    Otherwise, unexpected side-effects may be encountered.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### Alternative Mechanism for Publisher Confirms and Returns + +When the connection factory is configured for publisher confirms and returns, the sections above discuss the configuration of message channels to receive the confirms and returns asynchronously. +Starting with version 5.4, there is an additional mechanism which is generally easier to use. + +In this case, do not configure a `confirm-correlation-expression` or the confirm and return channels. +Instead, add a `CorrelationData` instance in the `AmqpHeaders.PUBLISH_CONFIRM_CORRELATION` header; you can then wait for the result(s) later, by checking the state of the future in the `CorrelationData` instances for which you have sent messages. +The `returnedMessage` field will always be populated (if a message is returned) before the future is completed. + +``` +CorrelationData corr = new CorrelationData("someId"); // <--- Unique "id" is required for returns +someFlow.getInputChannel().send(MessageBuilder.withPayload("test") + .setHeader("rk", "someKeyThatWontRoute") + .setHeader(AmqpHeaders.PUBLISH_CONFIRM_CORRELATION, corr) + .build()); +... +try { + Confirm Confirm = corr.getFuture().get(10, TimeUnit.SECONDS); + Message returned = corr.getReturnedMessage(); + if (returned !- null) { + // message could not be routed + } +} +catch { ... } +``` + +To improve performance, you may wish to send multiple messages and wait for the confirmations later, rather than one-at-a-time. +The returned message is the raw message after conversion; you can subclass `CorrelationData` with whatever additional data you need. 
+ +### Inbound Message Conversion + +Inbound messages, arriving at the channel adapter or gateway, are converted to the `spring-messaging` `Message` payload using a message converter. +By default, a `SimpleMessageConverter` is used, which handles java serialization and text. +Headers are mapped using the `DefaultHeaderMapper.inboundMapper()` by default. +If a conversion error occurs, and there is no error channel defined, the exception is thrown to the container and handled by the listener container’s error handler. +The default error handler treats conversion errors as fatal and the message will be rejected (and routed to a dead-letter exchange, if the queue is so configured). +If an error channel is defined, the `ErrorMessage` payload is a `ListenerExecutionFailedException` with properties `failedMessage` (the Spring AMQP message that could not be converted) and the `cause`. +If the container `AcknowledgeMode` is `AUTO` (the default) and the error flow consumes the error without throwing an exception, the original message will be acknowledged. +If the error flow throws an exception, the exception type, in conjunction with the container’s error handler, will determine whether or not the message is requeued. +If the container is configured with `AcknowledgeMode.MANUAL`, the payload is a `ManualAckListenerExecutionFailedException` with additional properties `channel` and `deliveryTag`. +This enables the error flow to call `basicAck` or `basicNack` (or `basicReject`) for the message, to control its disposition. + +### Outbound Message Conversion + +Spring AMQP 1.4 introduced the `ContentTypeDelegatingMessageConverter`, where the actual converter is selected based +on the incoming content type message property. +This can be used by inbound endpoints. + +As of Spring Integration version 4.3, you can use the `ContentTypeDelegatingMessageConverter` on outbound endpoints as well, with the `contentType` header specifying which converter is used. 
+ +The following example configures a `ContentTypeDelegatingMessageConverter`, with the default converter being the `SimpleMessageConverter` (which handles Java serialization and plain text), together with a JSON converter: + +``` + + + + + + + + + + + + + + + +``` + +Sending a message to `ctRequestChannel` with the `contentType` header set to `application/json` causes the JSON converter to be selected. + +This applies to both the outbound channel adapter and gateway. + +| |Starting with version 5.0, headers that are added to the `MessageProperties` of the outbound message are never overwritten by mapped headers (by default).
    Previously, this was only the case if the message converter was a `ContentTypeDelegatingMessageConverter` (in that case, the header was mapped first so that the proper converter could be selected).
    For other converters, such as the `SimpleMessageConverter`, mapped headers overwrote any headers added by the converter.
    This caused problems when an outbound message had some leftover `contentType` headers (perhaps from an inbound channel adapter) and the correct outbound `contentType` was incorrectly overwritten.
    The work-around was to use a header filter to remove the header before sending the message to the outbound endpoint.

    There are, however, cases where the previous behavior is desired — for example, when a `String` payload that contains JSON, the `SimpleMessageConverter` is not aware of the content and sets the `contentType` message property to `text/plain` but your application would like to override that to `application/json` by setting the `contentType` header of the message sent to the outbound endpoint.
    The `ObjectToJsonTransformer` does exactly that (by default).

    There is now a property called `headersMappedLast` on the outbound channel adapter and gateway (as well as on AMQP-backed channels).
    Setting this to `true` restores the behavior of overwriting the property added by the converter.

    Starting with version 5.1.9, a similar `replyHeadersMappedLast` is provided for the `AmqpInboundGateway` when we produce a reply and would like to override headers populated by the converter.
    See its JavaDocs for more information.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### Outbound User ID + +Spring AMQP version 1.6 introduced a mechanism to allow the specification of a default user ID for outbound messages. 
+It has always been possible to set the `AmqpHeaders.USER_ID` header, which now takes precedence over the default. +This might be useful to message recipients. +For inbound messages, if the message publisher sets the property, it is made available in the `AmqpHeaders.RECEIVED_USER_ID` header. +Note that RabbitMQ [validates that the user ID is the actual user ID for the connection or that the connection allows impersonation](https://www.rabbitmq.com/validated-user-id.html). + +To configure a default user ID for outbound messages, configure it on a `RabbitTemplate` and configure the outbound adapter or gateway to use that template. +Similarly, to set the user ID property on replies, inject an appropriately configured template into the inbound gateway. +See the [Spring AMQP documentation](https://docs.spring.io/spring-amqp/reference/html/_reference.html#template-user-id) for more information. + +### Delayed Message Exchange + +Spring AMQP supports the [RabbitMQ Delayed Message Exchange Plugin](https://docs.spring.io/spring-amqp/reference/html/#delayed-message-exchange). +For inbound messages, the `x-delay` header is mapped to the `AmqpHeaders.RECEIVED_DELAY` header. +Setting the `AMQPHeaders.DELAY` header causes the corresponding `x-delay` header to be set in outbound messages. +You can also specify the `delay` and `delayExpression` properties on outbound endpoints (`delay-expression` when using XML configuration). +These properties take precedence over the `AmqpHeaders.DELAY` header. + +### AMQP-backed Message Channels + +There are two message channel implementations available. +One is point-to-point, and the other is publish-subscribe. +Both of these channels provide a wide range of configuration attributes for the underlying `AmqpTemplate` and`SimpleMessageListenerContainer` (as shown earlier in this chapter for the channel adapters and gateways). +However, the examples we show here have minimal configuration. 
+Explore the XML schema to view the available attributes. + +A point-to-point channel might look like the following example: + +``` + +``` + +Under the covers, the preceding example causes a `Queue` named `si.p2pChannel` to be declared, and this channel sends to that `Queue` (technically, by sending to the no-name direct exchange with a routing key that matches the name of this `Queue`). +This channel also registers a consumer on that `Queue`. +If you want the channel to be “pollable” instead of message-driven, provide the `message-driven` flag with a value of `false`, as the following example shows: + +``` + +``` + +A publish-subscribe channel might look like the following: + +``` + +``` + +Under the covers, the preceding example causes a fanout exchange named `si.fanout.pubSubChannel` to be declared, and this channel sends to that fanout exchange. +This channel also declares a server-named exclusive, auto-delete, non-durable `Queue` and binds that to the fanout exchange while registering a consumer on that `Queue` to receive messages. +There is no “pollable” option for a publish-subscribe-channel. +It must be message-driven. + +Starting with version 4.1, AMQP-backed message channels (in conjunction with `channel-transacted`) support`template-channel-transacted` to separate `transactional` configuration for the `AbstractMessageListenerContainer` and +for the `RabbitTemplate`. +Note that, previously, `channel-transacted` was `true` by default. +Now, by default, it is `false` for the `AbstractMessageListenerContainer`. + +Prior to version 4.3, AMQP-backed channels only supported messages with `Serializable` payloads and headers. +The entire message was converted (serialized) and sent to RabbitMQ. +Now, you can set the `extract-payload` attribute (or `setExtractPayload()` when using Java configuration) to `true`. +When this flag is `true`, the message payload is converted and the headers are mapped, in a manner similar to when you use channel adapters. 
+This arrangement lets AMQP-backed channels be used with non-serializable payloads (perhaps with another message converter, such as the `Jackson2JsonMessageConverter`). +See [AMQP Message Headers](#amqp-message-headers) for more about the default mapped headers. +You can modify the mapping by providing custom mappers that use the `outbound-header-mapper` and `inbound-header-mapper` attributes. +You can now also specify a `default-delivery-mode`, which is used to set the delivery mode when there is no `amqp_deliveryMode` header. +By default, Spring AMQP `MessageProperties` uses `PERSISTENT` delivery mode. + +| |As with other persistence-backed channels, AMQP-backed channels are intended to provide message persistence to avoid message loss.
    They are not intended to distribute work to other peer applications.
    For that purpose, use channel adapters instead.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 5.0, the pollable channel now blocks the poller thread for the specified `receiveTimeout` (the default is 1 second).
    Previously, unlike other `PollableChannel` implementations, the thread returned immediately to the scheduler if no message was available, regardless of the receive timeout.
    Blocking is a little more expensive than using a `basicGet()` to retrieve a message (with no timeout), because a consumer has to be created to receive each message.
    To restore the previous behavior, set the poller’s `receiveTimeout` to 0.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Configuring with Java Configuration + +The following example shows how to configure the channels with Java configuration: + +``` +@Bean +public AmqpChannelFactoryBean pollable(ConnectionFactory connectionFactory) { + AmqpChannelFactoryBean factoryBean = new AmqpChannelFactoryBean(); + factoryBean.setConnectionFactory(connectionFactory); + factoryBean.setQueueName("foo"); + factoryBean.setPubSub(false); + return factoryBean; +} + +@Bean +public AmqpChannelFactoryBean messageDriven(ConnectionFactory connectionFactory) { + AmqpChannelFactoryBean factoryBean = new AmqpChannelFactoryBean(true); + factoryBean.setConnectionFactory(connectionFactory); + factoryBean.setQueueName("bar"); + factoryBean.setPubSub(false); + return factoryBean; +} + +@Bean +public AmqpChannelFactoryBean pubSub(ConnectionFactory connectionFactory) { + AmqpChannelFactoryBean factoryBean = new AmqpChannelFactoryBean(true); + factoryBean.setConnectionFactory(connectionFactory); + factoryBean.setQueueName("baz"); + factoryBean.setPubSub(false); + return factoryBean; +} +``` + +#### Configuring with the Java DSL + +The following example shows how to configure the channels with the Java DSL: + +``` +@Bean +public IntegrationFlow pollableInFlow(ConnectionFactory connectionFactory) { + return IntegrationFlows.from(...) + ... 
+ .channel(Amqp.pollableChannel(connectionFactory) + .queueName("foo")) + ... + .get(); +} + +@Bean +public IntegrationFlow messageDrivenInFow(ConnectionFactory connectionFactory) { + return IntegrationFlows.from(...) + ... + .channel(Amqp.channel(connectionFactory) + .queueName("bar")) + ... + .get(); +} + +@Bean +public IntegrationFlow pubSubInFlow(ConnectionFactory connectionFactory) { + return IntegrationFlows.from(...) + ... + .channel(Amqp.publishSubscribeChannel(connectionFactory) + .queueName("baz")) + ... + .get(); +} +``` + +### AMQP Message Headers + +#### Overview + +The Spring Integration AMQP Adapters automatically map all AMQP properties and headers. +(This is a change from 4.3 - previously, only standard headers were mapped). +By default, these properties are copied to and from Spring Integration `MessageHeaders` by using the[`DefaultAmqpHeaderMapper`](https://docs.spring.io/spring-integration/api/org/springframework/integration/amqp/support/DefaultAmqpHeaderMapper.html). + +You can pass in your own implementation of AMQP-specific header mappers, as the adapters have properties to support doing so. + +Any user-defined headers within the AMQP [`MessageProperties`](https://docs.spring.io/spring-amqp/api/org/springframework/amqp/core/MessageProperties.html) are copied to or from an AMQP message, unless explicitly negated by the `requestHeaderNames` or `replyHeaderNames` properties of the `DefaultAmqpHeaderMapper`. +By default, for an outbound mapper, no `x-*` headers are mapped. +See the [caution](#header-copy-caution) that appears later in this section for why. + +To override the default and revert to the pre-4.3 behavior, use `STANDARD_REQUEST_HEADERS` and`STANDARD_REPLY_HEADERS` in the properties. + +| |When mapping user-defined headers, the values can also contain simple wildcard patterns (such as `thing*` or `*thing`) to be matched.
    The `*` matches all headers.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 4.1, the `AbstractHeaderMapper` (a `DefaultAmqpHeaderMapper` superclass) lets the `NON_STANDARD_HEADERS` token be configured for the `requestHeaderNames` and `replyHeaderNames` properties (in addition to the existing `STANDARD_REQUEST_HEADERS` and `STANDARD_REPLY_HEADERS`) to map all user-defined headers. + +The `org.springframework.amqp.support.AmqpHeaders` class identifies the default headers that are used by the `DefaultAmqpHeaderMapper`: + +* `amqp_appId` + +* `amqp_clusterId` + +* `amqp_contentEncoding` + +* `amqp_contentLength` + +* `content-type` (see [The `contentType` Header](#amqp-content-type)) + +* `amqp_correlationId` + +* `amqp_delay` + +* `amqp_deliveryMode` + +* `amqp_deliveryTag` + +* `amqp_expiration` + +* `amqp_messageCount` + +* `amqp_messageId` + +* `amqp_receivedDelay` + +* `amqp_receivedDeliveryMode` + +* `amqp_receivedExchange` + +* `amqp_receivedRoutingKey` + +* `amqp_redelivered` + +* `amqp_replyTo` + +* `amqp_timestamp` + +* `amqp_type` + +* `amqp_userId` + +* `amqp_publishConfirm` + +* `amqp_publishConfirmNackCause` + +* `amqp_returnReplyCode` + +* `amqp_returnReplyText` + +* `amqp_returnExchange` + +* `amqp_returnRoutingKey` + +* `amqp_channel` + +* `amqp_consumerTag` + +* `amqp_consumerQueue` + +| |As mentioned earlier in this section, using a header mapping pattern of `*` is a common way to copy all headers.
    However, this can have some unexpected side effects, because certain RabbitMQ proprietary properties/headers are also copied.
    For example, when you use [federation](https://www.rabbitmq.com/federated-exchanges.html), the received message may have a property named `x-received-from`, which contains the node that sent the message.
    If you use the wildcard character `*` for the request and reply header mapping on the inbound gateway, this header is copied, which may cause some issues with federation.
    This reply message may be federated back to the sending broker, which may think that a message is looping and, as a result, silently drop it.
    If you wish to use the convenience of wildcard header mapping, you may need to filter out some headers in the downstream flow.
    For example, to avoid copying the `x-received-from` header back to the reply, you can use a `<int:header-filter header-names="x-received-from">` before sending the reply to the AMQP inbound gateway.
    Alternatively, you can explicitly list those properties that you actually want mapped, instead of using wildcards.
    For these reasons, for inbound messages, the mapper (by default) does not map any `x-*` headers.
    It also does not map the `deliveryMode` to the `amqp_deliveryMode` header, to avoid propagation of that header from an inbound message to an outbound message.
    Instead, this header is mapped to `amqp_receivedDeliveryMode`, which is not mapped on output.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 4.3, patterns in the header mappings can be negated by preceding the pattern with `!`. +Negated patterns get priority, so a list such as `STANDARD_REQUEST_HEADERS,thing1,ba*,!thing2,!thing3,qux,!thing1` does not map `thing1` (nor `thing2` nor `thing3`). 
+The standard headers plus `bad` and `qux` are mapped. +The negation technique can be useful for example to not map JSON type headers for incoming messages when a JSON deserialization logic is done in the receiver downstream different way. +For this purpose a `!json_*` pattern should be configured for header mapper of the inbound channel adapter/gateway. + +| |If you have a user-defined header that begins with `!` that you do wish to map, you need to escape it with `\`, as follows: `STANDARD_REQUEST_HEADERS,\!myBangHeader`.
    The header named `!myBangHeader` is now mapped.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 5.1, the `DefaultAmqpHeaderMapper` will fall back to mapping `MessageHeaders.ID` and `MessageHeaders.TIMESTAMP` to `MessageProperties.messageId` and `MessageProperties.timestamp` respectively, if the corresponding `amqp_messageId` or `amqp_timestamp` headers are not present on outbound messages.
    Inbound properties will be mapped to the `amqp_*` headers as before.
    It is useful to populate the `messageId` property when message consumers are using stateful retry.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### The `contentType` Header + +Unlike other headers, the `AmqpHeaders.CONTENT_TYPE` is not prefixed with `amqp_`; this allows transparent passing of the contentType header across different technologies. +For example an inbound HTTP message sent to a RabbitMQ queue. + +The `contentType` header is mapped to Spring AMQP’s `MessageProperties.contentType` property and that is subsequently mapped to RabbitMQ’s `content_type` property. + +Prior to version 5.1, this header was also mapped as an entry in the `MessageProperties.headers` map; this was incorrect and, furthermore, the value could be wrong since the underlying Spring AMQP message converter might have changed the content type. +Such a change would be reflected in the first-class `content_type` property, but not in the RabbitMQ headers map. +Inbound mapping ignored the headers map value.`contentType` is no longer mapped to an entry in the headers map. + +### Strict Message Ordering + +This section describes message ordering for inbound and outbound messages. + +#### Inbound + +If you require strict ordering of inbound messages, you must configure the inbound listener container’s `prefetchCount` property to `1`. +This is because, if a message fails and is redelivered, it arrives after existing prefetched messages. +Since Spring AMQP version 2.0, the `prefetchCount` defaults to `250` for improved performance. 
+Strict ordering requirements come at the cost of decreased performance. + +#### Outbound + +Consider the following integration flow: + +``` +@Bean +public IntegrationFlow flow(RabbitTemplate template) { + return IntegrationFlows.from(Gateway.class) + .split(s -> s.delimiters(",")) + .transform(String::toUpperCase) + .handle(Amqp.outboundAdapter(template).routingKey("rk")) + .get(); +} +``` + +Suppose we send messages `A`, `B` and `C` to the gateway. +While it is likely that messages `A`, `B`, `C` are sent in order, there is no guarantee. +This is because the template “borrows” a channel from the cache for each send operation, and there is no guarantee that the same channel is used for each message. +One solution is to start a transaction before the splitter, but transactions are expensive in RabbitMQ and can reduce performance several hundred fold. + +To solve this problem in a more efficient manner, starting with version 5.1, Spring Integration provides the `BoundRabbitChannelAdvice` which is a `HandleMessageAdvice`. +See [Handling Message Advice](./handler-advice.html#handle-message-advice). +When applied before the splitter, it ensures that all downstream operations are performed on the same channel and, optionally, can wait until publisher confirmations for all sent messages are received (if the connection factory is configured for confirmations). +The following example shows how to use `BoundRabbitChannelAdvice`: + +``` +@Bean +public IntegrationFlow flow(RabbitTemplate template) { + return IntegrationFlows.from(Gateway.class) + .split(s -> s.delimiters(",") + .advice(new BoundRabbitChannelAdvice(template, Duration.ofSeconds(10)))) + .transform(String::toUpperCase) + .handle(Amqp.outboundAdapter(template).routingKey("rk")) + .get(); +} +``` + +Notice that the same `RabbitTemplate` (which implements `RabbitOperations`) is used in the advice and the outbound adapter. 
+The advice runs the downstream flow within the template’s `invoke` method so that all operations run on the same channel. +If the optional timeout is provided, when the flow completes, the advice calls the `waitForConfirmsOrDie` method, which throws an exception if the confirmations are not received within the specified time. + +| |There must be no thread handoffs in the downstream flow (`QueueChannel`, `ExecutorChannel`, and others).| +|---|--------------------------------------------------------------------------------------------------------| + +### AMQP Samples + +To experiment with the AMQP adapters, check out the samples available in the Spring Integration samples git repository at [https://github.com/SpringSource/spring-integration-samples](https://github.com/spring-projects/spring-integration-samples) + +Currently, one sample demonstrates the basic functionality of the Spring Integration AMQP adapter by using an outbound channel adapter and an inbound channel adapter. +As AMQP broker implementation in the sample uses [RabbitMQ](https://www.rabbitmq.com/). + +| |In order to run the example, you need a running instance of RabbitMQ.
    A local installation with just the basic defaults suffices.
    For detailed RabbitMQ installation procedures, see [https://www.rabbitmq.com/install.html](https://www.rabbitmq.com/install.html)| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Once the sample application is started, enter some text on the command prompt and a message containing that entered text is dispatched to the AMQP queue. +In return, that message is retrieved by Spring Integration and printed to the console. + +The following image illustrates the basic set of Spring Integration components used in this sample. + +![spring integration amqp sample graph](https://docs.spring.io/spring-integration/docs/current/reference/html/images/spring-integration-amqp-sample-graph.png) + +Figure 1. The Spring Integration graph of the AMQP sample \ No newline at end of file diff --git a/docs/en/spring-integration/configuration.md b/docs/en/spring-integration/configuration.md new file mode 100644 index 0000000000000000000000000000000000000000..67394295b00d4735836cc9898cfd88e55cf18512 --- /dev/null +++ b/docs/en/spring-integration/configuration.md @@ -0,0 +1,815 @@ +# Configuration + +## Configuration + +Spring Integration offers a number of configuration options. +Which option you choose depends upon your particular needs and at what level you prefer to work. +As with the Spring framework in general, you can mix and match the various techniques to suit the problem at hand. +For example, you can choose the XSD-based namespace for the majority of configuration and combine it with a handful of objects that you configure with annotations. +As much as possible, the two provide consistent naming. 
+The XML elements defined by the XSD schema match the names of the annotations, and the attributes of those XML elements match the names of annotation properties. +You can also use the API directly, but we expect most developers to choose one of the higher-level options or a combination of the namespace-based and annotation-driven configuration. + +### Namespace Support + +You can configure Spring Integration components with XML elements that map directly to the terminology and concepts of enterprise integration. +In many cases, the element names match those of the [*Enterprise Integration Patterns*](https://www.enterpriseintegrationpatterns.com/) book. + +To enable Spring Integration’s core namespace support within your Spring configuration files, add the following namespace reference and schema mapping in your top-level 'beans' element: + +``` + +``` + +(We have emphasized the lines that are particular to Spring Integration.) + +You can choose any name after "xmlns:". +We use `int` (short for Integration) for clarity, but you might prefer another abbreviation. +On the other hand, if you use an XML editor or IDE support, the availability of auto-completion may convince you to keep the longer name for clarity. +Alternatively, you can create configuration files that use the Spring Integration schema as the primary namespace, as the following example shows: + +``` + +``` + +(We have emphasized the lines that are particular to Spring Integration.) + +When using this alternative, no prefix is necessary for the Spring Integration elements. +On the other hand, if you define a generic Spring bean within the same configuration file, the bean element requires a prefix (``). +Since it is generally a good idea to modularize the configuration files themselves (based on responsibility or architectural layer), you may find it appropriate to use the latter approach in the integration-focused configuration files, since generic beans are seldom necessary within those files. 
+For the purposes of this documentation, we assume the integration namespace is the primary. + +Spring Integration provides many other namespaces. +In fact, each adapter type (JMS, file, and so on) that provides namespace support defines its elements within a separate schema. +In order to use these elements, add the necessary namespaces with an `xmlns` entry and the corresponding `schemaLocation` mapping. +For example, the following root element shows several of these namespace declarations: + +``` + + + ... + +``` + +This reference manual provides specific examples of the various elements in their corresponding chapters. +Here, the main thing to recognize is the consistency of the naming for each namespace URI and schema location. + +### Configuring the Task Scheduler + +In Spring Integration, the `ApplicationContext` plays the central role of a message bus, and you need to consider only a couple of configuration options. +First, you may want to control the central `TaskScheduler` instance. +You can do so by providing a single bean named `taskScheduler`. +This is also defined as a constant, as follows: + +``` +IntegrationContextUtils.TASK_SCHEDULER_BEAN_NAME +``` + +By default, Spring Integration relies on an instance of `ThreadPoolTaskScheduler`, as described in the [Task Execution and Scheduling](https://docs.spring.io/spring/docs/current/spring-framework-reference/integration.html#scheduling) section of the Spring Framework reference manual. +That default `TaskScheduler` starts up automatically with a pool of ten threads, but see [Global Properties](#global-properties). +If you provide your own `TaskScheduler` instance instead, you can set the 'autoStartup' property to `false` or provide your own pool size value. + +When polling consumers provide an explicit task executor reference in their configuration, the invocation of the handler methods happens within that executor’s thread pool and not the main scheduler pool. 
+However, when no task executor is provided for an endpoint’s poller, it is invoked by one of the main scheduler’s threads. + +| |Do not run long-running tasks on poller threads.
    Use a task executor instead.
    If you have a lot of polling endpoints, you can cause thread starvation, unless you increase the pool size.
    Also, polling consumers have a default `receiveTimeout` of one second.
    Since the poller thread blocks for this time, we recommend that you use a task executor when many such endpoints exist, again to avoid starvation.
    Alternatively, you can reduce the `receiveTimeout`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |An endpoint is a Polling Consumer if its input channel is one of the queue-based (that is, pollable) channels.
    Event-driven consumers are those having input channels that have dispatchers instead of queues (in other words, they are subscribable).
    Such endpoints have no poller configuration, since their handlers are invoked directly.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When running in a JEE container, you may need to use Spring’s `TimerManagerTaskScheduler`, as described [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/integration.html#scheduling-task-scheduler-implementations), instead of the default `taskScheduler`.
    To do so, define a bean with the appropriate JNDI name for your environment, as the following example shows:

    ```
    <bean id="taskScheduler"
          class="org.springframework.scheduling.concurrent.DefaultManagedTaskScheduler"/>
    ```| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |When a custom `TaskScheduler` is configured in the application context (like the above mentioned `DefaultManagedTaskScheduler`), it is recommended to supply it with a `MessagePublishingErrorHandler` (`integrationMessagePublishingErrorHandler` bean) to be able to handle exceptions as `ErrorMessage`s sent to the error channel, as is done with the default `TaskScheduler` bean provided by the framework.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See also [Error Handling](./error-handling.html#error-handling) for more information. + +### Global Properties + +Certain global framework properties can be overridden by providing a properties file on the classpath. + +The default properties can be found in `org.springframework.integration.context.IntegrationProperties` class. 
+The following listing shows the default values: + +``` +spring.integration.channels.autoCreate=true (1) +spring.integration.channels.maxUnicastSubscribers=0x7fffffff (2) +spring.integration.channels.maxBroadcastSubscribers=0x7fffffff (3) +spring.integration.taskScheduler.poolSize=10 (4) +spring.integration.messagingTemplate.throwExceptionOnLateReply=false (5) +spring.integration.readOnly.headers= (6) +spring.integration.endpoints.noAutoStartup= (7) +spring.integration.channels.error.requireSubscribers=true (8) +spring.integration.channels.error.ignoreFailures=true (9) +``` + +|**1**| When true, `input-channel` instances are automatically declared as `DirectChannel` instances when not explicitly found in the application context. | +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Sets the default number of subscribers allowed on, for example, a `DirectChannel`.
    It can be used to avoid inadvertently subscribing multiple endpoints to the same channel.
    You can override it on individual channels by setting the `max-subscribers` attribute. | +|**3**| This property provides the default number of subscribers allowed on, for example, a `PublishSubscribeChannel`.
    It can be used to avoid inadvertently subscribing more than the expected number of endpoints to the same channel.
    You can override it on individual channels by setting the `max-subscribers` attribute. | +|**4**| The number of threads available in the default `taskScheduler` bean.
    See [Configuring the Task Scheduler](#namespace-taskscheduler). | +|**5**| When `true`, messages that arrive at a gateway reply channel throw an exception when the gateway is not expecting a reply (because the sending thread has timed out or already received a reply). | +|**6**| A comma-separated list of message header names that should not be populated into `Message` instances during a header copying operation.
    The list is used by the `DefaultMessageBuilderFactory` bean and propagated to the `IntegrationMessageHeaderAccessor` instances (see [`MessageHeaderAccessor` API](./message.html#message-header-accessor)) used to build messages via `MessageBuilder` (see [The `MessageBuilder` Helper Class](./message.html#message-builder)).
    By default, only `MessageHeaders.ID` and `MessageHeaders.TIMESTAMP` are not copied during message building.
    Since version 4.3.2. | +|**7**|A comma-separated list of `AbstractEndpoint` bean name patterns (`xxx*`, `*xxx*`, `*xxx` or `xxx*yyy`) that should not be started automatically during application startup.
    You can manually start these endpoints later by their bean name through a `Control Bus` (see [Control Bus](./control-bus.html#control-bus)), by their role with the `SmartLifecycleRoleController` (see [Endpoint Roles](./endpoint.html#endpoint-roles)), or by `Lifecycle` bean injection.
    You can explicitly override the effect of this global property by specifying `auto-startup` XML annotation or the `autoStartup` annotation attribute or by calling `AbstractEndpoint.setAutoStartup()` in the bean definition.
    Since version 4.3.12.| +|**8**| A boolean flag to indicate that default global `errorChannel` must be configured with the `requireSubscribers` option.
    Since version 5.4.3.
    See [Error Handling](./error-handling.html#error-handling) for more information. | +|**9**| A boolean flag to indicate that default global `errorChannel` must ignore dispatching errors and pass the message to the next handler.
    Since version 5.5. | + +These properties can be overridden by adding a `/META-INF/spring.integration.properties` file to the classpath or an `IntegrationContextUtils.INTEGRATION_GLOBAL_PROPERTIES_BEAN_NAME` bean for the `org.springframework.integration.context.IntegrationProperties` instance. +You need not provide all the properties — only those that you want to override. + +Starting with version 5.1, all the merged global properties are printed in the logs after application context startup when a `DEBUG` logic level is turned on for the `org.springframework.integration` category. +The output looks like this: + +``` +Spring Integration global properties: + +spring.integration.endpoints.noAutoStartup=fooService* +spring.integration.taskScheduler.poolSize=20 +spring.integration.channels.maxUnicastSubscribers=0x7fffffff +spring.integration.channels.autoCreate=true +spring.integration.channels.maxBroadcastSubscribers=0x7fffffff +spring.integration.readOnly.headers= +spring.integration.messagingTemplate.throwExceptionOnLateReply=true +``` + +### Annotation Support + +In addition to the XML namespace support for configuring message endpoints, you can also use annotations. +First, Spring Integration provides the class-level `@MessageEndpoint` as a stereotype annotation, meaning that it is itself annotated with Spring’s `@Component` annotation and is therefore automatically recognized as a bean definition by Spring’s component scanning. + +Even more important are the various method-level annotations. +They indicate that the annotated method is capable of handling a message. +The following example demonstrates both class-level and method-level annotations: + +``` +@MessageEndpoint +public class FooService { + + @ServiceActivator + public void processMessage(Message message) { + ... + } +} +``` + +Exactly what it means for the method to “handle” the Message depends on the particular annotation. 
+Annotations available in Spring Integration include: + +* `@Aggregator` (see [Aggregator](./aggregator.html#aggregator)) + +* `@Filter` (see [Filter](./filter.html#filter)) + +* `@Router` (see [Routers](./router.html#router)) + +* `@ServiceActivator` (see [Service Activator](./service-activator.html#service-activator)) + +* `@Splitter` (see [Splitter](./splitter.html#splitter)) + +* `@Transformer` (see [Transformer](./transformer.html#transformer)) + +* `@InboundChannelAdapter` (see [Channel Adapter](./channel-adapter.html#channel-adapter)) + +* `@BridgeFrom` (see [Configuring a Bridge with Java Configuration](./bridge.html#bridge-annot)) + +* `@BridgeTo` (see [Configuring a Bridge with Java Configuration](./bridge.html#bridge-annot)) + +* `@MessagingGateway` (see [Messaging Gateways](./gateway.html#gateway)) + +* `@IntegrationComponentScan` (see [Configuration and `@EnableIntegration`](./overview.html#configuration-enable-integration)) + +| |If you use XML configuration in combination with annotations, the `@MessageEndpoint` annotation is not required.
    If you want to configure a POJO reference from the `ref` attribute of a `` element, you can provide only the method-level annotations.
    In that case, the annotation prevents ambiguity even when no method-level attribute exists on the `` element.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In most cases, the annotated handler method should not require the `Message` type as its parameter. +Instead, the method parameter type can match the message’s payload type, as the following example shows: + +``` +public class ThingService { + + @ServiceActivator + public void bar(Thing thing) { + ... + } + +} +``` + +When the method parameter should be mapped from a value in the `MessageHeaders`, another option is to use the parameter-level `@Header` annotation. +In general, methods annotated with the Spring Integration annotations can accept the `Message` itself, the message payload, or a header value (with `@Header`) as the parameter. +In fact, the method can accept a combination, as the following example shows: + +``` +public class ThingService { + + @ServiceActivator + public void otherThing(String payload, @Header("x") int valueX, @Header("y") int valueY) { + ... + } + +} +``` + +You can also use the `@Headers` annotation to provide all of the message headers as a `Map`, as the following example shows: + +``` +public class ThingService { + + @ServiceActivator + public void otherThing(String payload, @Headers Map headerMap) { + ... + } + +} +``` + +| |The value of the annotation can also be a SpEL expression (for example, `someHeader.toUpperCase()`), which is useful when you wish to manipulate the header value before injecting it.
    It also provides an optional `required` property, which specifies whether the attribute value must be available within the headers.
    The default value for the `required` property is `true`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For several of these annotations, when a message-handling method returns a non-null value, the endpoint tries to send a reply. +This is consistent across both configuration options (namespace and annotations) in that such an endpoint’s output channel is used (if available), and the `REPLY_CHANNEL` message header value is used as a fallback. + +| |The combination of output channels on endpoints and the reply channel message header enables a pipeline approach, where multiple components have an output channel and the final component allows the reply message to be forwarded to the reply channel (as specified in the original request message).
    In other words, the final component depends on the information provided by the original sender and can dynamically support any number of clients as a result.
    This is an example of the [return address](https://www.enterpriseintegrationpatterns.com/ReturnAddress.html) pattern.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +In addition to the examples shown here, these annotations also support the `inputChannel` and `outputChannel` properties, as the following example shows: + +``` +@Service +public class ThingService { + + @ServiceActivator(inputChannel="input", outputChannel="output") + public void otherThing(String payload, @Headers Map headerMap) { + ... + } + +} +``` + +The processing of these annotations creates the same beans as the corresponding XML components — `AbstractEndpoint` instances and `MessageHandler` instances (or `MessageSource` instances for the inbound channel adapter). +See [Annotations on `@Bean` Methods](#annotations_on_beans). +The bean names are generated from the following pattern: `[componentName].[methodName].[decapitalizedAnnotationClassShortName]`. +In the preceding example the bean name is `thingService.otherThing.serviceActivator` for the `AbstractEndpoint` and the same name with an additional `.handler` (`.source`) suffix for the `MessageHandler` (`MessageSource`) bean. +Such a name can be customized using an `@EndpointId` annotation alongside with these messaging annotations. +The `MessageHandler` instances (`MessageSource` instances) are also eligible to be tracked by [the message history](./message-history.html#message-history). 
+ +Starting with version 4.0, all messaging annotations provide `SmartLifecycle` options (`autoStartup` and `phase`) to allow endpoint lifecycle control on application context initialization. +They default to `true` and `0`, respectively. +To change the state of an endpoint (such as ` start()` or `stop()`), you can obtain a reference to the endpoint bean by using the `BeanFactory` (or autowiring) and invoke the methods. +Alternatively, you can send a command message to the `Control Bus` (see [Control Bus](./control-bus.html#control-bus)). +For these purposes, you should use the `beanName` mentioned earlier in the preceding paragraph. + +| |Channels automatically created after parsing the mentioned annotations (when no specific channel bean is configured), and the corresponding consumer endpoints, are declared as beans near the end of the context initialization.
    These beans **can** be autowired in other services, but they have to be marked with the `@Lazy` annotation because the definitions, typically, won’t yet be available during normal autowiring processing.

    ```
    @Autowired
    @Lazy
    @Qualifier("someChannel")
    MessageChannel someChannel;
    ...

    @Bean
    Thing1 dependsOnSPCA(@Qualifier("someInboundAdapter") @Lazy SourcePollingChannelAdapter someInboundAdapter) {
    ...
    }
    ```| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Using the `@Poller` Annotation + +Before Spring Integration 4.0, messaging annotations required that the `inputChannel` be a reference to a `SubscribableChannel`. +For `PollableChannel` instances, an `` element was needed to configure an `` and make the composite endpoint be a `PollingConsumer`. +Version 4.0 introduced the `@Poller` annotation to allow the configuration of `poller` attributes directly on the messaging annotations, as the following example shows: + +``` +public class AnnotationService { + + @Transformer(inputChannel = "input", outputChannel = "output", + poller = @Poller(maxMessagesPerPoll = "${poller.maxMessagesPerPoll}", fixedDelay = "${poller.fixedDelay}")) + public String handle(String payload) { + ... + } +} +``` + +The `@Poller` annotation provides only simple `PollerMetadata` options. +You can configure the `@Poller` annotation’s attributes (`maxMessagesPerPoll`, `fixedDelay`, `fixedRate`, and `cron`) with property placeholders. +Also, starting with version 5.1, the `receiveTimeout` option for `PollingConsumer` s is also provided. 
+If it is necessary to provide more polling options (for example, `transaction`, `advice-chain`, `error-handler`, and others), you should configure the `PollerMetadata` as a generic bean and use its bean name as the `@Poller` 's `value` attribute. +In this case, no other attributes are allowed (they must be specified on the `PollerMetadata` bean). +Note, if `inputChannel` is a `PollableChannel` and no `@Poller` is configured, the default `PollerMetadata` is used (if it is present in the application context). +To declare the default poller by using a `@Configuration` annotation, use code similar to the following example: + +``` +@Bean(name = PollerMetadata.DEFAULT_POLLER) +public PollerMetadata defaultPoller() { + PollerMetadata pollerMetadata = new PollerMetadata(); + pollerMetadata.setTrigger(new PeriodicTrigger(10)); + return pollerMetadata; +} +``` + +The following example shows how to use the default poller: + +``` +public class AnnotationService { + + @Transformer(inputChannel = "aPollableChannel", outputChannel = "output") + public String handle(String payload) { + ... + } +} +``` + +The following example shows how to use a named poller: + +``` +@Bean +public PollerMetadata myPoller() { + PollerMetadata pollerMetadata = new PollerMetadata(); + pollerMetadata.setTrigger(new PeriodicTrigger(1000)); + return pollerMetadata; +} +``` + +The following example shows an endpoint that uses the default poller: + +``` +public class AnnotationService { + + @Transformer(inputChannel = "aPollableChannel", outputChannel = "output" + poller = @Poller("myPoller")) + public String handle(String payload) { + ... + } +} +``` + +Starting with version 4.3.3, the `@Poller` annotation has the `errorChannel` attribute for easier configuration of the underlying `MessagePublishingErrorHandler`. +This attribute plays the same role as `error-channel` in the `` XML component. +See [Endpoint Namespace Support](./endpoint.html#endpoint-namespace) for more information. 
+ +The `poller()` attribute on the messaging annotations is mutually exclusive with the `reactive()` attribute. +See next section for more information. + +#### Using `@Reactive` Annotation + +The `ReactiveStreamsConsumer` has been around since version 5.0, but it was applied only when an input channel for the endpoint is a `FluxMessageChannel` (or any `org.reactivestreams.Publisher` implementation). +Starting with version 5.3, its instance is also created by the framework when the target message handler is a `ReactiveMessageHandler` independently of the input channel type. +The `@Reactive` sub-annotation (similar to mentioned above `@Poller`) has been introduced for all the messaging annotations starting with version 5.5. +It accepts an optional `Function>, ? extends Publisher>>` bean reference and, independently of the input channel type and message handler, turns the target endpoint into the `ReactiveStreamsConsumer` instance. +The function is used from the `Flux.transform()` operator to apply some customization (`publishOn()`, `doOnNext()`, `log()`, `retry()` etc.) on a reactive stream source from the input channel. + +The following example demonstrates how to change the publishing thread from the input channel independently of the final subscriber and producer to that `DirectChannel`: + +``` +@Bean +public Function, Flux> publishOnCustomizer() { + return flux -> flux.publishOn(Schedulers.parallel()); +} + +@ServiceActivator(inputChannel = "directChannel", reactive = @Reactive("publishOnCustomizer")) +public void handleReactive(String payload) { + ... +} +``` + +The `reactive()` attribute on the messaging annotations is mutually exclusive with the `poller()` attribute. +See [Using the `@Poller` Annotation](#configuration-using-poller-annotation) and [Reactive Streams Support](./reactive-streams.html#reactive-streams) for more information. 
+ +#### Using the `@InboundChannelAdapter` Annotation + +Version 4.0 introduced the `@InboundChannelAdapter` method-level annotation. +It produces a `SourcePollingChannelAdapter` integration component based on a `MethodInvokingMessageSource` for the annotated method. +This annotation is an analogue of the `` XML component and has the same restrictions: The method cannot have parameters, and the return type must not be `void`. +It has two attributes: `value` (the required `MessageChannel` bean name) and `poller` (an optional `@Poller` annotation, as [described earlier](#configuration-using-poller-annotation)). +If you need to provide some `MessageHeaders`, use a `Message` return type and use a `MessageBuilder` to build the `Message`. +Using a `MessageBuilder` lets you configure the `MessageHeaders`. +The following example shows how to use an `@InboundChannelAdapter` annotation: + +``` +@InboundChannelAdapter("counterChannel") +public Integer count() { + return this.counter.incrementAndGet(); +} + +@InboundChannelAdapter(value = "fooChannel", poller = @Poller(fixed-rate = "5000")) +public String foo() { + return "foo"; +} +``` + +Version 4.3 introduced the `channel` alias for the `value` annotation attribute, to provide better source code readability. +Also, the target `MessageChannel` bean is resolved in the `SourcePollingChannelAdapter` by the provided name (set by the `outputChannelName` option) on the first `receive()` call, not during the initialization phase. +It allows “late binding” logic: The target `MessageChannel` bean from the consumer perspective is created and registered a bit later than the `@InboundChannelAdapter` parsing phase. + +The first example requires that the default poller has been declared elsewhere in the application context. + +Using the `@MessagingGateway` Annotation + +See [`@MessagingGateway` Annotation](./gateway.html#messaging-gateway-annotation). 
+ +#### Using the `@IntegrationComponentScan` Annotation + +The standard Spring Framework `@ComponentScan` annotation does not scan interfaces for stereotype `@Component` annotations. +To overcome this limitation and allow the configuration of `@MessagingGateway` (see [`@MessagingGateway` Annotation](./gateway.html#messaging-gateway-annotation)), we introduced the `@IntegrationComponentScan` mechanism. +This annotation must be placed with a `@Configuration` annotation and be customized to define its scanning options, +such as `basePackages` and `basePackageClasses`. +In this case, all discovered interfaces annotated with `@MessagingGateway` are parsed and registered as `GatewayProxyFactoryBean` instances. +All other class-based components are parsed by the standard `@ComponentScan`. + +### Messaging Meta-Annotations + +Starting with version 4.0, all messaging annotations can be configured as meta-annotations and all user-defined messaging annotations can define the same attributes to override their default values. +In addition, meta-annotations can be configured hierarchically, as the following example shows: + +``` +@Target({ElementType.METHOD, ElementType.ANNOTATION_TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@ServiceActivator(inputChannel = "annInput", outputChannel = "annOutput") +public @interface MyServiceActivator { + + String[] adviceChain = { "annAdvice" }; +} + +@Target({ElementType.METHOD, ElementType.ANNOTATION_TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@MyServiceActivator +public @interface MyServiceActivator1 { + + String inputChannel(); + + String outputChannel(); +} +... + +@MyServiceActivator1(inputChannel = "inputChannel", outputChannel = "outputChannel") +public Object service(Object payload) { + ... +} +``` + +Configuring meta-annotations hierarchically lets users set defaults for various attributes and enables isolation of framework Java dependencies to user annotations, avoiding their use in user classes. 
+If the framework finds a method with a user annotation that has a framework meta-annotation, it is treated as if the method were annotated directly with the framework annotation. + +#### Annotations on `@Bean` Methods + +Starting with version 4.0, you can configure messaging annotations on `@Bean` method definitions in `@Configuration` classes, to produce message endpoints based on the beans, not the methods. +It is useful when `@Bean` definitions are “out-of-the-box” `MessageHandler` instances (`AggregatingMessageHandler`, `DefaultMessageSplitter`, and others), `Transformer` instances (`JsonToObjectTransformer`, `ClaimCheckOutTransformer`, and others), and `MessageSource` instances (`FileReadingMessageSource`, `RedisStoreMessageSource`, and others). +The following example shows how to use messaging annotations with `@Bean` annotations: + +``` +@Configuration +@EnableIntegration +public class MyFlowConfiguration { + + @Bean + @InboundChannelAdapter(value = "inputChannel", poller = @Poller(fixedDelay = "1000")) + public MessageSource consoleSource() { + return CharacterStreamReadingMessageSource.stdin(); + } + + @Bean + @Transformer(inputChannel = "inputChannel", outputChannel = "httpChannel") + public ObjectToMapTransformer toMapTransformer() { + return new ObjectToMapTransformer(); + } + + @Bean + @ServiceActivator(inputChannel = "httpChannel") + public MessageHandler httpHandler() { + HttpRequestExecutingMessageHandler handler = new HttpRequestExecutingMessageHandler("https://foo/service"); + handler.setExpectedResponseType(String.class); + handler.setOutputChannelName("outputChannel"); + return handler; + } + + @Bean + @ServiceActivator(inputChannel = "outputChannel") + public LoggingHandler loggingHandler() { + return new LoggingHandler("info"); + } + +} +``` + +Version 5.0 introduced support for a `@Bean` annotated with `@InboundChannelAdapter` that returns `java.util.function.Supplier`, which can produce either a POJO or a `Message`. 
+The following example shows how to use that combination: + +``` +@Configuration +@EnableIntegration +public class MyFlowConfiguration { + + @Bean + @InboundChannelAdapter(value = "inputChannel", poller = @Poller(fixedDelay = "1000")) + public Supplier pojoSupplier() { + return () -> "foo"; + } + + @Bean + @InboundChannelAdapter(value = "inputChannel", poller = @Poller(fixedDelay = "1000")) + public Supplier> messageSupplier() { + return () -> new GenericMessage<>("foo"); + } +} +``` + +The meta-annotation rules work on `@Bean` methods as well (the `@MyServiceActivator` annotation [described earlier](#meta-annotations) can be applied to a `@Bean` definition). + +| |When you use these annotations on consumer `@Bean` definitions, if the bean definition returns an appropriate `MessageHandler` (depending on the annotation type), you must set attributes (such as `outputChannel`, `requiresReply`, `order`, and others), on the `MessageHandler` `@Bean` definition itself.
    Only the following annotation attributes are used: `adviceChain`, `autoStartup`, `inputChannel`, `phase`, and `poller`.
    All other attributes are for the handler.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The bean names are generated with the following algorithm:| +|---|----------------------------------------------------------| + +* The `MessageHandler` (`MessageSource`) `@Bean` gets its own standard name from the method name or `name` attribute on the `@Bean`. + This works as though there were no messaging annotation on the `@Bean` method. + +* The `AbstractEndpoint` bean name is generated with the following pattern: `[configurationComponentName].[methodName].[decapitalizedAnnotationClassShortName]`. + For example, the `SourcePollingChannelAdapter` endpoint for the `consoleSource()` definition [shown earlier](#annotations_on_beans) gets a bean name of `myFlowConfiguration.consoleSource.inboundChannelAdapter`. + See also [Endpoint Bean Names](./overview.html#endpoint-bean-names). + +| |When using these annotations on `@Bean` definitions, the `inputChannel` must reference a declared bean.
    Channels are not automatically declared in this case.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |With Java configuration, you can use any `@Conditional` (for example, `@Profile`) definition on the `@Bean` method level to skip the bean registration for some conditional reason.
    The following example shows how to do so:

    ```
    @Bean
    @ServiceActivator(inputChannel = "skippedChannel")
    @Profile("thing")
    public MessageHandler skipped() {
    return System.out::println;
    }
    ```

    Together with the existing Spring container logic, the messaging endpoint bean (based on the `@ServiceActivator` annotation), is also not registered.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Creating a Bridge with Annotations + +Starting with version 4.0, Java configuration provides the `@BridgeFrom` and `@BridgeTo` `@Bean` method annotations to mark `MessageChannel` beans in `@Configuration` classes. +These really exists for completeness, providing a convenient mechanism to declare a `BridgeHandler` and its message endpoint configuration: + +``` +@Bean +public PollableChannel bridgeFromInput() { + return new QueueChannel(); +} + +@Bean +@BridgeFrom(value = "bridgeFromInput", poller = @Poller(fixedDelay = "1000")) +public MessageChannel bridgeFromOutput() { + return new DirectChannel(); +} +@Bean +public QueueChannel bridgeToOutput() { + return new QueueChannel(); +} + +@Bean +@BridgeTo("bridgeToOutput") +public MessageChannel bridgeToInput() { + return new DirectChannel(); +} +``` + +You can use these annotations as meta-annotations as well. + +#### Advising Annotated Endpoints + +See [Advising Endpoints Using Annotations](./handler-advice.html#advising-with-annotations). + +### Message Mapping Rules and Conventions + +Spring Integration implements a flexible facility to map messages to methods and their arguments without providing extra configuration, by relying on some default rules and defining certain conventions. 
+The examples in the following sections articulate the rules. + +#### Sample Scenarios + +The following example shows a single un-annotated parameter (object or primitive) that is not a `Map` or a `Properties` object with a non-void return type: + +``` +public String doSomething(Object o); +``` + +The input parameter is a message payload. +If the parameter type is not compatible with a message payload, an attempt is made to convert it by using a conversion service provided by Spring 3.0. +The return value is incorporated as a payload of the returned message. + +The following example shows a single un-annotated parameter (object or primitive)that is not a `Map` or a `Properties` with a `Message` return type: + +``` +public Message doSomething(Object o); +``` + +The input parameter is a message payload. +If the parameter type is not compatible with a message payload, an attempt is made to convert it by using a conversion service provided by Spring 3.0. +The return value is a newly constructed message that is sent to the next destination. + +The followig example shows a single parameter that is a message (or one of its subclasses) with an arbitrary object or primitive return type: + +``` +public int doSomething(Message msg); +``` + +The input parameter is itself a `Message`. +The return value becomes a payload of the `Message` that is sent to the next destination. + +The following example shows a single parameter that is a `Message` (or one of its subclasses) with a `Message` (or one of its subclasses) as the return type: + +``` +public Message doSomething(Message msg); +``` + +The input parameter is itself a `Message`. +The return value is a newly constructed `Message` that is sent to the next destination. + +The following example shows a single parameter of type `Map` or `Properties` with a `Message` as the return type: + +``` +public Message doSomething(Map m); +``` + +This one is a bit interesting. 
+Although, at first, it might seem like an easy mapping straight to message headers, preference is always given to a `Message` payload. +This means that if a `Message` payload is of type `Map`, this input argument represents a `Message` payload. +However, if the `Message` payload is not of type `Map`, the conversion service does not try to convert the payload, and the input argument is mapped to message headers. + +The following example shows two parameters, where one of them is an arbitrary type (an object or a primitive) that is not a `Map` or a `Properties` object and the other is of type `Map` or `Properties` type (regardless of the return): + +``` +public Message doSomething(Map h, t); +``` + +This combination contains two input parameters where one of them is of type `Map`. +The non-`Map` parameters (regardless of the order) are mapped to a `Message` payload and the `Map` or `Properties` (regardless of the order) is mapped to message headers, giving you a nice POJO way of interacting with `Message` structure. + +The following example shows no parameters (regardless of the return): + +``` +public String doSomething(); +``` + +This message handler method is invoked based on the Message sent to the input channel to which this handler is connected. +However no `Message` data is mapped, thus making the `Message` act as event or trigger to invoke the handler. +The output is mapped according to the rules [described earlier](#message-mapping-rules). + +The following example shows no parameters and a void return: + +``` +public void soSomething(); +``` + +This example is the same as the previous example, but it produces no output. + +#### Annotation-based Mapping + +Annotation-based mapping is the safest and least ambiguous approach to map messages to methods. 
+The following example shows how to explicitly map a method to a header: + +``` +public String doSomething(@Payload String s, @Header("someheader") String b) +``` + +As you can see later on, without an annotation this signature would result in an ambiguous condition. +However, by explicitly mapping the first argument to a `Message` payload and the second argument to a value of the `someheader` message header, we avoid any ambiguity. + +The following example is nearly identical to the preceding example: + +``` +public String doSomething(@Payload String s, @RequestParam("something") String b) +``` + +`@RequestMapping` or any other non-Spring Integration mapping annotation is irrelevant and is therefore ignored, leaving the second parameter unmapped. +Although the second parameter could easily be mapped to a payload, there can only be one payload. +Therefore, the annotations keep this method from being ambiguous. + +The following example shows another similar method that would be ambiguous were it not for annotations to clarify the intent: + +``` +public String foo(String s, @Header("foo") String b) +``` + +The only difference is that the first argument is implicitly mapped to the message payload. + +The following example shows yet another signature that would definitely be treated as ambiguous without annotations, because it has more than two arguments: + +``` +public String soSomething(@Headers Map m, @Header("something") Map f, @Header("someotherthing") String bar) +``` + +This example would be especially problematic, because two of its arguments are `Map` instances. +However, with annotation-based mapping, the ambiguity is easily avoided. +In this example the first argument is mapped to all the message headers, while the second and third argument map to the values of the message headers named 'something' and 'someotherthing'. +The payload is not being mapped to any argument. 
+ +#### Complex Scenarios + +The following example uses multiple parameters: + +Multiple parameters can create a lot of ambiguity with regards to determining the appropriate mappings. +The general advice is to annotate your method parameters with `@Payload`, `@Header`, and `@Headers`. +The examples in this section show ambiguous conditions that result in an exception being raised. + +``` +public String doSomething(String s, int i) +``` + +The two parameters are equal in weight. +Therefore, there is no way to determine which one is a payload. + +The following example shows a similar problem, only with three parameters: + +``` +public String foo(String s, Map m, String b) +``` + +Although the Map could be easily mapped to message headers, there is no way to determine what to do with the two String parameters. + +The following example shows another ambiguous method: + +``` +public String foo(Map m, Map f) +``` + +Although one might argue that one `Map` could be mapped to the message payload and the other one to the message headers, we cannot rely on the order. + +| |Any method signature with more than one method argument that is not (Map, \) and with unannotated parameters results in an ambiguous condition and triggers an exception.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The next set of examples each show mutliple methods that result in ambiguity. + +Message handlers with multiple methods are mapped based on the same rules that are described earlier (in the examples). +However, some scenarios might still look confusing. + +The following example shows multiple methods with legal (mappable and unambiguous) signatures: + +``` +public class Something { + public String doSomething(String str, Map m); + + public String doSomething(Map m); +} +``` + +(Whether the methods have the same name or different names makes no difference). 
+The `Message` could be mapped to either method. +The first method would be invoked when the message payload could be mapped to `str` and the message headers could be mapped to `m`. +The second method could also be a candidate by mapping only the message headers to `m`. +To make matters worse, both methods have the same name. +At first, that might look ambiguous because of the following configuration: + +``` + + + +``` + +It works because mappings are based on the payload first and everything else next. +In other words, the method whose first argument can be mapped to a payload takes precedence over all other methods. + +Now consider an alternate example, which produces a truly ambiguous condition: + +``` +public class Something { + public String doSomething(String str, Map m); + + public String doSomething(String str); +} +``` + +Both methods have signatures that could be mapped to a message payload. +They also have the same name. +Such handler methods will trigger an exception. +However, if the method names were different, you could influence the mapping with a `method` attribute (shown in the next example). +The following example shows the same example with two different method names: + +``` +public class Something { + public String doSomething(String str, Map m); + + public String doSomethingElse(String str); +} +``` + +The following example shows how to use the `method` attribute to dictate the mapping: + +``` + + + +``` + +Because the configuration explicitly maps the `doSomethingElse` method, we have eliminated the ambiguity. 
\ No newline at end of file diff --git a/docs/en/spring-integration/core.md b/docs/en/spring-integration/core.md new file mode 100644 index 0000000000000000000000000000000000000000..34be6d06dc977f913e6b6db660618cd29e7931c9 --- /dev/null +++ b/docs/en/spring-integration/core.md @@ -0,0 +1,1531 @@ +# Core Messaging + +## Messaging Channels + +### Message Channels + +While the `Message` plays the crucial role of encapsulating data, it is the `MessageChannel` that decouples message producers from message consumers. + +#### The MessageChannel Interface + +Spring Integration’s top-level `MessageChannel` interface is defined as follows: + +``` +public interface MessageChannel { + + boolean send(Message message); + + boolean send(Message message, long timeout); +} +``` + +When sending a message, the return value is `true` if the message is sent successfully. +If the send call times out or is interrupted, it returns `false`. + +##### `PollableChannel` + +Since message channels may or may not buffer messages (as discussed in the [Spring Integration Overview](./overview.html#overview)), two sub-interfaces define the buffering (pollable) and non-buffering (subscribable) channel behavior. +The following listing shows the definition of the `PollableChannel` interface: + +``` +public interface PollableChannel extends MessageChannel { + + Message receive(); + + Message receive(long timeout); + +} +``` + +As with the send methods, when receiving a message, the return value is null in the case of a timeout or interrupt. + +##### `SubscribableChannel` + +The `SubscribableChannel` base interface is implemented by channels that send messages directly to their subscribed `MessageHandler` instances. +Therefore, they do not provide receive methods for polling. +Instead, they define methods for managing those subscribers. 
+The following listing shows the definition of the `SubscribableChannel` interface: + +``` +public interface SubscribableChannel extends MessageChannel { + + boolean subscribe(MessageHandler handler); + + boolean unsubscribe(MessageHandler handler); + +} +``` + +#### Message Channel Implementations + +Spring Integration provides several different message channel implementations. +The following sections briefly describe each one. + +##### `PublishSubscribeChannel` + +The `PublishSubscribeChannel` implementation broadcasts any `Message` sent to it to all of its subscribed handlers. +This is most often used for sending event messages, whose primary role is notification (as opposed to document messages, which are generally intended to be processed by a single handler). +Note that the `PublishSubscribeChannel` is intended for sending only. +Since it broadcasts to its subscribers directly when its `send(Message)` method is invoked, consumers cannot poll for messages (it does not implement `PollableChannel` and therefore has no `receive()` method). +Instead, any subscriber must itself be a `MessageHandler`, and the subscriber’s `handleMessage(Message)` method is invoked in turn. + +Prior to version 3.0, invoking the `send` method on a `PublishSubscribeChannel` that had no subscribers returned `false`. +When used in conjunction with a `MessagingTemplate`, a `MessageDeliveryException` was thrown. +Starting with version 3.0, the behavior has changed such that a `send` is always considered successful if at least the minimum subscribers are present (and successfully handle the message). +This behavior can be modified by setting the `minSubscribers` property, which defaults to `0`. 
+ +| |If you use a `TaskExecutor`, only the presence of the correct number of subscribers is used for this determination, because the actual handling of the message is performed asynchronously.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `QueueChannel` + +The `QueueChannel` implementation wraps a queue. +Unlike the `PublishSubscribeChannel`, the `QueueChannel` has point-to-point semantics. +In other words, even if the channel has multiple consumers, only one of them should receive any `Message` sent to that channel. +It provides a default no-argument constructor (providing an essentially unbounded capacity of `Integer.MAX_VALUE`) as well as a constructor that accepts the queue capacity, as the following listing shows: + +``` +public QueueChannel(int capacity) +``` + +A channel that has not reached its capacity limit stores messages in its internal queue, and the `send(Message)` method returns immediately, even if no receiver is ready to handle the message. +If the queue has reached capacity, the sender blocks until room is available in the queue. +Alternatively, if you use the send method that has an additional timeout parameter, the queue blocks until either room is available or the timeout period elapses, whichever occurs first. +Similarly, a `receive()` call returns immediately if a message is available on the queue, but, if the queue is empty, then a receive call may block until either a message is available or the timeout, if provided, elapses. +In either case, it is possible to force an immediate return regardless of the queue’s state by passing a timeout value of 0. +Note, however, that calls to the versions of `send()` and `receive()` with no `timeout` parameter block indefinitely. 
+ +##### `PriorityChannel` + +Whereas the `QueueChannel` enforces first-in-first-out (FIFO) ordering, the `PriorityChannel` is an alternative implementation that allows for messages to be ordered within the channel based upon a priority. +By default, the priority is determined by the `priority` header within each message. +However, for custom priority determination logic, a comparator of type `Comparator>` can be provided to the `PriorityChannel` constructor. + +##### `RendezvousChannel` + +The `RendezvousChannel` enables a “direct-handoff” scenario, wherein a sender blocks until another party invokes the channel’s `receive()` method. +The other party blocks until the sender sends the message. +Internally, this implementation is quite similar to the `QueueChannel`, except that it uses a `SynchronousQueue` (a zero-capacity implementation of `BlockingQueue`). +This works well in situations where the sender and receiver operate in different threads, but asynchronously dropping the message in a queue is not appropriate. +In other words, with a `RendezvousChannel`, the sender knows that some receiver has accepted the message, whereas with a `QueueChannel`, the message would have been stored to the internal queue and potentially never received. + +| |Keep in mind that all of these queue-based channels are storing messages in-memory only by default.
    When persistence is required, you can either provide a 'message-store' attribute within the 'queue' element to reference a persistent `MessageStore` implementation or you can replace the local channel with one that is backed by a persistent broker, such as a JMS-backed channel or channel adapter.
    The latter option lets you take advantage of any JMS provider’s implementation for message persistence, as discussed in [JMS Support](./jms.html#jms).
    However, when buffering in a queue is not necessary, the simplest approach is to rely upon the `DirectChannel`, discussed in the next section.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `RendezvousChannel` is also useful for implementing request-reply operations. +The sender can create a temporary, anonymous instance of `RendezvousChannel`, which it then sets as the 'replyChannel' header when building a `Message`. +After sending that `Message`, the sender can immediately call `receive` (optionally providing a timeout value) in order to block while waiting for a reply `Message`. +This is very similar to the implementation used internally by many of Spring Integration’s request-reply components. + +##### `DirectChannel` + +The `DirectChannel` has point-to-point semantics but otherwise is more similar to the `PublishSubscribeChannel` than any of the queue-based channel implementations described earlier. +It implements the `SubscribableChannel` interface instead of the `PollableChannel` interface, so it dispatches messages directly to a subscriber. +As a point-to-point channel, however, it differs from the `PublishSubscribeChannel` in that it sends each `Message` to a single subscribed `MessageHandler`. 
+ +In addition to being the simplest point-to-point channel option, one of its most important features is that it enables a single thread to perform the operations on “both sides” of the channel. +For example, if a handler subscribes to a `DirectChannel`, then sending a `Message` to that channel triggers invocation of that handler’s `handleMessage(Message)` method directly in the sender’s thread, before the `send()` method invocation can return. + +The key motivation for providing a channel implementation with this behavior is to support transactions that must span across the channel while still benefiting from the abstraction and loose coupling that the channel provides. +If the send call is invoked within the scope of a transaction, the outcome of the handler’s invocation (for example, updating a database record) plays a role in determining the ultimate result of that transaction (commit or rollback). + +| |Since the `DirectChannel` is the simplest option and does not add any additional overhead that would be required for scheduling and managing the threads of a poller, it is the default channel type within Spring Integration.
    The general idea is to define the channels for an application, consider which of those need to provide buffering or to throttle input, and modify those to be queue-based `PollableChannels`.
    Likewise, if a channel needs to broadcast messages, it should not be a `DirectChannel` but rather a `PublishSubscribeChannel`.
    Later, we show how each of these channels can be configured.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `DirectChannel` internally delegates to a message dispatcher to invoke its subscribed message handlers, and that dispatcher can have a load-balancing strategy exposed by `load-balancer` or `load-balancer-ref` attributes (mutually exclusive). +The load balancing strategy is used by the message dispatcher to help determine how messages are distributed amongst message handlers when multiple message handlers subscribe to the same channel. +As a convenience, the `load-balancer` attribute exposes an enumeration of values pointing to pre-existing implementations of `LoadBalancingStrategy`. +A `round-robin` (load-balances across the handlers in rotation) and `none` (for the cases where one wants to explicitly disable load balancing) are the only available values. +Other strategy implementations may be added in future versions. +However, since version 3.0, you can provide your own implementation of the `LoadBalancingStrategy` and inject it by using the `load-balancer-ref` attribute, which should point to a bean that implements `LoadBalancingStrategy`, as the following example shows: + +A `FixedSubscriberChannel` is a `SubscribableChannel` that only supports a single `MessageHandler` subscriber that cannot be unsubscribed. 
+This is useful for high-throughput performance use-cases when no other subscribers are involved and no channel interceptors are needed. + +``` + + + + + +``` + +Note that the `load-balancer` and `load-balancer-ref` attributes are mutually exclusive. + +The load-balancing also works in conjunction with a boolean `failover` property. +If the `failover` value is true (the default), the dispatcher falls back to any subsequent handlers (as necessary) when preceding handlers throw exceptions. +The order is determined by an optional order value defined on the handlers themselves or, if no such value exists, the order in which the handlers subscribed. + +If a certain situation requires that the dispatcher always try to invoke the first handler and then fall back in the same fixed order sequence every time an error occurs, no load-balancing strategy should be provided. +In other words, the dispatcher still supports the `failover` boolean property even when no load-balancing is enabled. +Without load-balancing, however, the invocation of handlers always begins with the first, according to their order. +For example, this approach works well when there is a clear definition of primary, secondary, tertiary, and so on. +When using the namespace support, the `order` attribute on any endpoint determines the order. + +| |Keep in mind that load-balancing and `failover` apply only when a channel has more than one subscribed message handler.
    When using the namespace support, this means that more than one endpoint shares the same channel reference defined in the `input-channel` attribute.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 5.2, when `failover` is true, a failure of the current handler together with the failed message is logged under `debug` or `info` if configured respectively. + +##### `ExecutorChannel` + +The `ExecutorChannel` is a point-to-point channel that supports the same dispatcher configuration as `DirectChannel` (load-balancing strategy and the `failover` boolean property). +The key difference between these two dispatching channel types is that the `ExecutorChannel` delegates to an instance of `TaskExecutor` to perform the dispatch. +This means that the send method typically does not block, but it also means that the handler invocation may not occur in the sender’s thread. +It therefore does not support transactions that span the sender and receiving handler. + +| |The sender can sometimes block.
    For example, when using a `TaskExecutor` with a rejection policy that throttles the client (such as the `ThreadPoolExecutor.CallerRunsPolicy`), the sender’s thread can execute the method any time the thread pool is at its maximum capacity and the executor’s work queue is full.
    Since that situation would only occur in a non-predictable way, you should not rely upon it for transactions.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `FluxMessageChannel` + +The `FluxMessageChannel` is an `org.reactivestreams.Publisher` implementation for `"sinking"` sent messages into an internal `reactor.core.publisher.Flux` for on demand consumption by reactive subscribers downstream. +This channel implementation is neither a `SubscribableChannel`, nor a `PollableChannel`, so only `org.reactivestreams.Subscriber` instances can be used to consume from this channel honoring back-pressure nature of reactive streams. +On the other hand, the `FluxMessageChannel` implements a `ReactiveStreamsSubscribableChannel` with its `subscribeTo(Publisher>)` contract allowing receiving events from reactive source publishers, bridging a reactive stream into the integration flow. +To achieve fully reactive behavior for the whole integration flow, such a channel must be placed between all the endpoints in the flow. + +See [Reactive Streams Support](./reactive-streams.html#reactive-streams) for more information about interaction with Reactive Streams. + +##### Scoped Channel + +Spring Integration 1.0 provided a `ThreadLocalChannel` implementation, but that has been removed as of 2.0. +Now the more general way to handle the same requirement is to add a `scope` attribute to a channel. +The value of the attribute can be the name of a scope that is available within the context. 
+For example, in a web environment, certain scopes are available, and any custom scope implementations can be registered with the context. +The following example shows a thread-local scope being applied to a channel, including the registration of the scope itself: + +``` + + + + + + + + + + + +``` + +The channel defined in the previous example also delegates to a queue internally, but the channel is bound to the current thread, so the contents of the queue are similarly bound. +That way, the thread that sends to the channel can later receive those same messages, but no other thread would be able to access them. +While thread-scoped channels are rarely needed, they can be useful in situations where `DirectChannel` instances are being used to enforce a single thread of operation but any reply messages should be sent to a “terminal” channel. +If that terminal channel is thread-scoped, the original sending thread can collect its replies from the terminal channel. + +Now, since any channel can be scoped, you can define your own scopes in addition to thread-Local. + +#### Channel Interceptors + +One of the advantages of a messaging architecture is the ability to provide common behavior and capture meaningful information about the messages passing through the system in a non-invasive way. +Since the `Message` instances are sent to and received from `MessageChannel` instances, those channels provide an opportunity for intercepting the send and receive operations. 
+The `ChannelInterceptor` strategy interface, shown in the following listing, provides methods for each of those operations: + +``` +public interface ChannelInterceptor { + + Message preSend(Message message, MessageChannel channel); + + void postSend(Message message, MessageChannel channel, boolean sent); + + void afterSendCompletion(Message message, MessageChannel channel, boolean sent, Exception ex); + + boolean preReceive(MessageChannel channel); + + Message postReceive(Message message, MessageChannel channel); + + void afterReceiveCompletion(Message message, MessageChannel channel, Exception ex); +} +``` + +After implementing the interface, registering the interceptor with a channel is just a matter of making the following call: + +``` +channel.addInterceptor(someChannelInterceptor); +``` + +The methods that return a `Message` instance can be used for transforming the `Message` or can return 'null' to prevent further processing (of course, any of the methods can throw a `RuntimeException`). +Also, the `preReceive` method can return `false` to prevent the receive operation from proceeding. + +| |Keep in mind that `receive()` calls are only relevant for `PollableChannels`.
    In fact, the `SubscribableChannel` interface does not even define a `receive()` method.
    The reason for this is that when a `Message` is sent to a `SubscribableChannel`, it is sent directly to zero or more subscribers, depending on the type of channel (for example,
    a `PublishSubscribeChannel` sends to all of its subscribers).
    Therefore, the `preReceive(…​)`, `postReceive(…​)`, and `afterReceiveCompletion(…​)` interceptor methods are invoked only when the interceptor is applied to a `PollableChannel`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Spring Integration also provides an implementation of the [Wire Tap](https://www.enterpriseintegrationpatterns.com/WireTap.html) pattern. +It is a simple interceptor that sends the `Message` to another channel without otherwise altering the existing flow. +It can be very useful for debugging and monitoring. +An example is shown in [Wire Tap](#channel-wiretap). + +Because it is rarely necessary to implement all of the interceptor methods, the interface provides no-op methods (methods returning `void` method have no code, the `Message`-returning methods return the `Message` as-is, and the `boolean` method returns `true`). + +| |The order of invocation for the interceptor methods depends on the type of channel.
    As described earlier, the queue-based channels are the only ones where the receive method is intercepted in the first place.
    Additionally, the relationship between send and receive interception depends on the timing of the separate sender and receiver threads.
    For example, if a receiver is already blocked while waiting for a message, the order could be as follows: `preSend`, `preReceive`, `postReceive`, `postSend`.
    However, if a receiver polls after the sender has placed a message on the channel and has already returned, the order would be as follows: `preSend`, `postSend` (some-time-elapses), `preReceive`, `postReceive`.
    The time that elapses in such a case depends on a number of factors and is therefore generally unpredictable (in fact, the receive may never happen).
    The type of queue also plays a role (for example, rendezvous versus priority).
    In short, you cannot rely on the order beyond the fact that `preSend` precedes `postSend` and `preReceive` precedes `postReceive`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with Spring Framework 4.1 and Spring Integration 4.1, the `ChannelInterceptor` provides new methods: `afterSendCompletion()` and `afterReceiveCompletion()`. +They are invoked after `send()' and 'receive()` calls, regardless of any exception that is raised, which allow for resource cleanup. +Note that the channel invokes these methods on the `ChannelInterceptor` list in the reverse order of the initial `preSend()` and `preReceive()` calls. + +Starting with version 5.1, global channel interceptors now apply to dynamically registered channels - such as through beans that are initialized by using `beanFactory.initializeBean()` or `IntegrationFlowContext` when using the Java DSL. 
+Previously, interceptors were not applied when beans were created after the application context was refreshed. + +Also, starting with version 5.1, `ChannelInterceptor.postReceive()` is no longer called when no message is received; it is no longer necessary to check for a `null` `Message`. +Previously, the method was called. +If you have an interceptor that relies on the previous behavior, implement `afterReceiveCompleted()` instead, since that method is invoked, regardless of whether a message is received or not. + +| |Starting with version 5.2, the `ChannelInterceptorAware` is deprecated in favor of `InterceptableChannel` from the Spring Messaging module, which it extends now for backward compatibility.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### `MessagingTemplate` + +When the endpoints and their various configuration options are introduced, Spring Integration provides a foundation for messaging components that enables non-invasive invocation of your application code from the messaging system. +However, it is sometimes necessary to invoke the messaging system from your application code. +For convenience when implementing such use cases, Spring Integration provides a `MessagingTemplate` that supports a variety of operations across the message channels, including request and reply scenarios. +For example, it is possible to send a request and wait for a reply, as follows: + +``` +MessagingTemplate template = new MessagingTemplate(); + +Message reply = template.sendAndReceive(someChannel, new GenericMessage("test")); +``` + +In the preceding example, a temporary anonymous channel would be created internally by the template. +The 'sendTimeout' and 'receiveTimeout' properties may also be set on the template, and other exchange types are also supported. 
+The following listing shows the signatures for such methods: + +``` +public boolean send(final MessageChannel channel, final Message message) { ... +} + +public Message sendAndReceive(final MessageChannel channel, final Message request) { ... +} + +public Message receive(final PollableChannel channel) { ... +} +``` + +| |A less invasive approach that lets you invoke simple interfaces with payload or header values instead of `Message` instances is described in [Enter the `GatewayProxyFactoryBean`](./gateway.html#gateway-proxy).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Configuring Message Channels + +To create a message channel instance, you can use the `` element for xml or `DirectChannel` instance for Java configuration, as follows: + +Java + +``` +@Bean +public MessageChannel exampleChannel() { + return new DirectChannel(); +} +``` + +XML + +``` + +``` + +When you use the `` element without any sub-elements, it creates a `DirectChannel` instance (a `SubscribableChannel`). + +To create a publish-subscribe channel, use the `` element (the `PublishSubscribeChannel` in Java), as follows: + +Java + +``` +@Bean +public MessageChannel exampleChannel() { + return new PublishSubscribeChannel(); +} +``` + +XML + +``` + +``` + +You can alternatively provide a variety of `` sub-elements to create any of the pollable channel types (as described in [Message Channel Implementations](#channel-implementations)). +The following sections shows examples of each channel type. + +##### `DirectChannel` Configuration + +As mentioned earlier, `DirectChannel` is the default type. 
+The following listing shows how to define one: + +Java + +``` +@Bean +public MessageChannel directChannel() { + return new DirectChannel(); +} +``` + +XML + +``` + +``` + +A default channel has a round-robin load-balancer and also has failover enabled (see [`DirectChannel`](#channel-implementations-directchannel) for more detail). +To disable one or both of these, add a `<dispatcher/>` sub-element (a `LoadBalancingStrategy` constructor of the `DirectChannel`) and configure the attributes as follows: + +Java + +``` +@Bean +public MessageChannel failFastChannel() { + DirectChannel channel = new DirectChannel(); + channel.setFailover(false); + return channel; +} + +@Bean +public MessageChannel failFastChannel() { + return new DirectChannel(null); +} +``` + +XML + +``` + + + + + + + +``` + +##### Datatype Channel Configuration + +Sometimes, a consumer can process only a particular type of payload, forcing you to ensure the payload type of the input messages. +The first thing that comes to mind may be to use a message filter. +However, all that a message filter can do is filter out messages that are not compliant with the requirements of the consumer. +Another way would be to use a content-based router and route messages with non-compliant data-types to specific transformers to enforce transformation and conversion to the required data type. +This would work, but a simpler way to accomplish the same thing is to apply the [Datatype Channel](https://www.enterpriseintegrationpatterns.com/DatatypeChannel.html) pattern. +You can use separate datatype channels for each specific payload data type.
+ +To create a datatype channel that accepts only messages that contain a certain payload type, provide the data type’s fully-qualified class name in the channel element’s `datatype` attribute, as the following example shows: + +Java + +``` +@Bean +public MessageChannel numberChannel() { + DirectChannel channel = new DirectChannel(); + channel.setDatatypes(Number.class); + return channel; +} +``` + +XML + +``` + +``` + +Note that the type check passes for any type that is assignable to the channel’s datatype. +In other words, the `numberChannel` in the preceding example would accept messages whose payload is `java.lang.Integer` or `java.lang.Double`. +Multiple types can be provided as a comma-delimited list, as the following example shows: + +Java + +``` +@Bean +public MessageChannel numberChannel() { + DirectChannel channel = new DirectChannel(); + channel.setDatatypes(String.class, Number.class); + return channel; +} +``` + +XML + +``` + +``` + +So the 'numberChannel' in the preceding example accepts only messages with a data type of `java.lang.Number`. +But what happens if the payload of the message is not of the required type? +It depends on whether you have defined a bean named `integrationConversionService` that is an instance of Spring’s [Conversion Service](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/validation.html#core-convert-ConversionService-API). +If not, then an `Exception` would be thrown immediately. +However, if you have defined an `integrationConversionService` bean, it is used in an attempt to convert the message’s payload to the acceptable type. + +You can even register custom converters. +For example, suppose you send a message with a `String` payload to the 'numberChannel' we configured above. 
+You might handle the message as follows: + +``` +MessageChannel inChannel = context.getBean("numberChannel", MessageChannel.class); +inChannel.send(new GenericMessage("5")); +``` + +Typically this would be a perfectly legal operation. +However, since we use Datatype Channel, the result of such an operation would generate an exception similar to the following: + +``` +Exception in thread "main" org.springframework.integration.MessageDeliveryException: +Channel 'numberChannel' +expected one of the following datataypes [class java.lang.Number], +but received [class java.lang.String] +… +``` + +The exception happens because we require the payload type to be a `Number`, but we sent a `String`. +So we need something to convert a `String` to a `Number`. +For that, we can implement a converter similar to the following example: + +``` +public static class StringToIntegerConverter implements Converter<String, Integer> { + public Integer convert(String source) { + return Integer.parseInt(source); + } +} +``` + +Then we can register it as a converter with the Integration Conversion Service, as the following example shows: + +Java + +``` +@Bean +@IntegrationConverter +public StringToIntegerConverter strToInt() { + return new StringToIntegerConverter(); +} +``` + +XML + +``` + + + +``` + +Or on the `StringToIntegerConverter` class when it is marked with the `@Component` annotation for auto-scanning. + +When the 'converter' element is parsed, it creates the `integrationConversionService` bean if one is not already defined. +With that converter in place, the `send` operation would now be successful, because the datatype channel uses that converter to convert the `String` payload to an `Integer`. + +For more information regarding payload type conversion, see [Payload Type Conversion](./endpoint.html#payload-type-conversion).
+ +Beginning with version 4.0, the `integrationConversionService` is invoked by the `DefaultDatatypeChannelMessageConverter`, which looks up the conversion service in the application context. +To use a different conversion technique, you can specify the `message-converter` attribute on the channel. +This must be a reference to a `MessageConverter` implementation. +Only the `fromMessage` method is used. +It provides the converter with access to the message headers (in case the conversion might need information from the headers, such as `content-type`). +The method can return only the converted payload or a full `Message` object. +If the latter, the converter must be careful to copy all the headers from the inbound message. + +Alternatively, you can declare a `` of type `MessageConverter` with an ID of `datatypeChannelMessageConverter`, and that converter is used by all channels with a `datatype`. + +##### `QueueChannel` Configuration + +To create a `QueueChannel`, use the `` sub-element. +You may specify the channel’s capacity as follows: + +Java + +``` +@Bean +public PollableChannel queueChannel() { + return new QueueChannel(25); +} +``` + +XML + +``` + + + +``` + +| |If you do not provide a value for the 'capacity' attribute on this `` sub-element, the resulting queue is unbounded.
    To avoid issues such as running out of memory, we highly recommend that you set an explicit value for a bounded queue.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Persistent `QueueChannel` Configuration + +Since a `QueueChannel` provides the capability to buffer messages but does so in-memory only by default, it also introduces a possibility that messages could be lost in the event of a system failure. +To mitigate this risk, a `QueueChannel` may be backed by a persistent implementation of the `MessageGroupStore` strategy interface. +For more details on `MessageGroupStore` and `MessageStore`, see [Message Store](./message-store.html#message-store). + +| |The `capacity` attribute is not allowed when the `message-store` attribute is used.| +|---|-----------------------------------------------------------------------------------| + +When a `QueueChannel` receives a `Message`, it adds the message to the message store. +When a `Message` is polled from a `QueueChannel`, it is removed from the message store. + +By default, a `QueueChannel` stores its messages in an in-memory queue, which can lead to the lost message scenario mentioned earlier. +However, Spring Integration provides persistent stores, such as the `JdbcChannelMessageStore`. + +You can configure a message store for any `QueueChannel` by adding the `message-store` attribute, as the following example shows: + +``` + + + + + + + + +``` + +(See samples below for Java/Kotlin Configuration options.) + +The Spring Integration JDBC module also provides a schema Data Definition Language (DDL) for a number of popular databases. +These schemas are located in the org.springframework.integration.jdbc.store.channel package of that module (`spring-integration-jdbc`). 
+ +| |One important feature is that, with any transactional persistent store (such as `JdbcChannelMessageStore`), as long as the poller has a transaction configured, a message removed from the store can be permanently removed only if the transaction completes successfully.
    Otherwise the transaction rolls back, and the `Message` is not lost.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Many other implementations of the message store are available as the growing number of Spring projects related to “NoSQL” data stores come to provide underlying support for these stores. +You can also provide your own implementation of the `MessageGroupStore` interface if you cannot find one that meets your particular needs. + +Since version 4.0, we recommend that `QueueChannel` instances be configured to use a `ChannelMessageStore`, if possible. +These are generally optimized for this use, as compared to a general message store. +If the `ChannelMessageStore` is a `ChannelPriorityMessageStore`, the messages are received in FIFO within priority order. +The notion of priority is determined by the message store implementation. 
+For example, the following example shows the Java configuration for the [MongoDB Channel Message Store](./mongodb.html#mongodb-priority-channel-message-store): + +Java + +``` +@Bean +public BasicMessageGroupStore mongoDbChannelMessageStore(MongoDbFactory mongoDbFactory) { + MongoDbChannelMessageStore store = new MongoDbChannelMessageStore(mongoDbFactory); + store.setPriorityEnabled(true); + return store; +} + +@Bean +public PollableChannel priorityQueue(BasicMessageGroupStore mongoDbChannelMessageStore) { + return new PriorityChannel(new MessageGroupQueue(mongoDbChannelMessageStore, "priorityQueue")); +} +``` + +Java DSL + +``` +@Bean +public IntegrationFlow priorityFlow(PriorityCapableChannelMessageStore mongoDbChannelMessageStore) { + return IntegrationFlows.from((Channels c) -> + c.priority("priorityChannel", mongoDbChannelMessageStore, "priorityGroup")) + .... + .get(); +} +``` + +Kotlin DSL + +``` +@Bean +fun priorityFlow(mongoDbChannelMessageStore: PriorityCapableChannelMessageStore) = + integrationFlow { + channel { priority("priorityChannel", mongoDbChannelMessageStore, "priorityGroup") } + } +``` + +| |Pay attention to the `MessageGroupQueue` class.
    That is a `BlockingQueue` implementation to use the `MessageGroupStore` operations.| +|---|---------------------------------------------------------------------------------------------------------------------------------------| + +Another option to customize the `QueueChannel` environment is provided by the `ref` attribute of the `<queue/>` sub-element or its particular constructor. +This attribute supplies the reference to any `java.util.Queue` implementation. +For example, a Hazelcast distributed [`IQueue`](https://hazelcast.com/use-cases/imdg/imdg-messaging/) can be configured as follows: + +``` +@Bean +public HazelcastInstance hazelcastInstance() { + return Hazelcast.newHazelcastInstance(new Config() + .setProperty("hazelcast.logging.type", "log4j")); +} + +@Bean +public PollableChannel distributedQueue() { + return new QueueChannel(hazelcastInstance() + .getQueue("springIntegrationQueue")); +} +``` + +##### `PublishSubscribeChannel` Configuration + +To create a `PublishSubscribeChannel`, use the `<publish-subscribe-channel/>` element. +When using this element, you can also specify the `task-executor` used for publishing messages (if none is specified, it publishes in the sender’s thread), as follows: + +Java + +``` +@Bean +public MessageChannel pubsubChannel() { + return new PublishSubscribeChannel(someExecutor()); +} +``` + +XML + +``` + +``` + +If you provide a resequencer or aggregator downstream from a `PublishSubscribeChannel`, you can set the 'apply-sequence' property on the channel to `true`. +Doing so indicates that the channel should set the `sequence-size` and `sequence-number` message headers as well as the correlation ID prior to passing along the messages. +For example, if there are five subscribers, the `sequence-size` would be set to `5`, and the messages would have `sequence-number` header values ranging from `1` to `5`. + +Along with the `Executor`, you can also configure an `ErrorHandler`.
+By default, the `PublishSubscribeChannel` uses a `MessagePublishingErrorHandler` implementation to send an error to the `MessageChannel` from the `errorChannel` header or into the global `errorChannel` instance. +If an `Executor` is not configured, the `ErrorHandler` is ignored and exceptions are thrown directly to the caller’s thread. + +If you provide a `Resequencer` or `Aggregator` downstream from a `PublishSubscribeChannel`, you can set the 'apply-sequence' property on the channel to `true`. +Doing so indicates that the channel should set the sequence-size and sequence-number message headers as well as the correlation ID prior to passing along the messages. +For example, if there are five subscribers, the sequence-size would be set to `5`, and the messages would have sequence-number header values ranging from `1` to `5`. + +The following example shows how to set the `apply-sequence` header to `true`: + +Java + +``` +@Bean +public MessageChannel pubsubChannel() { + PublishSubscribeChannel channel = new PublishSubscribeChannel(); + channel.setApplySequence(true); + return channel; +} +``` + +XML + +``` + +``` + +| |The `apply-sequence` value is `false` by default so that a publish-subscribe channel can send the exact same message instances to multiple outbound channels.
    Since Spring Integration enforces immutability of the payload and header references, when the flag is set to `true`, the channel creates new `Message` instances with the same payload reference but different header values.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 5.4.3, the `PublishSubscribeChannel` can also be configured with the `requireSubscribers` option of its `BroadcastingDispatcher` to indicate that this channel will not ignore a message silently when it has no subscribers. +A `MessageDispatchingException` with a `Dispatcher has no subscribers` message is thrown when there are no subscribers and this option is set to `true`. + +##### `ExecutorChannel` + +To create an `ExecutorChannel`, add the `` sub-element with a `task-executor` attribute. +The attribute’s value can reference any `TaskExecutor` within the context. +For example, doing so enables configuration of a thread pool for dispatching messages to subscribed handlers. +As mentioned earlier, doing so breaks the single-threaded execution context between sender and receiver so that any active transaction context is not shared by the invocation of the handler (that is, the handler may throw an `Exception`, but the `send` invocation has already returned successfully). 
+The following example shows how to use the `dispatcher` element and specify an executor in the `task-executor` attribute: + +Java + +``` +@Bean +public MessageChannel executorChannel() { + return new ExecutorChannel(someExecutor()); +} +``` + +XML + +``` + + + +``` + +| |The `load-balancer` and `failover` options are also both available on the `<dispatcher/>` sub-element, as described earlier in [`DirectChannel` Configuration](#channel-configuration-directchannel).
    The same defaults apply.
    Consequently, the channel has a round-robin load-balancing strategy with failover enabled unless explicit configuration is provided for one or both of those attributes, as the following example shows:

    ```



    ```| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `PriorityChannel` Configuration + +To create a `PriorityChannel`, use the `` sub-element, as the following example shows: + +Java + +``` +@Bean +public PollableChannel priorityChannel() { + return new PriorityChannel(20); +} +``` + +XML + +``` + + + +``` + +By default, the channel consults the `priority` header of the message. +However, you can instead provide a custom `Comparator` reference. +Also, note that the `PriorityChannel` (like the other types) does support the `datatype` attribute. +As with the `QueueChannel`, it also supports a `capacity` attribute. +The following example demonstrates all of these: + +Java + +``` +@Bean +public PollableChannel priorityChannel() { + PriorityChannel channel = new PriorityChannel(20, widgetComparator()); + channel.setDatatypes(example.Widget.class); + return channel; +} +``` + +XML + +``` + + + +``` + +Since version 4.0, the `priority-channel` child element supports the `message-store` option (`comparator` and `capacity` are not allowed in that case). +The message store must be a `PriorityCapableChannelMessageStore`. +Implementations of the `PriorityCapableChannelMessageStore` are currently provided for `Redis`, `JDBC`, and `MongoDB`. +See [`QueueChannel` Configuration](#channel-configuration-queuechannel) and [Message Store](./message-store.html#message-store) for more information. 
+You can find sample configuration in [Backing Message Channels](./jdbc.html#jdbc-message-store-channels). + +##### `RendezvousChannel` Configuration + +A `RendezvousChannel` is created when the queue sub-element is a ``. +It does not provide any additional configuration options to those described earlier, and its queue does not accept any capacity value, since it is a zero-capacity direct handoff queue. +The following example shows how to declare a `RendezvousChannel`: + +Java + +``` +@Bean +public PollableChannel rendezvousChannel() { + return new RendezvousChannel(); +} +``` + +XML + +``` + + + +``` + +##### Scoped Channel Configuration + +Any channel can be configured with a `scope` attribute, as the following example shows: + +``` + +``` + +##### Channel Interceptor Configuration + +Message channels may also have interceptors, as described in [Channel Interceptors](#channel-interceptors). +The `` sub-element can be added to a `` (or the more specific element types). +You can provide the `ref` attribute to reference any Spring-managed object that implements the `ChannelInterceptor` interface, as the following example shows: + +``` + + + + + +``` + +In general, we recommend defining the interceptor implementations in a separate location, since they usually provide common behavior that can be reused across multiple channels. + +##### Global Channel Interceptor Configuration + +Channel interceptors provide a clean and concise way of applying cross-cutting behavior per individual channel. +If the same behavior should be applied on multiple channels, configuring the same set of interceptors for each channel would not be the most efficient way. +To avoid repeated configuration while also enabling interceptors to apply to multiple channels, Spring Integration provides global interceptors. 
+Consider the following pair of examples: + +``` + + + +``` + +``` + + + +``` + +Each `` element lets you define a global interceptor, which is applied on all channels that match any patterns defined by the `pattern` attribute. +In the preceding case, the global interceptor is applied on the 'thing1' channel and all other channels that begin with 'thing2' or 'input' but not to channels starting with 'thing3' (since version 5.0). + +| |The addition of this syntax to the pattern causes one possible (though perhaps unlikely) problem.
    If you have a bean named `!thing1` and you included a pattern of `!thing1` in your channel interceptor’s `pattern` patterns, it no longer matches.
    The pattern now matches all beans not named `thing1`.
    In this case, you can escape the `!` in the pattern with `\`.
    The pattern `\!thing1` matches a bean named `!thing1`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The order attribute lets you manage where this interceptor is injected when there are multiple interceptors on a given channel. +For example, channel 'inputChannel' could have individual interceptors configured locally (see below), as the following example shows: + +``` + + + + + +``` + +A reasonable question is “how is a global interceptor injected in relation to other interceptors configured locally or through other global interceptor definitions?” +The current implementation provides a simple mechanism for defining the order of interceptor execution. +A positive number in the `order` attribute ensures interceptor injection after any existing interceptors, while a negative number ensures that the interceptor is injected before existing interceptors. +This means that, in the preceding example, the global interceptor is injected after (since its `order` is greater than `0`) the 'wire-tap' interceptor configured locally. +If there were another global interceptor with a matching `pattern`, its order would be determined by comparing the values of both interceptors' `order` attributes. +To inject a global interceptor before the existing interceptors, use a negative value for the `order` attribute. + +| |Note that both the `order` and `pattern` attributes are optional.
    The default value for `order` will be 0 and for `pattern`, the default is '\*' (to match all channels).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Wire Tap + +As mentioned earlier, Spring Integration provides a simple wire tap interceptor. +You can configure a wire tap on any channel within an `` element. +Doing so is especially useful for debugging and can be used in conjunction with Spring Integration’s logging channel adapter as follows: + +``` + + + + + + + +``` + +| |The 'logging-channel-adapter' also accepts an 'expression' attribute so that you can evaluate a SpEL expression against the 'payload' and 'headers' variables.
    Alternatively, to log the full message `toString()` result, provide a value of `true` for the 'log-full-message' attribute.
    By default, it is `false` so that only the payload is logged.
    Setting it to `true` enables logging of all headers in addition to the payload.
    The 'expression' option provides the most flexibility (for example, `expression="payload.user.name"`).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +One of the common misconceptions about the wire tap and other similar components ([Message Publishing Configuration](./message-publishing.html#message-publishing-config)) is that they are automatically asynchronous in nature. +By default, wire tap as a component is not invoked asynchronously. +Instead, Spring Integration focuses on a single unified approach to configuring asynchronous behavior: the message channel. +What makes certain parts of the message flow synchronous or asynchronous is the type of Message Channel that has been configured within that flow. +That is one of the primary benefits of the message channel abstraction. +From the inception of the framework, we have always emphasized the need and the value of the message channel as a first-class citizen of the framework. +It is not just an internal, implicit realization of the EIP pattern. +It is fully exposed as a configurable component to the end user. 
+So, the wire tap component is only responsible for performing the following tasks: + +* Intercept a message flow by tapping into a channel (for example, `channelA`) + +* Grab each message + +* Send the message to another channel (for example, `channelB`) + +It is essentially a variation of the bridge pattern, but it is encapsulated within a channel definition (and hence easier to enable and disable without disrupting a flow). +Also, unlike the bridge, it basically forks another message flow. +Is that flow synchronous or asynchronous? The answer depends on the type of message channel that 'channelB' is. +We have the following options: direct channel, pollable channel, and executor channel. +The last two break the thread boundary, making communication over such channels asynchronous, because the dispatching of the message from that channel to its subscribed handlers happens on a different thread than the one used to send the message to that channel. +That is what is going to make your wire-tap flow synchronous or asynchronous. +It is consistent with other components within the framework (such as message publisher) and adds a level of consistency and simplicity by sparing you from worrying in advance (other than writing thread-safe code) about whether a particular piece of code should be implemented as synchronous or asynchronous. +The actual wiring of two pieces of code (say, component A and component B) over a message channel is what makes their collaboration synchronous or asynchronous. +You may even want to change from synchronous to asynchronous in the future, and message channel lets you do it swiftly without ever touching the code. + +One final point regarding the wire tap is that, despite the rationale provided above for not being asynchronous by default, you should keep in mind that it is usually desirable to hand off the message as soon as possible.
+Therefore, it would be quite common to use an asynchronous channel option as the wire tap’s outbound channel. +However, we do not enforce asynchronous behavior by default. +There are a number of use cases that would break if we did, including that you might not want to break a transactional boundary. +Perhaps you use the wire tap pattern for auditing purposes, and you do want the audit messages to be sent within the original transaction. +As an example, you might connect the wire tap to a JMS outbound channel adapter. +That way, you get the best of both worlds: 1) the sending of a JMS Message can occur within the transaction while 2) it is still a “fire-and-forget” action, thereby preventing any noticeable delay in the main message flow.
    You need to exclude such channels from those being intercepted by the current interceptor.
    This can be done with appropriate patterns or programmatically.
    If you have a custom `ChannelInterceptor` that references a `channel`, consider implementing `VetoCapableInterceptor`.
    That way, the framework asks the interceptor if it is OK to intercept each channel that is a candidate, based on the supplied pattern.
    You can also add runtime protection in the interceptor methods to ensure that the channel is not one that is referenced by the interceptor.
    The `WireTap` uses both of these techniques.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 4.3, the `WireTap` has additional constructors that take a `channelName` instead of a `MessageChannel` instance. +This can be convenient for Java configuration and when channel auto-creation logic is being used. +The target `MessageChannel` bean is resolved from the provided `channelName` later, on the first interaction with the +interceptor.
+ +| |Channel resolution requires a `BeanFactory`, so the wire tap instance must be a Spring-managed bean.| +|---|----------------------------------------------------------------------------------------------------| + +This late-binding approach also allows simplification of typical wire-tapping patterns with Java DSL configuration, as the following example shows: + +``` +@Bean +public PollableChannel myChannel() { + return MessageChannels.queue() + .wireTap("loggingFlow.input") + .get(); +} + +@Bean +public IntegrationFlow loggingFlow() { + return f -> f.log(); +} +``` + +##### Conditional Wire Taps + +Wire taps can be made conditional by using the `selector` or `selector-expression` attributes. +The `selector` references a `MessageSelector` bean, which can determine at runtime whether the message should go to the tap channel. +Similarly, the `selector-expression` is a boolean SpEL expression that performs the same purpose: If the expression evaluates to `true`, the message is sent to the tap channel. + +##### Global Wire Tap Configuration + +It is possible to configure a global wire tap as a special case of the [Global Channel Interceptor Configuration](#global-channel-configuration-interceptors). +To do so, configure a top level `wire-tap` element. +Now, in addition to the normal `wire-tap` namespace support, the `pattern` and `order` attributes are supported and work in exactly the same way as they do for the `channel-interceptor`. +The following example shows how to configure a global wire tap: + +Java + +``` +@Bean +@GlobalChannelInterceptor(patterns = "input*,thing2*,thing1", order = 3) +public WireTap wireTap(MessageChannel wiretapChannel) { + return new WireTap(wiretapChannel); +} +``` + +XML + +``` + +``` + +| |A global wire tap provides a convenient way to configure a single-channel wire tap externally without modifying the existing channel configuration.
    To do so, set the `pattern` attribute to the target channel name.
    For example, you can use this technique to configure a test case to verify messages on a channel.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Special Channels + +Two special channels are defined within the application context by default: `errorChannel` and `nullChannel`. +The 'nullChannel' (an instance of `NullChannel`) acts like `/dev/null`, logging any message sent to it at the `DEBUG` level and returning immediately. +The special treatment is applied for an `org.reactivestreams.Publisher` payload of a sent message: it is subscribed to in this channel immediately, to initiate reactive stream processing, although the data is discarded. +An error thrown from a reactive stream processing (see `Subscriber.onError(Throwable)`) is logged under the warn level for possible investigation. +If there is need to do anything with such an error, the `[ReactiveRequestHandlerAdvice](./handler-advice.html#reactive-advice)` with a `Mono.doOnError()` customization can be applied to the message handler producing `Mono` reply into this `nullChannel`. +Any time you face channel resolution errors for a reply that you do not care about, you can set the affected component’s `output-channel` attribute to 'nullChannel' (the name, 'nullChannel', is reserved within the application context). + +The 'errorChannel' is used internally for sending error messages and may be overridden with a custom configuration. +This is discussed in greater detail in [Error Handling](./error-handling.html#error-handling). + +See also [Message Channels](./dsl.html#java-dsl-channels) in the Java DSL chapter for more information about message channel and interceptors. 
+ +### Poller + +This section describes how polling works in Spring Integration. + +#### Polling Consumer + +When Message Endpoints (Channel Adapters) are connected to channels and instantiated, they produce one of the following instances: + +* [`PollingConsumer`](https://docs.spring.io/spring-integration/api/org/springframework/integration/endpoint/PollingConsumer.html) + +* [`EventDrivenConsumer`](https://docs.spring.io/spring-integration/api/org/springframework/integration/endpoint/EventDrivenConsumer.html) + +The actual implementation depends on the type of channel to which these endpoints connect. +A channel adapter connected to a channel that implements the [`org.springframework.messaging.SubscribableChannel`](https://docs.spring.io/spring/docs/current/javadoc-api/index.html?org/springframework/messaging/SubscribableChannel.html) interface produces an instance of `EventDrivenConsumer`. +On the other hand, a channel adapter connected to a channel that implements the [`org.springframework.messaging.PollableChannel`](https://docs.spring.io/spring/docs/current/javadoc-api/index.html?org/springframework/messaging/PollableChannel.html) interface (such as a `QueueChannel`) produces an instance of `PollingConsumer`. + +Polling consumers let Spring Integration components actively poll for Messages rather than process messages in an event-driven manner. + +They represent a critical cross-cutting concern in many messaging scenarios. +In Spring Integration, polling consumers are based on the pattern with the same name, which is described in the book *Enterprise Integration Patterns*, by Gregor Hohpe and Bobby Woolf. +You can find a description of the pattern on the [book’s website](https://www.enterpriseintegrationpatterns.com/PollingConsumer.html). + +#### Pollable Message Source + +Spring Integration offers a second variation of the polling consumer pattern. +When inbound channel adapters are used, these adapters are often wrapped by a `SourcePollingChannelAdapter`. 
+For example, when retrieving messages from a remote FTP Server location, the adapter described in [FTP Inbound Channel Adapter](./ftp.html#ftp-inbound) is configured with a poller to periodically retrieve messages. +So, when components are configured with pollers, the resulting instances are of one of the following types: + +* [`PollingConsumer`](https://docs.spring.io/spring-integration/api/org/springframework/integration/endpoint/PollingConsumer.html) + +* [`SourcePollingChannelAdapter`](https://docs.spring.io/spring-integration/api/org/springframework/integration/endpoint/SourcePollingChannelAdapter.html) + +This means that pollers are used in both inbound and outbound messaging scenarios. +Here are some use cases in which pollers are used: + +* Polling certain external systems, such as FTP Servers, Databases, and Web Services + +* Polling internal (pollable) message channels + +* Polling internal services (such as repeatedly executing methods on a Java class) + +| |AOP advice classes can be applied to pollers, in an `advice-chain`, such as a transaction advice to start a transaction.
    Starting with version 4.1, a `PollSkipAdvice` is provided.
    Pollers use triggers to determine the time of the next poll.
    The `PollSkipAdvice` can be used to suppress (skip) a poll, perhaps because there is some downstream condition that would prevent the message being processed.
    To use this advice, you have to provide it with an implementation of a `PollSkipStrategy`.
    Starting with version 4.2.5, a `SimplePollSkipStrategy` is provided.
    To use it, you can add an instance as a bean to the application context, inject it into a `PollSkipAdvice`, and add that to the poller’s advice chain.
    To skip polling, call `skipPolls()`.
    To resume polling, call `reset()`.
    Version 4.2 added more flexibility in this area.
    See [Conditional Pollers for Message Sources](#conditional-pollers).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +This chapter is meant to only give a high-level overview of polling consumers and how they fit into the concept of message channels (see [Message Channels](./channel.html#channel)) and channel adapters (see [Channel Adapter](./channel-adapter.html#channel-adapter)). +For more information regarding messaging endpoints in general and polling consumers in particular, see [Message Endpoints](./endpoint.html#endpoint). + +#### Deferred Acknowledgment Pollable Message Source + +Starting with version 5.0.1, certain modules provide `MessageSource` implementations that support deferring acknowledgment until the downstream flow completes (or hands off the message to another thread). +This is currently limited to the `AmqpMessageSource` and the `KafkaMessageSource`. + +With these message sources, the `IntegrationMessageHeaderAccessor.ACKNOWLEDGMENT_CALLBACK` header (see [`MessageHeaderAccessor` API](./message.html#message-header-accessor)) is added to the message. 
+When used with pollable message sources, the value of the header is an instance of `AcknowledgmentCallback`, as the following example shows: + +``` +@FunctionalInterface +public interface AcknowledgmentCallback { + + void acknowledge(Status status); + + boolean isAcknowledged(); + + void noAutoAck(); + + default boolean isAutoAck(); + + enum Status { + + /** + * Mark the message as accepted. + */ + ACCEPT, + + /** + * Mark the message as rejected. + */ + REJECT, + + /** + * Reject the message and requeue so that it will be redelivered. + */ + REQUEUE + + } + +} +``` + +Not all message sources (for example, a `KafkaMessageSource`) support the `REJECT` status. +It is treated the same as `ACCEPT`. + +Applications can acknowledge a message at any time, as the following example shows: + +``` +Message received = source.receive(); + +... + +StaticMessageHeaderAccessor.getAcknowledgmentCallback(received) + .acknowledge(Status.ACCEPT); +``` + +If the `MessageSource` is wired into a `SourcePollingChannelAdapter`, when the poller thread returns to the adapter after the downstream flow completes, the adapter checks whether the acknowledgment has already been acknowledged and, if not, sets its status to `ACCEPT` it (or `REJECT` if the flow throws an exception). +The status values are defined in the [`AcknowledgmentCallback.Status` enumeration](https://docs.spring.io/spring-integration/api/org/springframework/integration/support/AcknowledgmentCallback.Status.html). + +Spring Integration provides `MessageSourcePollingTemplate` to perform ad-hoc polling of a `MessageSource`. +This, too, takes care of setting `ACCEPT` or `REJECT` on the `AcknowledgmentCallback` when the `MessageHandler` callback returns (or throws an exception). +The following example shows how to poll with the `MessageSourcePollingTemplate`: + +``` +MessageSourcePollingTemplate template = + new MessageSourcePollingTemplate(this.source); +template.poll(h -> { + ... 
+}); +``` + +In both cases (`SourcePollingChannelAdapter` and `MessageSourcePollingTemplate`), you can disable auto ack/nack by calling `noAutoAck()` on the callback. +You might do this if you hand off the message to another thread and wish to acknowledge later. +Not all implementations support this (for example, Apache Kafka does not, because the offset commit has to be performed on the same thread). + +#### Conditional Pollers for Message Sources + +This section covers how to use conditional pollers. + +##### Background + +`Advice` objects, in an `advice-chain` on a poller, advise the whole polling task (both message retrieval and processing). +These “around advice” methods do not have access to any context for the poll — only the poll itself. +This is fine for requirements such as making a task transactional or skipping a poll due to some external condition, as discussed earlier. +What if we wish to take some action depending on the result of the `receive` part of the poll or if we want to adjust the poller depending on conditions? For those instances, Spring Integration offers “Smart” Polling. + +##### “Smart” Polling + +Version 5.3 introduced the `ReceiveMessageAdvice` interface. +(The `AbstractMessageSourceAdvice` has been deprecated in favor of `default` methods in the `MessageSourceMutator`.) +Any `Advice` objects in the `advice-chain` that implement this interface are applied only to the receive operation - `MessageSource.receive()` and `PollableChannel.receive(timeout)`. +Therefore they can be applied only for the `SourcePollingChannelAdapter` or `PollingConsumer`. +Such classes implement the following methods: + +* `beforeReceive(Object source)`This method is called before the `Object.receive()` method. + It lets you examine and reconfigure the source. + Returning `false` cancels this poll (similar to the `PollSkipAdvice` mentioned earlier). + +* `Message afterReceive(Message result, Object source)`This method is called after the `receive()` method. 
+ Again, you can reconfigure the source or take any action (perhaps depending on the result, which can be `null` if there was no message created by the source). + You can even return a different message. + +| |Thread safety

    If an advice mutates the source, you should not configure the poller with a `TaskExecutor`.
    If an advice mutates the source, such mutations are not thread safe and could cause unexpected results, especially with high frequency pollers.
    If you need to process poll results concurrently, consider using a downstream `ExecutorChannel` instead of adding an executor to the poller.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Advice Chain Ordering

    You should understand how the advice chain is processed during initialization. `Advice` objects that do not implement `ReceiveMessageAdvice` are applied to the whole poll process and are all invoked first, in order, before any `ReceiveMessageAdvice`.
    Then `ReceiveMessageAdvice` objects are invoked in order around the source `receive()` method.
    If you have, for example, `Advice` objects `a, b, c, d`, where `b` and `d` are `ReceiveMessageAdvice`, the objects are applied in the following order: `a, c, b, d`.
    Also, if a source is already a `Proxy`, the `ReceiveMessageAdvice` is invoked after any existing `Advice` objects.
    If you wish to change the order, you must wire up the proxy yourself.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `SimpleActiveIdleReceiveMessageAdvice` + +(The previous `SimpleActiveIdleMessageSourceAdvice` for only `MessageSource` is deprecated.) +This advice is a simple implementation of `ReceiveMessageAdvice`. +When used in conjunction with a `DynamicPeriodicTrigger`, it adjusts the polling frequency, depending on whether or not the previous poll resulted in a message or not. +The poller must also have a reference to the same `DynamicPeriodicTrigger`. + +| |Important: Async Handoff

    `SimpleActiveIdleReceiveMessageAdvice` modifies the trigger based on the `receive()` result.
    This works only if the advice is called on the poller thread.
    It does not work if the poller has a `task-executor`.
    To use this advice where you wish to use async operations after the result of a poll, do the async handoff later, perhaps by using an `ExecutorChannel`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `CompoundTriggerAdvice` + +This advice allows the selection of one of two triggers based on whether a poll returns a message or not. +Consider a poller that uses a `CronTrigger`.`CronTrigger` instances are immutable, so they cannot be altered once constructed. +Consider a use case where we want to use a cron expression to trigger a poll once each hour but, if no message is received, poll once per minute and, when a message is retrieved, revert to using the cron expression. + +The advice (and poller) use a `CompoundTrigger` for this purpose. +The trigger’s `primary` trigger can be a `CronTrigger`. +When the advice detects that no message is received, it adds the secondary trigger to the `CompoundTrigger`. +When the `CompoundTrigger` instance’s `nextExecutionTime` method is invoked, it delegates to the secondary trigger, if present. +Otherwise, it delegates to the primary trigger. + +The poller must also have a reference to the same `CompoundTrigger`. + +The following example shows the configuration for the hourly cron expression with a fallback to every minute: + +``` + + + + + + + + + + + + + + + + + + + + + + + +``` + +| |Important: Async Handoff

    `CompoundTriggerAdvice` modifies the trigger based on the `receive()` result.
    This works only if the advice is called on the poller thread.
    It does not work if the poller has a `task-executor`.
    To use this advice where you wish to use async operations after the result of a poll, do the async handoff later, perhaps by using an `ExecutorChannel`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### MessageSource-only Advices + +Some advices might be applied only for the `MessageSource.receive()` and they don’t make sense for `PollableChannel`. +For this purpose a `MessageSourceMutator` interface (an extension of the `ReceiveMessageAdvice`) is still present. +With `default` methods it fully replaces already deprecated `AbstractMessageSourceAdvice` and should be used in those implementations where only `MessageSource` proxying is expected. +See [Inbound Channel Adapters: Polling Multiple Servers and Directories](./ftp.html#ftp-rotating-server-advice) for more information. + +### Channel Adapter + +A channel adapter is a message endpoint that enables connecting a single sender or receiver to a message channel. +Spring Integration provides a number of adapters to support various transports, such as JMS, file, HTTP, web services, mail, and more. +Upcoming chapters of this reference guide discuss each adapter. +However, this chapter focuses on the simple but flexible method-invoking channel adapter support. +There are both inbound and outbound adapters, and each may be configured with XML elements provided in the core namespace. +These provide an easy way to extend Spring Integration, as long as you have a method that can be invoked as either a source or a destination. 
+ +#### Configuring An Inbound Channel Adapter + +An `inbound-channel-adapter` element (a `SourcePollingChannelAdapter` in Java configuration) can invoke any method on a Spring-managed object and send a non-null return value to a `MessageChannel` after converting the method’s output to a `Message`. +When the adapter’s subscription is activated, a poller tries to receive messages from the source. +The poller is scheduled with the `TaskScheduler` according to the provided configuration. +To configure the polling interval or cron expression for an individual channel adapter, you can provide a 'poller' element with one of the scheduling attributes, such as 'fixed-rate' or 'cron'. +The following example defines two `inbound-channel-adapter` instances: + +Java DSL + +``` +@Bean +public IntegrationFlow source1() { + return IntegrationFlows.from(() -> new GenericMessage<>(...), + e -> e.poller(p -> p.fixedRate(5000))) + ... + .get(); +} + +@Bean +public IntegrationFlow source2() { + return IntegrationFlows.from(() -> new GenericMessage<>(...), + e -> e.poller(p -> p.cron("30 * 9-17 * * MON-FRI"))) + ... + .get(); +} +``` + +Java + +``` +public class SourceService { + + @InboundChannelAdapter(channel = "channel1", poller = @Poller(fixedRate = "5000")) + Object method1() { + ... + } + + @InboundChannelAdapter(channel = "channel2", poller = @Poller(cron = "30 * 9-17 * * MON-FRI")) + Object method2() { + ... + } +} +``` + +Kotlin DSL + +``` +@Bean +fun messageSourceFlow() = + integrationFlow( { GenericMessage<>(...) }, + { poller { it.fixedRate(5000) } }) { + ... + } +``` + +XML + +``` + + + + + + + +``` + +See also [Channel Adapter Expressions and Scripts](#channel-adapter-expressions-and-scripts). + +| |If no poller is provided, then a single default poller must be registered within the context.
    See [Endpoint Namespace Support](./endpoint.html#endpoint-namespace) for more detail.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Important: Poller Configuration

    All the `inbound-channel-adapter` types are backed by a `SourcePollingChannelAdapter`, which means they contain a poller configuration that polls the `MessageSource` (to invoke a custom method that produces the value that becomes a `Message` payload) based on the configuration specified in the Poller.
    The following example shows the configuration of two pollers:

    ```



    ```

    In the first configuration, the polling task is invoked once per poll, and, during each task (poll), the method (which results in the production of the message) is invoked once, based on the `max-messages-per-poll` attribute value.
    In the second configuration, the polling task is invoked 10 times per poll or until it returns 'null', thus possibly producing ten messages per poll while each poll happens at one-second intervals.
    However, what happens if the configuration looks like the following example:

    ```

    ```

    Note that there is no `max-messages-per-poll` specified.
    As we cover later, the identical poller configuration in the `PollingConsumer` (for example, `service-activator`, `filter`, `router`, and others) would have a default value of `-1` for `max-messages-per-poll`, which means “execute the polling task non-stop unless the polling method returns null (perhaps because there are no more messages in the `QueueChannel`)” and then sleep for one second.

    However, in the `SourcePollingChannelAdapter`, it is a bit different.
    The default value for `max-messages-per-poll` is `1`, unless you explicitly set it to a negative value (such as `-1`).
    This makes sure that the poller can react to lifecycle events (such as start and stop) and prevents it from potentially spinning in an infinite loop if the implementation of the custom method of the `MessageSource` has a potential to never return null and happens to be non-interruptible.

    However, if you are sure that your method can return null and you need to poll for as many sources as available per each poll, you should explicitly set `max-messages-per-poll` to a negative value, as the following example shows:

    ```

    ```

    Starting with version 5.5, a `0` value for `max-messages-per-poll` has a special meaning - skip the `MessageSource.receive()` call altogether, which may be considered as pausing for this inbound channel adapter until the `maxMessagesPerPoll` is changed to a non-zero value at a later time, e.g. via a Control Bus.

    Also see [Global Default Poller](./endpoint.html#global-default-poller) for more information.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Configuring An Outbound Channel Adapter + +An `outbound-channel-adapter` element (a `@ServiceActivator` for Java configuration) can also connect a `MessageChannel` to any POJO consumer method that should be invoked with the payload of messages sent to that channel. +The following example shows how to define an outbound channel adapter: + +Java DSL + +``` +@Bean +public IntegrationFlow outboundChannelAdapterFlow(MyPojo myPojo) { + return f -> f + .handle(myPojo, "handle"); +} +``` + +Java + +``` +public class MyPojo { + + @ServiceActivator(channel = "channel1") + void handle(Object payload) { + ... 
+ } + +} +``` + +Kotlin DSL + +``` +@Bean +fun outboundChannelAdapterFlow(myPojo: MyPojo) = + integrationFlow { + handle(myPojo, "handle") + } +``` + +XML + +``` + + + +``` + +If the channel being adapted is a `PollableChannel`, you must provide a poller sub-element (the `@Poller` sub-annotation on the `@ServiceActivator`), as the following example shows: + +Java + +``` +public class MyPojo { + + @ServiceActivator(channel = "channel1", poller = @Poller(fixedRate = "3000")) + void handle(Object payload) { + ... + } + +} +``` + +XML + +``` + + + + + +``` + +You should use a `ref` attribute if the POJO consumer implementation can be reused in other `` definitions. +However, if the consumer implementation is referenced by only a single definition of the ``, you can define it as an inner bean, as the following example shows: + +``` + + + +``` + +| |Using both the `ref` attribute and an inner handler definition in the same `` configuration is not allowed, as it creates an ambiguous condition.
    Such a configuration results in an exception being thrown.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Any channel adapter can be created without a `channel` reference, in which case it implicitly creates an instance of `DirectChannel`. +The created channel’s name matches the `id` attribute of the `` or `` element. +Therefore, if `channel` is not provided, `id` is required. + +#### Channel Adapter Expressions and Scripts + +Like many other Spring Integration components, the `` and `` also provide support for SpEL expression evaluation. +To use SpEL, provide the expression string in the 'expression' attribute instead of providing the 'ref' and 'method' attributes that are used for method-invocation on a bean. +When an expression is evaluated, it follows the same contract as method-invocation where: the expression for an `` generates a message any time the evaluation result is a non-null value, while the expression for an `` must be the equivalent of a void-returning method invocation. + +Starting with Spring Integration 3.0, an `` can also be configured with a SpEL `` (or even with a ` + + + +``` + +When using the widget system of an underlying library, typically you must also include some CSS resources to obtain the desired look and feel. +For the booking-mvc reference application, Dojo's `tundra.css` is included: + +``` +" /> + +``` + +## 12.4. Spring Javascript Decorations + +A central concept in Spring Javascript is the notion of applying decorations to existing DOM nodes. +This technique is used to progressively enhance a web page such that the page will still be functional in a less capable browser. +The `addDecoration` method is used to apply decorations. 
+ +The following example illustrates enhancing a Spring MVC `` tag with rich suggestion behavior: + +``` + + + +``` + +The `ElementDecoration` is used to apply rich widget behavior to an existing DOM node. +This decoration type does not aim to completely hide the underlying toolkit, so the toolkit's native widget type and attributes are used directly. +This approach allows you to use a common decoration model to integrate any widget from the underlying toolkit in a consistent manner. +See the `booking-mvc` reference application for more examples of applying decorations to do things from suggestions to client-side validation. + +When using the `ElementDecoration` to apply widgets that have rich validation behavior, a common need is to prevent the form from being submitted to the server until validation passes. +This can be done with the `ValidateAllDecoration`: + +``` + + + +``` + +This decorates the "Proceed" button with a special onclick event handler that fires the client side validators and does not allow the form to submit until they pass successfully. + +An `AjaxEventDecoration` applies a client-side event listener that fires a remote Ajax request to the server. It also auto-registers a callback function to link in the response: + +``` +Previous + + +``` + +This decorates the onclick event of the "Previous Results" link with an Ajax call, passing along a special parameter that specifies the fragment to be re-rendered in the response. +Note that this link would still be fully functional if Javascript was unavailable in the client. +(See [Section 12.5, “Handling Ajax Requests”](spring-js.html#spring-js-ajax) for details on how this request is handled on the server.) + +It is also possible to apply more than one decoration to an element. 
+The following example shows a button being decorated with Ajax and validate-all submit suppression: + +``` + + + +``` + +It is also possible to apply a decoration to multiple elements in a single statement using Dojo's query API. +The following example decorates a set of checkbox elements as Dojo Checkbox widgets: + +``` +
    + + + + +
    + +``` + +## 12.5. Handling Ajax Requests + +Spring Javascript's client-side Ajax response handling is built upon the notion of receiving "fragments" back from the server. +These fragments are just standard HTML that is meant to replace portions of the existing page. +The key piece needed on the server is a way to determine which pieces of a full response need to be pulled out for partial rendering. + +In order to be able to render partial fragments of a full response, the full response must be built using a +templating technology that allows the use of composition for constructing the response, and for the member +parts of the composition to be referenced and rendered individually. +Spring Javascript provides some simple Spring MVC extensions that make use of Tiles to achieve this. +The same technique could theoretically be used with any templating system supporting composition. + +Spring Javascript's Ajax remoting functionality is built upon the notion that the core handling code for an +Ajax request should not differ from a standard browser request, thus no special knowledge of an Ajax request +is needed directly in the code and the same handler can be used for both styles of request. + +### 12.5.1. Providing a Library-Specific AjaxHandler + +The key interface for integrating various Ajax libraries with the Ajax-aware behavior of Web Flow (such as not redirecting for a +partial page update) is `org.springframework.js.AjaxHandler`. A `SpringJavascriptAjaxHandler` is configured by default that is able to +detect an Ajax request submitted via the Spring JS client-side API and can respond appropriately in the case where a redirect is required. In +order to integrate a different Ajax library (be it a pure JavaScript library, or a higher-level abstraction such as an Ajax-capable JSF +component library), a custom `AjaxHandler` can be injected into the `FlowHandlerAdapter` or `FlowController`. + +### 12.5.2. 
Handling Ajax Requests with Spring MVC Controllers + +In order to handle Ajax requests with Spring MVC controllers, all that is needed is the configuration of +the provided Spring MVC extensions in your Spring application context for rendering the partial response +(note that these extensions require the use of Tiles for templating): + +``` + + + + +``` + +This configures the `AjaxUrlBasedViewResolver` which in turn interprets Ajax requests and creates `FlowAjaxTilesView` objects to handle rendering of the appropriate fragments. +Note that `FlowAjaxTilesView` is capable of handling the rendering for both Web Flow and pure Spring MVC requests. +The fragments correspond to individual attributes of a Tiles view definition. For example, take the following Tiles view definition: + +``` + + + + + + + + + +``` + +An Ajax request could specify the "body", "hotelSearchForm" or "bookingsTable" to be rendered as fragments in the request. + +### 12.5.3. Handling Ajax Requests with Spring MVC + Spring Web Flow + +Spring Web Flow handles the optional rendering of fragments directly in the flow definition language through use of the `render` element. +The benefit of this approach is that the selection of fragments is completely decoupled from client-side code, such that no special parameters need to be passed with the request the way they +currently must be with the pure Spring MVC controller approach. +For example, if you wanted to render the "hotelSearchForm" fragment from the previous example Tiles view into a rich Javascript popup: + +``` + + + + + + + + + +``` \ No newline at end of file diff --git a/docs/en/spring-web-flow/spring-mvc.md b/docs/en/spring-web-flow/spring-mvc.md new file mode 100644 index 0000000000000000000000000000000000000000..74a9fb5628994b6b7a3c53a9711f461ec8705315 --- /dev/null +++ b/docs/en/spring-web-flow/spring-mvc.md @@ -0,0 +1,403 @@ +# 11. Spring MVC Integration + +## 11.1. 
Introduction + +This chapter shows how to integrate Web Flow into a Spring MVC web +application. The `booking-mvc` sample application is a good +reference for Spring MVC with Web Flow. This application is a simplified +travel site that allows users to search for and book hotel rooms. + +## 11.2. Configuring web.xml + +The first step to using Spring MVC is to configure the`DispatcherServlet` in `web.xml`. You typically do +this once per web application. + +The example below maps all requests that begin with`/spring/` to the DispatcherServlet. An `init-param`is used to provide the `contextConfigLocation`. This is the +configuration file for the web application. + +``` + + Spring MVC Dispatcher Servlet + org.springframework.web.servlet.DispatcherServlet + + contextConfigLocation + /WEB-INF/web-application-config.xml + + + + + Spring MVC Dispatcher Servlet + /spring/* + +``` + +## 11.3. Dispatching to flows + +The `DispatcherServlet` maps requests for application +resources to handlers. A flow is one type of handler. + +### 11.3.1. Registering the FlowHandlerAdapter + +The first step to dispatching requests to flows is to enable flow +handling within Spring MVC. To this, install the`FlowHandlerAdapter`: + +``` + + + + + +``` + +### 11.3.2. Defining flow mappings + +Once flow handling is enabled, the next step is to map specific +application resources to your flows. The simplest way to do this is to +define a `FlowHandlerMapping`: + +``` + + + + + + +``` + +Configuring this mapping allows the Dispatcher to map application +resource paths to flows in a flow registry. For example, accessing the +resource path `/hotels/booking` would result in a registry +query for the flow with id `hotels/booking`. If a flow is +found with that id, that flow will handle the request. If no flow is +found, the next handler mapping in the Dispatcher's ordered chain will +be queried or a "noHandlerFound" response will be returned. + +### 11.3.3. 
Flow handling workflow + +When a valid flow mapping is found, the`FlowHandlerAdapter` figures out whether to start a new +execution of that flow or resume an existing execution based on +information present in the HTTP request. There are a number of defaults +related to starting and resuming flow executions the adapter +employs: + +* HTTP request parameters are made available in the input map of + all starting flow executions. + +* When a flow execution ends without sending a final response, + the default handler will attempt to start a new execution in the + same request. + +* Unhandled exceptions are propagated to the Dispatcher unless + the exception is a NoSuchFlowExecutionException. The default handler + will attempt to recover from a NoSuchFlowExecutionException by + starting over a new execution. + +Consult the API documentation for `FlowHandlerAdapter`for more information. You may override these defaults by subclassing or +by implementing your own FlowHandler, discussed in the next +section. + +## 11.4. Implementing custom FlowHandlers + +`FlowHandler` is the extension point that can be used to +customize how flows are executed in a HTTP servlet environment. 
A`FlowHandler` is used by the `FlowHandlerAdapter`and is responsible for: + +* Returning the `id` of a flow definition to + execute + +* Creating the input to pass new executions of that flow as they + are started + +* Handling outcomes returned by executions of that flow as they + end + +* Handling any exceptions thrown by executions of that flow as + they occur + +These responsibilities are illustrated in the definition of the`org.springframework.mvc.servlet.FlowHandler` interface: + +``` +public interface FlowHandler { + + public String getFlowId(); + + public MutableAttributeMap createExecutionInputMap(HttpServletRequest request); + + public String handleExecutionOutcome(FlowExecutionOutcome outcome, + HttpServletRequest request, HttpServletResponse response); + + public String handleException(FlowException e, + HttpServletRequest request, HttpServletResponse response); +} + +``` + +To implement a FlowHandler, subclass`AbstractFlowHandler`. All these operations are optional, and +if not implemented the defaults will apply. You only need to override the +methods that you need. Specifically: + +* Override `getFlowId(HttpServletRequest)` when the id + of your flow cannot be directly derived from the HTTP request. By + default, the id of the flow to execute is derived from the pathInfo + portion of the request URI. For example,`http://localhost/app/hotels/booking?hotelId=1` results in + a flow id of `hotels/booking` by default. + +* Override`createExecutionInputMap(HttpServletRequest)` when you need + fine-grained control over extracting flow input parameters from the + HttpServletRequest. By default, all request parameters are treated as + flow input parameters. + +* Override `handleExecutionOutcome` when you need to + handle specific flow execution outcomes in a custom manner. The + default behavior sends a redirect to the ended flow's URL to restart a + new execution of the flow. 
+ +* Override `handleException` when you need fine-grained + control over unhandled flow exceptions. The default behavior attempts + to restart the flow when a client attempts to access an ended or + expired flow execution. Any other exception is rethrown to the Spring + MVC ExceptionResolver infrastructure by default. + +### 11.4.1. Example FlowHandler + +A common interaction pattern between Spring MVC And Web Flow is +for a Flow to redirect to a @Controller when it ends. FlowHandlers allow +this to be done without coupling the flow definition itself with a +specific controller URL. An example FlowHandler that redirects to a +Spring MVC Controller is shown below: + +``` +public class BookingFlowHandler extends AbstractFlowHandler { + public String handleExecutionOutcome(FlowExecutionOutcome outcome, + HttpServletRequest request, HttpServletResponse response) { + if (outcome.getId().equals("bookingConfirmed")) { + return "/booking/show?bookingId=" + outcome.getOutput().get("bookingId"); + } else { + return "/hotels/index"; + } + } +} + +``` + +Since this handler only needs to handle flow execution outcomes in +a custom manner, nothing else is overridden. The`bookingConfirmed` outcome will result in a redirect to show +the new booking. Any other outcome will redirect back to the hotels +index page. + +### 11.4.2. Deploying a custom FlowHandler + +To install a custom FlowHandler, simply deploy it as a bean. The +bean name must match the id of the flow the handler should apply +to. + +``` + + +``` + +With this configuration, accessing the resource`/hotels/booking` will launch the `hotels/booking`flow using the custom BookingFlowHandler. When the booking flow ends, +the FlowHandler will process the flow execution outcome and redirect to +the appropriate controller. + +### 11.4.3. FlowHandler Redirects + +A FlowHandler handling a FlowExecutionOutcome or FlowException +returns a `String` to indicate the resource to redirect to +after handling. 
In the previous example, the`BookingFlowHandler` redirects to the`booking/show` resource URI for `bookingConfirmed`outcomes, and the `hotels/index` resource URI for all other +outcomes. + +By default, returned resource locations are relative to the +current servlet mapping. This allows for a flow handler to redirect to +other Controllers in the application using relative paths. In addition, +explicit redirect prefixes are supported for cases where more control is +needed. + +The explicit redirect prefixes supported are: + +* `servletRelative:` - redirect to a resource + relative to the current servlet + +* `contextRelative:` - redirect to a resource + relative to the current web application context path + +* `serverRelative:` - redirect to a resource relative + to the server root + +* `http://` or `https://` - redirect to a + fully-qualified resource URI + +These same redirect prefixes are also supported within a flow +definition when using the `externalRedirect:` directive in +conjunction with a view-state or end-state; for example,`view="externalRedirect:http://springframework.org"` + +## 11.5. View Resolution + +Web Flow 2 maps selected view identifiers to files located within +the flow's working directory unless otherwise specified. For existing +Spring MVC + Web Flow applications, an external `ViewResolver`is likely already handling this mapping for you. Therefore, to continue +using that resolver and to avoid having to change how your existing flow +views are packaged, configure Web Flow as follows: + +``` + + + + + + + + + + +``` + +The MvcViewFactoryCreator is the factory that allows you to +configure how the Spring MVC view system is used inside Spring Web Flow. +Use it to configure existing ViewResolvers, as well as other services such +as a custom MessageCodesResolver. You may also enable data binding using +Spring MVC's native BeanWrapper by setting the`useSpringBinding` flag to true. 
This is an alternative to +using the Unified EL for view-to-model data binding. See the +JavaDoc API of this class for more information. + +## 11.6. Signaling an event from a View + +When a flow enters a view-state it pauses, redirects the user to its +execution URL, and waits for a user event to resume. Events are generally +signaled by activating buttons, links, or other user interface commands. +How events are decoded server-side is specific to the view technology in +use. This section shows how to trigger events from HTML-based views +generated by templating engines such as JSP, Velocity, or +Freemarker. + +### 11.6.1. Using a named HTML button to signal an event + +The example below shows two buttons on the same form that signal`proceed` and `cancel` events when clicked, +respectively. + +``` + + + +``` + +When a button is pressed Web Flow finds a request parameter name +beginning with `_eventId_` and treats the remaining substring +as the event id. So in this example, submitting`_eventId_proceed` becomes `proceed`. This style +should be considered when there are several different events that can be +signaled from the same form. + +### 11.6.2. Using a hidden HTML form parameter to signal an event + +The example below shows a form that signals the`proceed` event when submitted: + +``` + + + +``` + +Here, Web Flow simply detects the special `_eventId`parameter and uses its value as the event id. This style should only be +considered when there is one event that can be signaled on the +form. + +### 11.6.3. Using a HTML link to signal an event + +The example below shows a link that signals the`cancel` event when activated: + +``` +Cancel + +``` + +Firing an event results in a HTTP request being sent back to the +server. On the server-side, the flow handles decoding the event from +within its current view-state. How this decoding process works is +specific to the view implementation. 
Recall a Spring MVC view +implementation simply looks for a request parameter named`_eventId`. If no `_eventId` parameter is found, +the view will look for a parameter that starts with`_eventId_` and will use the remaining substring as the event +id. If neither cases exist, no flow event is triggered. + +## 11.7. Embedding A Flow On A Page + +By default when a flow enters a view state, it executes a +client-side redirect before rendering the view. This approach is known as +POST-REDIRECT-GET. It has the advantage of separating the form processing +for one view from the rendering of the next view. As a result the browser +Back and Refresh buttons work seamlessly without causing any browser +warnings. + +Normally the client-side redirect is transparent from a user's +perspective. However, there are situations where POST-REDIRECT-GET may not +bring the same benefits. For example a flow may be embedded on a page and driven via +Ajax requests refreshing only the area of the page that belongs to the flow. +Not only is it unnecessary to use client-side redirects in this case, it +is also not the desired behavior with regards to keeping the surrounding +content of the page intact. + +The [Section 12.5, “Handling Ajax Requests”](spring-js.html#spring-js-ajax) explains how to do +partial rendering during Ajax requests. The focus of this section is to +explain how to control flow execution redirect behavior during +Ajax requests. To indicate a flow should execute in "page embedded" mode all +you need to do is append an extra parameter when launching the +flow: + +``` +/hotels/booking?mode=embedded +``` + +When launched in "page embedded" mode a flow will not issue +flow execution redirects during Ajax requests. The mode=embedded parameter +only needs to be passed when launching the flow. Your only other concern is +to use Ajax requests and to render only the content required to update +the portion of the page displaying the flow. + +### 11.7.1. 
Embedded Mode Vs Default Redirect Behavior + +By default Web Flow does a client-side redirect upon entering every view state. +However if you remain in the same view state -- for example a transition without a "to" attribute -- during an Ajax request there will not be a client-side redirect. +This behavior should be quite familiar to Spring Web Flow 2 users. +It is appropriate for a top-level flow that supports the browser back button while still taking advantage of Ajax and partial rendering for use cases where you remain in the same view such as form validation, paging through search results, and others. +However transitions to a new view state are always followed with a client-side redirect. +That makes it impossible to embed a flow on a page or within a modal dialog and execute more than one view state without causing a full-page refresh. +Hence if your use case requires embedding a flow you can launch it in "embedded" mode. + +### 11.7.2. Embedded Flow Examples + +If you'd like to see examples of a flow embedded on a page and within +a modal dialog please refer to the webflow-showcase project. You can check out +the source code locally, build it as you would a Maven project, and import +it into Eclipse: + +``` +cd some-directory +svn co https://src.springframework.org/svn/spring-samples/webflow-showcase +cd webflow-showcase +mvn package +# import into Eclipse +``` + +## 11.8. Saving Flow Output to MVC Flash Scope + +Flow output can be automatically saved to MVC flash scope when an `end-state`performs an internal redirect. This is particularly useful when displaying a summary +screen at the end of a flow. For backwards compatibility this feature is disabled by +default, to enable set `saveOutputToFlashScopeOnRedirect` on your`FlowHandlerAdapter` to `true`. + +``` + + + + + + +``` + +The following example will add `confirmationNumber` to the MVC flash scope +before redirecting to the `summary` screen. 
+ +``` + + + + +``` \ No newline at end of file diff --git a/docs/en/spring-web-flow/system-setup.md b/docs/en/spring-web-flow/system-setup.md new file mode 100644 index 0000000000000000000000000000000000000000..f373d10934e3c9e12ed2e0aa5a131e6bc635c162 --- /dev/null +++ b/docs/en/spring-web-flow/system-setup.md @@ -0,0 +1,488 @@ +# 10. System Setup + +## 10.1. Introduction + +This chapter shows you how to setup the Web Flow system for use in any web environment. + +## 10.2. Java Config and XML Namespace + +Web Flow provides dedicated configuration support for both Java and +XML-based configuration. + +To get started with XML based configuration declare the webflow config XML namespace: + +``` + + + + + + +``` + +To get started with Java configuration extend`AbstractFlowConfiguration` in an`@Configuration` class: + +``` +import org.springframework.context.annotation.Configuration; +import org.springframework.webflow.config.AbstractFlowConfiguration; + +@Configuration +public class WebFlowConfig extends AbstractFlowConfiguration { + +} + +``` + +## 10.3. Basic system configuration + +The next section shows the minimal configuration required to set up the Web Flow system in your application. + +### 10.3.1. FlowRegistry + +Register your flows in a `FlowRegistry` in XML: + +``` + + + + +``` + +Register your flows in a `FlowRegistry` in Java: + +``` +@Bean +public FlowDefinitionRegistry flowRegistry() { + return getFlowDefinitionRegistryBuilder() + .addFlowLocation("/WEB-INF/flows/booking/booking.xml") + .build(); +} + +``` + +### 10.3.2. 
FlowExecutor + +Deploy a FlowExecutor, the central service for executing flows in XML: + +``` + + +``` + +Deploy a FlowExecutor, the central service for executing flows in Java: + +``` +@Bean +public FlowExecutor flowExecutor() { + return getFlowExecutorBuilder(flowRegistry()).build(); +} + +``` + +See the Spring MVC and Spring Faces sections of this guide on how to integrate the Web Flow system with the MVC and JSF environment, respectively. + +## 10.4. flow-registry options + +This section explores flow-registry configuration options. + +### 10.4.1. Specifying flow locations + +Use the `location` element to specify paths to flow definitions to register. +By default, flows will be assigned registry identifiers equal to their filenames minus +the file extension, unless a registry base path is defined. + +In XML: + +``` + + +``` + +In Java: + +``` +return getFlowDefinitionRegistryBuilder() + .addFlowLocation("/WEB-INF/flows/booking/booking.xml") + .build(); + +``` + +### 10.4.2. Assigning custom flow identifiers + +Specify an id to assign a custom registry identifier to a flow in XML: + +``` + + +``` + +Specify an id to assign a custom registry identifier to a flow in Java: + +``` +return getFlowDefinitionRegistryBuilder() + .addFlowLocation("/WEB-INF/flows/booking/booking.xml", "bookHotel") + .build(); + +``` + +### 10.4.3. Assigning flow meta-attributes + +Use the `flow-definition-attributes` element to assign custom meta-attributes to a registered flow. + +In XML: + +``` + + + + + + +``` + +In Java: + +``` +Map attrs = ... ; + +return getFlowDefinitionRegistryBuilder() + .addFlowLocation("/WEB-INF/flows/booking/booking.xml", null, attrs) + .build(); + +``` + +### 10.4.4. 
Registering flows using a location pattern + +Use the `flow-location-patterns` element to register flows that match a specific resource location pattern: + +In XML: + +``` + + +``` + +In Java: + +``` +return getFlowDefinitionRegistryBuilder() + .addFlowLocationPattern("/WEB-INF/flows/**/*-flow.xml") + .build(); + +``` + +### 10.4.5. Flow location base path + +Use the `base-path` attribute to define a base location for all flows in the application. +All flow locations are then relative to the base path. +The base path can be a resource path such as '/WEB-INF' or a location on the classpath like 'classpath:org/springframework/webflow/samples'. + +In XML: + +``` + + + + +``` + +In Java: + +``` +return getFlowDefinitionRegistryBuilder() + .setBasePath("/WEB-INF") + .addFlowLocationPattern("/hotels/booking/booking.xml") + .build(); + +``` + +With a base path defined, the algorithm that assigns flow identifiers changes slightly. +Flows will now be assigned registry identifiers equal to the the path segment between their base path and file name. +For example, if a flow definition is located at '/WEB-INF/hotels/booking/booking-flow.xml' and the base path is '/WEB-INF' the remaining path to this flow is 'hotels/booking' which becomes the flow id. + +| ![[Tip]](images/tip.png) |Directory per flow definition| +|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:----------------------------| +|Recall it is a best practice to package each flow definition in a unique directory.
    This improves modularity, allowing dependent resources to be packaged with the flow definition.
    It also prevents two flows from having the same identifiers when using the convention.| | + +If no base path is specified or if the flow definition is directly on the base path, flow id assignment from the filename (minus the extension) is used. +For example, if a flow definition file is 'booking.xml', the flow identifier is simply 'booking'. + +Location patterns are particularly powerful when combined with a registry base path. +Instead of the flow identifiers becoming '\*-flow', they will be based on the directory path. +For example in XML: + +``` + + + + +``` + +In Java: + +``` +return getFlowDefinitionRegistryBuilder() + .setBasePath("/WEB-INF") + .addFlowLocationPattern("/**/*-flow.xml") + .build(); + +``` + +In the above example, suppose you had flows located in `/user/login`, `/user/registration`, `/hotels/booking`, and `/flights/booking` directories within `WEB-INF`, +you'd end up with flow ids of `user/login`, `user/registration`, `hotels/booking`, and `flights/booking`, respectively. + +### 10.4.6. Configuring FlowRegistry hierarchies + +Use the `parent` attribute to link two flow registries together in a hierarchy. +When the child registry is queried, if it cannot find the requested flow it will delegate to its parent. + +In XML: + +``` + + + + + + + + + + +``` + +In Java: + +``` +@Configuration +public class WebFlowConfig extends AbstractFlowConfiguration { + + @Autowired + private SharedConfig sharedConfig; + + @Bean + public FlowDefinitionRegistry flowRegistry() { + return getFlowDefinitionRegistryBuilder() + .setParent(this.sharedConfig.sharedFlowRegistry()) + .addFlowLocation("/WEB-INF/flows/booking/booking.xml") + .build(); + } +} + +@Configuration +public class SharedConfig extends AbstractFlowConfiguration { + + @Bean + public FlowDefinitionRegistry sharedFlowRegistry() { + return getFlowDefinitionRegistryBuilder() + .addFlowLocation("/WEB-INF/flows/shared.xml") + .build(); + } +} + +``` + +### 10.4.7. 
Configuring custom FlowBuilder services + +Use the `flow-builder-services` attribute to customize the services and settings used to build flows in a flow-registry. +If no flow-builder-services tag is specified, the default service implementations are used. +When the tag is defined, you only need to reference the services you want to customize. + +In XML: + +``` + + + + + + +``` + +In Java: + +``` +@Bean +public FlowDefinitionRegistry flowRegistry() { + return getFlowDefinitionRegistryBuilder(flowBuilderServices()) + .addFlowLocation("/WEB-INF/flows/booking/booking.xml") + .build(); +} + +@Bean +public FlowBuilderServices flowBuilderServices() { + return getFlowBuilderServicesBuilder().build(); +} + +``` + +The configurable services are the `conversion-service`, `expression-parser`, and `view-factory-creator`. +These services are configured by referencing custom beans you define. + +For example in XML: + +``` + + + + + + +``` + +In Java: + +``` +@Bean +public FlowBuilderServices flowBuilderServices() { + return getFlowBuilderServicesBuilder() + .setConversionService(conversionService()) + .setExpressionParser(expressionParser) + .setViewFactoryCreator(mvcViewFactoryCreator()) + .build(); +} + +@Bean +public ConversionService conversionService() { + // ... +} + +@Bean +public ExpressionParser expressionParser() { + // ... +} + +@Bean +public ViewFactoryCreator viewFactoryCreator() { + // ... +} + +``` + +#### conversion-service + +Use the `conversion-service` attribute to customize the `ConversionService` used by the Web Flow system. +Type conversion is used to convert from one type to another when required during flow execution such as when processing request parameters, invoking actions, and so on. +Many common object types such as numbers, classes, and enums are supported. +However you'll probably need to provide your own type conversion and formatting logic for custom data types. 
+Please read [Section 5.7, “Performing type conversion”](views.html#view-type-conversion) for important information on how to provide custom type conversion logic. + +#### expression-parser + +Use the `expression-parser` attribute to customize the `ExpressionParser` used by the Web Flow system. +The default ExpressionParser uses the Unified EL if available on the classpath, otherwise Spring EL is used. + +#### view-factory-creator + +Use the `view-factory-creator` attribute to customize the `ViewFactoryCreator` used by the Web Flow system. +The default ViewFactoryCreator produces Spring MVC ViewFactories capable of rendering JSP, Velocity, and Freemarker views. + +The configurable settings are `development`. +These settings are global configuration attributes that can be applied during the flow construction process. + +#### development + +Set this to `true` to switch on flow *development mode*. +Development mode switches on hot-reloading of flow definition changes, including changes to dependent flow resources such as message bundles. + +## 10.5. flow-executor options + +This section explores flow-executor configuration options. + +### 10.5.1. Attaching flow execution listeners + +Use the `flow-execution-listeners` element to register listeners that observe the lifecycle +of flow executions. For example in XML: + +``` + + + + + +``` + +In Java: + +``` +@Bean +public FlowExecutor flowExecutor() { + return getFlowExecutorBuilder(flowRegistry()) + .addFlowExecutionListener(securityListener()) + .addFlowExecutionListener(persistenceListener()) + .build(); +} + +``` + +You may also configure a listener to observe only certain flows. For example in XML: + +``` + + +``` + +In Java: + +``` +@Bean +public FlowExecutor flowExecutor() { + return getFlowExecutorBuilder(flowRegistry()) + .addFlowExecutionListener(securityListener(), "securedFlow1,securedFlow2") + .build(); +} + +``` + +### 10.5.2. 
Tuning FlowExecution persistence + +Use the `flow-execution-repository` element to tune flow execution persistence settings. +For example in XML: + +``` + + + + +``` + +In Java: + +``` +@Bean +public FlowExecutor flowExecutor() { + return getFlowExecutorBuilder(flowRegistry()) + .setMaxFlowExecutions(5) + .setMaxFlowExecutionSnapshots(30) + .build(); +} + +``` + +#### max-executions + +Tune the `max-executions` attribute to place a cap on the number of flow executions that can be created per user session. +When the maximum number of executions is exceeded, the oldest execution is removed. + +| ![[Note]](images/note.png) |Note| +|:--------------------------------------------------------------------------------------------------------:|:---| +|The `max-executions` attribute is per user session, i.e. it works across instances of any flow definition.| | + +#### max-execution-snapshots + +Tune the `max-execution-snapshots` attribute to place a cap on the number of history snapshots that can be taken per flow execution. +To disable snapshotting, set this value to 0. To enable an unlimited number of snapshots, set this value to -1. + +| ![[Note]](images/note.png) |Note| +|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---| +|History snapshots enable browser back button support.
    When snapshotting is disabled pressing the browser back button will not work.
    It will result in using an execution key that points to a snapshot that has not be recorded.| | \ No newline at end of file diff --git a/docs/en/spring-web-flow/testing.md b/docs/en/spring-web-flow/testing.md new file mode 100644 index 0000000000000000000000000000000000000000..1794502527cc5a295600f56ba27acb5b5c869464 --- /dev/null +++ b/docs/en/spring-web-flow/testing.md @@ -0,0 +1,141 @@ +# 14. Testing flows + +## 14.1. Introduction + +This chapter shows you how to test flows. + +## 14.2. Extending AbstractXmlFlowExecutionTests + +To test the execution of a XML-based flow definition, extend `AbstractXmlFlowExecutionTests`: + +``` +public class BookingFlowExecutionTests extends AbstractXmlFlowExecutionTests { + +} + +``` + +## 14.3. Specifying the path to the flow to test + +At a minimum, you must override `getResource(FlowDefinitionResourceFactory)` to return the path to the flow you wish to test: + +``` +@Override +protected FlowDefinitionResource getResource(FlowDefinitionResourceFactory resourceFactory) { + return resourceFactory.createFileResource("src/main/webapp/WEB-INF/hotels/booking/booking.xml"); +} + +``` + +## 14.4. Registering flow dependencies + +If your flow has dependencies on externally managed services, +also override `configureFlowBuilderContext(MockFlowBuilderContext)` to register stubs or mocks of those services: + +``` +@Override +protected void configureFlowBuilderContext(MockFlowBuilderContext builderContext) { + builderContext.registerBean("bookingService", new StubBookingService()); +} + +``` + +If your flow extends from another flow, or has states that extend other states, +also override `getModelResources(FlowDefinitionResourceFactory)` to return the path to the parent flows. 
+ +``` +@Override +protected FlowDefinitionResource[] getModelResources(FlowDefinitionResourceFactory resourceFactory) { +return new FlowDefinitionResource[] { + resourceFactory.createFileResource("src/main/webapp/WEB-INF/common/common.xml") +}; +} + +``` + +## 14.5. Testing flow startup + +Have your first test exercise the startup of your flow: + +``` +public void testStartBookingFlow() { + + Booking booking = createTestBooking(); + + MutableAttributeMap input = new LocalAttributeMap(); + input.put("hotelId", "1"); + MockExternalContext context = new MockExternalContext(); + context.setCurrentUser("keith"); + startFlow(input, context); + + assertCurrentStateEquals("enterBookingDetails"); + assertTrue(getRequiredFlowAttribute("booking") instanceof Booking); +} + +``` + +Assertions generally verify the flow is in the correct state you expect. + +## 14.6. Testing flow event handling + +Define additional tests to exercise flow event handling behavior. +You goal should be to exercise all paths through the flow. +You can use the convenient `setCurrentState(String)` method to jump to the flow state where you wish to begin your test. + +``` +public void testEnterBookingDetails_Proceed() { + + setCurrentState("enterBookingDetails"); + + getFlowScope().put("booking", createTestBooking()); + + MockExternalContext context = new MockExternalContext(); + context.setEventId("proceed"); + resumeFlow(context); + + assertCurrentStateEquals("reviewBooking"); +} + +``` + +## 14.7. Mocking a subflow + +To test calling a subflow, register a mock implementation of the subflow that asserts input was passed in correctly and +returns the correct outcome for your test scenario. 
+ +``` +public void testBookHotel() { + + setCurrentState("reviewHotel"); + + Hotel hotel = new Hotel(); + hotel.setId(1L); + hotel.setName("Jameson Inn"); + getFlowScope().put("hotel", hotel); + + getFlowDefinitionRegistry().registerFlowDefinition(createMockBookingSubflow()); + + MockExternalContext context = new MockExternalContext(); + context.setEventId("book"); + resumeFlow(context); + + // verify flow ends on 'bookingConfirmed' + assertFlowExecutionEnded(); + assertFlowExecutionOutcomeEquals("finish"); +} + +public Flow createMockBookingSubflow() { + Flow mockBookingFlow = new Flow("booking"); + mockBookingFlow.setInputMapper(new Mapper() { + public MappingResults map(Object source, Object target) { + // assert that 1L was passed in as input + assertEquals(1L, ((AttributeMap) source).get("hotelId")); + return null; + } + }); + // immediately return the bookingConfirmed outcome so the caller can respond + new EndState(mockBookingFlow, "bookingConfirmed"); + return mockBookingFlow; +} + +``` \ No newline at end of file diff --git a/docs/en/spring-web-flow/views.md b/docs/en/spring-web-flow/views.md new file mode 100644 index 0000000000000000000000000000000000000000..5c7e44ad3ee868502e80cab6ba219e91ee5e79ac --- /dev/null +++ b/docs/en/spring-web-flow/views.md @@ -0,0 +1,845 @@ +# 5. Rendering views + +## 5.1. Introduction + +This chapter shows you how to use the `view-state` element to render views within a flow. + +## 5.2. Defining view states + +Use the `view-state` element to define a step of the flow that renders a view and waits for a user event to resume: + +``` + + + + +``` + +By convention, a view-state maps its id to a view template in the directory where the flow is located. +For example, the state above might render `/WEB-INF/hotels/booking/enterBookingDetails.xhtml`if the flow itself was located in the `/WEB-INF/hotels/booking` directory. 
+ +Below is a sample directory structure showing views and other resources like message bundles co-located with their flow definition: + + + +Flow Packaging + +## 5.3. Specifying view identifiers + +Use the `view` attribute to specify the id of the view to render explicitly. + +### 5.3.1. Flow relative view ids + +The view id may be a relative path to view resource in the flow's working directory: + +``` + + +``` + +### 5.3.2. Absolute view ids + +The view id may be a absolute path to a view resource in the webapp root directory: + +``` + + +``` + +### 5.3.3. Logical view ids + +With some view frameworks, such as Spring MVC's view framework, the view id may also be a logical identifier resolved by the framework: + +``` + + +``` + +See the Spring MVC integration section for more information on how to integrate with the MVC `ViewResolver` infrastructure. + +## 5.4. View scope + +A view-state allocates a new `viewScope` when it enters. +This scope may be referenced within the view-state to assign variables that should live for the duration of the state. +This scope is useful for manipulating objects over a series of requests from the same view, often Ajax requests. +A view-state destroys its viewScope when it exits. + +### 5.4.1. Allocating view variables + +Use the `var` tag to declare a view variable. +Like a flow variable, any `@Autowired` references are automatically restored when the view state resumes. + +``` + + +``` + +### 5.4.2. Assigning a viewScope variable + +Use the `on-render` tag to assign a variable from an action result before the view renders: + +``` + + + + +``` + +### 5.4.3. Manipulating objects in view scope + +Objects in view scope are often manipulated over a series of requests from the same view. +The following example pages through a search results list. +The list is updated in view scope before each render. +Asynchronous event handlers modify the current data page, then request re-rendering of the search results fragment. 
+ +``` + + + + + + + + + + + + + + +``` + +## 5.5. Executing render actions + +Use the `on-render` element to execute one or more actions before view rendering. +Render actions are executed on the initial render as well as any subsequent refreshes, including any partial re-renderings of the view. + +``` + + + + +``` + +## 5.6. Binding to a model + +Use the `model` attribute to declare a model object the view binds to. +This attribute is typically used in conjunction with views that render data controls, such as forms. +It enables form data binding and validation behaviors to be driven from metadata on your model object. + +The following example declares an `enterBookingDetails` state manipulates the `booking` model: + +``` + + +``` + +The model may be an object in any accessible scope, such as `flowScope` or `viewScope`. +Specifying a `model` triggers the following behavior when a view event occurs: + +1. View-to-model binding. On view postback, user input values are bound to model object properties for you. + +2. Model validation. After binding, if the model object requires validation that validation logic will be invoked. + +For a flow event to be generated that can drive a view state transition, model binding must complete successfully. +If model binding fails, the view is re-rendered to allow the user to revise their edits. + +## 5.7. Performing type conversion + +When request parameters are used to populate the model (commonly referred to as data binding), type conversion is required to parse String-based request parameter values before setting target model properties. +Default type conversion is available for many common Java types such as numbers, primitives, enums, and Dates. +Users also have the ability to register their own type conversion logic for user-defined types, and to override the default Converters. + +### 5.7.1. 
Type Conversion Options + +Starting with version 2.1 Spring Web Flow uses the [type conversion](http://static.springsource.org/spring/docs/3.0.x/spring-framework-reference/html/validation.html#core-convert) and [formatting](http://static.springsource.org/spring/docs/3.0.x/spring-framework-reference/html/validation.html#format) system introduced in Spring 3 for nearly all type conversion needs. +Previously Web Flow applications used a type conversion mechanism that was different from the one in Spring MVC, which relied on the `java.beans.PropertyEditor` abstraction. +Spring 3 offers a modern type conversion alternative to PropertyEditors that was actually influenced by Web Flow's own type conversion system. +Hence Web Flow users should find it natural to work with the new Spring 3 type conversion. +Another obvious and very important benefit of this change is that a single type conversion mechanism can now be used across Spring MVC And Spring Web Flow. + +### 5.7.2. Upgrading to Spring 3 Type Conversion And Formatting + +What does this practically mean for existing applications? +Existing applications are likely registering their own converters of type `org.springframework.binding.convert.converters.Converter` through a sub-class of `DefaultConversionService` available in Spring Binding. +Those converters can continue to be registered as before. +They will be adapted as Spring 3 `GenericConverter` types and registered with a Spring 3 `org.springframework.core.convert.ConversionService` instance. +In other words existing converters will be invoked through Spring's type conversion service. 
+ +The only exception to this rule are named converters, which can be referenced from a `binding` element in a `view-state`: + +``` +public class ApplicationConversionService extends DefaultConversionService { + public ApplicationConversionService() { + addDefaultConverters(); + addDefaultAliases(); + addConverter("customConverter", new CustomConverter()); + } +} + +``` + +``` + + + + + + +``` + +Named converters are not supported and cannot be used with the type conversion service available in Spring 3. +Therefore such converters will not be adapted and will continue to work as before, i.e. will not involve the Spring 3 type conversion. +However, this mechanism is deprecated and applications are encouraged to favor Spring 3 type conversion and formatting features. + +Also note that the existing Spring Binding `DefaultConversionService` no longer registers any default converters. +Instead Web Flow now relies on the default type converters and formatters in Spring 3. + +In summary the Spring 3 type conversion and formatting is now used almost exclusively in Web Flow. +Although existing applications will work without any changes, we encourage moving towards unifying the type conversion needs of Spring MVC and Spring Web Flow parts of applications. + +### 5.7.3. Configuring Type Conversion and Formatting + +In Spring MVC an instance of a `FormattingConversionService` is created automatically through the custom MVC namespace: + +``` + + + + + + +``` + +Internally that is done with the help of `FormattingConversionServiceFactoryBean`, which registers a default set of converters and formatters. +You can customize the conversion service instance used in Spring MVC through the `conversion-service` attribute: + +``` + + +``` + +In Web Flow an instance of a Spring Binding `DefaultConversionService` is created automatically, which does not register any converters. +Instead it delegates to a `FormattingConversionService` instance for all type conversion needs. 
+By default this is not the same `FormattingConversionService` instance as the one used in Spring 3. +However that won't make a practical difference until you start registering your own formatters. + +The `DefaultConversionService` used in Web Flow can be customized through the flow-builder-services element: + +``` + + +``` + +Connecting the dots in order to register your own formatters for use in both Spring MVC and in Spring Web Flow you can do the following. +Create a class to register your custom formatters: + +``` +public class ApplicationConversionServiceFactoryBean extends FormattingConversionServiceFactoryBean { + + @Override + protected void installFormatters(FormatterRegistry registry) { + // ... + } + +} + + +``` + +Configure it for use in Spring MVC: + +``` + + + + + + + + + + +``` + +Connection the Web Flow `DefaultConversionService` to the same "applicationConversionService" bean used in Spring MVC: + +``` + + + + + + + + +``` + +Of course it is also possible to mix and match. +Register new Spring 3 `Formatter` types through the "applicationConversionService". +Register existing Spring Binding `Converter` types through the "defaultConversionService". + +### 5.7.4. Working With Spring 3 Type Conversion And Formatting + +An important concept to understand is the difference between type converters and formatters. + +Type converters in Spring 3, provided in `org.springframework.core`, are for general-purpose type conversion between any two object types. +In addition to the most simple `Converter` type, two other interfaces are `ConverterFactory` and `GenericConverter`. + +Formatters in Spring 3, provided in `org.springframework.context`, have the more specialized purpose of representing Objects as Strings. +The `Formatter` interface extends the `Printer` and `Parser` interfaces for converting an Object to a String and turning a String into an Object. 
+ +Web developers will find the `Formatter` interface most relevant because it fits the needs of web applications for type conversion. + +| ![[Note]](images/note.png) |Note| +|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---| +|An important point to be made is that Object-to-Object conversion is a generalization of the more specific Object-to-String conversion.
    In fact in the end `Formatters` are reigstered as `GenericConverter` types with Spring's `GenericConversionService` making them equal to any other converter.| | + +### 5.7.5. Formatting Annotations + +One of the best features of the new type conversion is the ability to use annotations for a better control over formatting in a concise manner. +Annotations can be placed on model attributes and on arguments of @Controller methods that are mapped to requests. +Out of the box Spring provides two annotations `NumberFormat` and `DateTimeFormat` but you can create your own and have them registered along with the associated formatting logic. +You can see examples of the `DateTimeFormat` annotation in the [Spring Travel](https://src.springframework.org/svn/spring-samples/travel) and in the [Petcare](https://src.springframework.org/svn/spring-samples/petcare) along with other samples in the [Spring Samples](https://src.springframework.org/svn/spring-samples) repository. + +### 5.7.6. Working With Dates + +The `DateTimeFormat` annotation implies use of [Joda Time](http://joda-time.sourceforge.net/). +If that is present on the classpath the use of this annotation is enabled automatically. +By default neither Spring MVC nor Web Flow register any other date formatters or converters. +Therefore it is important for applications to register a custom formatter to specify the default way for printing and parsing dates. +The `DateTimeFormat` annotation on the other hand provides more fine-grained control where it is necessary to deviate from the default. + +For more information on working with Spring 3 type conversion and formatting please refer to the relevant sections of the [Spring documentation](http://static.springsource.org/spring/docs/3.0.x/spring-framework-reference/html/index.html). + +## 5.8. Suppressing binding + +Use the `bind` attribute to suppress model binding and validation for particular view events. 
+The following example suppresses binding when the `cancel` event occurs: + +``` + + + + + +``` + +## 5.9. Specifying bindings explicitly + +Use the `binder` element to configure the exact set of model properties to +apply data binding to. This is useful to restrict the set of "allowed fields" per view. +Not using this could lead to a security issue, depending on the application domain and actual users, +since by default if the binder element is not specified all public properties of the model are +eligible for data binding by the view. By contrast when the `binder` element is specified, +only the explicitly configured bindings are allowed. Below is an example: + +``` + + + + + + + + + + + + +``` + +Each binding may also apply a converter to format the model property value for display in a custom manner. +If no converter is specified, the default converter for the model property's type will be used. + +``` + + + + + + + + + + + + + + +``` + +In the example above, the `shortDate` converter is bound to the`checkinDate` and `checkoutDate` properties. +Custom converters may be registered with the application's ConversionService. + +Each binding may also apply a required check that will generate a validation error +if the user provided value is null on form postback: + +``` + + + + + + + + + + + + + +``` + +In the example above, all of the bindings are required. +If one or more blank input values are bound, validation errors will be generated and the view will re-render with those errors. + +## 5.10. Validating a model + +Model validation is driven by constraints specified against a model object. +Web Flow supports enforcing such constraints programatically as well as +declaratively with JSR-303 Bean Validation annotations. + +### 5.10.1. JSR-303 Bean Validation + +Web Flow provides built-in support for the JSR-303 Bean Validation API +building on equivalent support available in Spring MVC. 
+To enable JSR-303 validation configure the flow-builder-services with +Spring MVC's `LocalValidatorFactoryBean`: + +``` + + + + + + +``` + +With the above in place, the configured validator will be applied to +all model attributes after data binding. + +Note that JSR-303 bean validation and validation by convention +(explained in the next section) are not mutually exclusive. +In other words Web Flow will apply all available validation +mechanisms. + +#### Partial Validation + +JSR-303 Bean Validation supports partial validation through validation groups. For example: + +``` +@NotNull +@Size(min = 2, max = 30, groups = State1.class) +private String name; + +``` + +In a flow definition you can specify validation hints on a view state +or on a transition and those will be resolved to validation groups. +For example: + +``` + + +``` + +The *validation-hints* attribute is an expression +that in the above example resolves to a comma-delimited String consisting +of the hints "group1" and "group2". A `ValidationHintResolver`is used to resolve these hints. The `BeanValidationHintResolver`used by default tries to resolve these strings to Class-based bean validation +groups. To do that it looks for matching inner types in the model or its parent. + +For example given `org.example.MyModel` with inner types`Group1` and `Group2` it is +sufficient to supply the simple type names, i.e. "group1" and "group2". +You can also provide fully qualified type names. + +A hint with the value "default" has a special meaning and is translated +to the default validation group in Bean Validation`javax.validation.groups.Default`. + +A custom `ValidationHintResolver`can be configured if necessary through the validationHintResolver property +of the flow-builder-services element: + +``` + + + + +``` + +### 5.10.2. Programmatic validation + +There are two ways to perform model validation programatically. +The first is to implement validation logic in your model object. 
+The second is to implement an external `Validator`. +Both ways provide you with a `ValidationContext` to record error messages and access information about the current user. + +#### Implementing a model validate method + +Defining validation logic in your model object is the simplest way to validate its state. +Once such logic is structured according to Web Flow conventions, Web Flow will automatically invoke that logic during the view-state postback lifecycle. +Web Flow conventions have you structure model validation logic by view-state, allowing you to easily validate the subset of model properties that are editable on that view. +To do this, simply create a public method with the name `validate${state}`, where `${state}` is the id of your view-state where you want validation to run. +For example: + +``` +public class Booking { + private Date checkinDate; + private Date checkoutDate; + ... + + public void validateEnterBookingDetails(ValidationContext context) { + MessageContext messages = context.getMessageContext(); + if (checkinDate.before(today())) { + messages.addMessage(new MessageBuilder().error().source("checkinDate"). + defaultText("Check in date must be a future date").build()); + } else if (!checkinDate.before(checkoutDate)) { + messages.addMessage(new MessageBuilder().error().source("checkoutDate"). + defaultText("Check out date must be later than check in date").build()); + } + } +} + + +``` + +In the example above, when a transition is triggered in a `enterBookingDetails` view-state that is editing a `Booking` model, +Web Flow will invoke the `validateEnterBookingDetails(ValidationContext)` method automatically unless validation has been suppressed for that transition. +An example of such a view-state is shown below: + +``` + + + + +``` + +Any number of validation methods are defined. Generally, a flow edits a model over a series of views. In that case, a validate method would be defined +for each view-state where validation needs to run. 
+ +#### Implementing a Validator + +The second way is to define a separate object, called a *Validator*, which validates your model object. +To do this, first create a class whose name has the pattern ${model}Validator, where `${model}` is the capitialized form of the model expression, such as `booking`. +Then define a public method with the name `validate${state}`, where `${state}` is the id of your view-state, such as `enterBookingDetails`. +The class should then be deployed as a Spring bean. Any number of validation methods can be defined. +For example: + +``` +@Component +public class BookingValidator { + public void validateEnterBookingDetails(Booking booking, ValidationContext context) { + MessageContext messages = context.getMessageContext(); + if (booking.getCheckinDate().before(today())) { + messages.addMessage(new MessageBuilder().error().source("checkinDate"). + defaultText("Check in date must be a future date").build()); + } else if (!booking.getCheckinDate().before(booking.getCheckoutDate())) { + messages.addMessage(new MessageBuilder().error().source("checkoutDate"). + defaultText("Check out date must be later than check in date").build()); + } + } +} + +``` + +In the example above, when a transition is triggered in a `enterBookingDetails` view-state that is editing a `Booking` model, +Web Flow will invoke the `validateEnterBookingDetails(Booking, ValidationContext)` method automatically unless validation has been suppressed for that transition. + +A Validator can also accept a Spring MVC `Errors` object, which is required for invoking existing Spring Validators. + +Validators must be registered as Spring beans employing the naming convention `${model}Validator` to be detected and invoked automatically. +In the example above, Spring 2.5 classpath-scanning would detect the `@Component` and automatically register it as a bean with the name `bookingValidator`. 
+Then, anytime the `booking` model needs to be validated, this `bookingValidator` instance would be invoked for you. + +#### Default validate method + +A *Validator* class can also define a method called `validate` not associated (by convention) with any specific view-state. + +``` +@Component +public class BookingValidator { + public void validate(Booking booking, ValidationContext context) { + //... + } +} + +``` + +In the above code sample the method `validate` will be called every time a Model of type `Booking` is validated (unless validation has been suppressed for that transition). +If needed the default method can also be called in addition to an existing state-specific method. Consider the following example: + +``` +@Component +public class BookingValidator { + public void validate(Booking booking, ValidationContext context) { + //... + } + public void validateEnterBookingDetails(Booking booking, ValidationContext context) { + //... + } +} + +``` + +In above code sample the method `validateEnterBookingDetails` will be called first. +The default `validate` method will be called next. + +### 5.10.3. ValidationContext + +A ValidationContext allows you to obtain a `MessageContext` to record messages during validation. +It also exposes information about the current user, such as the signaled `userEvent` and the current user's `Principal` identity. +This information can be used to customize validation logic based on what button or link was activated in the UI, or who is authenticated. +See the API Javadocs for `ValidationContext` for more information. + +## 5.11. Suppressing validation + +Use the `validate` attribute to suppress model validation for particular view events: + +``` + + + + + +``` + +In this example, data binding will still occur on `back` but validation will be suppressed. + +## 5.12. Executing view transitions + +Define one or more `transition` elements to handle user events that may occur on the view. 
+A transition may take the user to another view, or it may simply execute an action and re-render the current view. +A transition may also request the rendering of parts of a view called "fragments" when handling an Ajax event. +Finally, "global" transitions that are shared across all views may also be defined. + +Implementing view transitions is illustrated in the following sections. + +### 5.12.1. Transition actions + +A view-state transition can execute one or more actions before executing. +These actions may return an error result to prevent the transition from exiting the +current view-state. If an error result occurs, the view will re-render and should display +an appropriate message to the user. + +If the transition action invokes a plain Java method, the invoked method may return a boolean +whose value, true or false, indicates whether the transition should take place or be prevented +from executing. A method may also return a String where the literal values "success", "yes", or +"true" indicate the transition should occur, and any other value means the opposite. +This technique can be used to handle exceptions thrown by service-layer methods. +The example below invokes an action that calls a service and handles an exceptional situation: + +``` + + + + +``` + +``` +public class BookingAction { + public boolean makeBooking(Booking booking, MessageContext context) { + try { + bookingService.make(booking); + return true; + } catch (RoomNotAvailableException e) { + context.addMessage(new MessageBuilder().error(). + .defaultText("No room is available at this hotel").build()); + return false; + } + } +} + +``` + +When there is more than one action defined on a transition, if one returns an error result the +remaining actions in the set will *not* be executed. If you need to ensure one +transition action's result cannot impact the execution of another, define a single transition +action that invokes a method that encapsulates all the action logic. + +### 5.12.2. 
Global transitions + +Use the flow's `global-transitions` element to create transitions that apply across all views. +Global-transitions are often used to handle global menu links that are part of the layout. + +``` + + + + + +``` + +### 5.12.3. Event handlers + +From a view-state, transitions without targets can also be defined. Such transitions are called "event handlers": + +``` + + + + +``` + +These event handlers do not change the state of the flow. +They simply execute their actions and re-render the current view or one or more fragments of the current view. + +### 5.12.4. Rendering fragments + +Use the `render` element within a transition to request partial re-rendering of the current view after handling the event: + +``` + + + + + +``` + +The fragments attribute should reference the id(s) of the view element(s) you wish to re-render. +Specify multiple elements to re-render by separating them with a comma delimiter. + +Such partial rendering is often used with events signaled by Ajax to update a specific zone of the view. + +## 5.13. Working with messages + +Spring Web Flow's `MessageContext` is an API for recording messages during the course of flow executions. +Plain text messages can be added to the context, as well as internationalized messages resolved by a Spring `MessageSource`. +Messages are renderable by views and automatically survive flow execution redirects. +Three distinct message severities are provided: `info`, `warning`, and `error`. +In addition, a convenient `MessageBuilder` exists for fluently constructing messages. + +### 5.13.1. Adding plain text messages + +``` +MessageContext context = ... 
+MessageBuilder builder = new MessageBuilder(); +context.addMessage(builder.error().source("checkinDate") + .defaultText("Check in date must be a future date").build()); +context.addMessage(builder.warn().source("smoking") + .defaultText("Smoking is bad for your health").build()); +context.addMessage(builder.info() + .defaultText("We have processed your reservation - thank you and enjoy your stay").build()); + +``` + +### 5.13.2. Adding internationalized messages + +``` +MessageContext context = ... +MessageBuilder builder = new MessageBuilder(); +context.addMessage(builder.error().source("checkinDate").code("checkinDate.notFuture").build()); +context.addMessage(builder.warn().source("smoking").code("notHealthy") + .resolvableArg("smoking").build()); +context.addMessage(builder.info().code("reservationConfirmation").build()); + +``` + +### 5.13.3. Using message bundles + +Internationalized messages are defined in message bundles accessed by a Spring `MessageSource`. +To create a flow-specific message bundle, simply define `messages.properties` file(s) in your flow's directory. +Create a default `messages.properties` file and a .properties file for each additional `Locale` you need to support. + +``` +#messages.properties +checkinDate=Check in date must be a future date +notHealthy={0} is bad for your health +reservationConfirmation=We have processed your reservation - thank you and enjoy your stay + +``` + +From within a view or a flow, you may also access message resources using the `resourceBundle` EL variable: + +``` + + +``` + +### 5.13.4. Understanding system generated messages + +There are several places where Web Flow itself will generate messages to display to the user. +One important place this occurs is during view-to-model data binding. +When a binding error occurs, such as a type conversion error, Web Flow will map that error to a message retrieved from your resource bundle automatically. 
+To lookup the message to display, Web Flow tries resource keys that contain the binding error code and target property name. + +As an example, consider a binding to a `checkinDate` property of a `Booking` object. +Suppose the user typed in a alphabetic string. +In this case, a type conversion error will be raised. +Web Flow will map the 'typeMismatch' error code to a message by first querying your resource bundle for a message with the following key: + +``` +booking.checkinDate.typeMismatch + +``` + +The first part of the key is the model class's short name. +The second part of the key is the property name. The third part is the error code. +This allows for the lookup of a unique message to display to the user when a binding fails on a model property. +Such a message might say: + +``` +booking.checkinDate.typeMismatch=The check in date must be in the format yyyy-mm-dd. + +``` + +If no such resource key can be found of that form, a more generic key will be tried. +This key is simply the error code. The field name of the property is provided as a message argument. + +``` +typeMismatch=The {0} field is of the wrong type. + +``` + +## 5.14. Displaying popups + +Use the `popup` attribute to render a view in a modal popup dialog: + +``` + + +``` + +When using Web Flow with the Spring Javascript, no client side code is necessary for the popup to display. +Web Flow will send a response to the client requesting a redirect to the view from a popup, and the client will honor the request. + +## 5.15. View backtracking + +By default, when you exit a view state and transition to a new view state, you can go back to the previous state using the browser back button. +These view state history policies are configurable on a per-transition basis by using the `history` attribute. + +### 5.15.1. Discarding history + +Set the history attribute to `discard` to prevent backtracking to a view: + +``` + + +``` + +### 5.15.2. 
Invalidating history + +Set the history attribute to `invalidate` to prevent backtracking to a view as well all previously displayed views: + +``` + + +``` \ No newline at end of file diff --git a/docs/en/spring-web-flow/whatsnew.md b/docs/en/spring-web-flow/whatsnew.md new file mode 100644 index 0000000000000000000000000000000000000000..a1580803c9e46d80088240778217f43da0c3c61d --- /dev/null +++ b/docs/en/spring-web-flow/whatsnew.md @@ -0,0 +1,226 @@ +# 2. What's New + +## 2.1. Spring Web Flow 2.5 + +This release provides an upgrade path to Spring Framework 5 that in turn requires +Java 8+, Servlet 3.1, Hibernate 5, Tiles 3. See the[Spring Framework wiki](https://github.com/spring-projects/spring-framework/wiki/What%27s-New-in-Spring-Framework-5.x)for more details. The [samples repository](https://github.com/spring-projects/spring-webflow-samples)has been upgraded to Spring Web Flow 2.5. + +As of 2.5 there is no longer a *spring-js* module. The classes from that module +have been kept but moved to new packages in the *spring-webflow* module. +The *spring-js-resources* module is available as an optional module that +must be included explicitly. + +This release requires JSF 2.2 or higher. + +## 2.2. Spring Web Flow 2.4 + +This release requires JDK 1.6. + +### 2.2.1. Java-based Configuration + +Web Flow now supports a Java-based alternative for its system configuration. +See the updated [Chapter 10, *System Setup*](system-setup.html). + +Also see the[booking-mvc](https://github.com/spring-projects/spring-webflow-samples/tree/master/booking-mvc) and[booking-faces](https://github.com/spring-projects/spring-webflow-samples/tree/master/booking-faces)samples that have been updated to use all Java config. + +### 2.2.2. Spring MVC Flash Scope Integration + +When a flow ends it can now redirect to a Spring MVC controller after saving +attributes in Spring MVC's flash scope for the controller to access. 
+ +See [Section 11.8, “Saving Flow Output to MVC Flash Scope”](spring-mvc.html#spring-mvc-flash-output). + +### 2.2.3. Partial JSR-303 Bean Validation + +A flow definition can apply partial validation on the model through the validation-hints +attribute supported on view state and transition elements. + +See [the section called “Partial Validation”](views.html#view-validation-jsr303-partial). + +### 2.2.4. Hibernate Support + +The `HibernateFlowExecutionListener` now supports Hibernate 4 in addition to Hibernate 3. + +As of 2.4.4 the `HibernateFlowExecutionListener` also works with Hibernate 5. + +### 2.2.5. Tiles 3 Support + +The `AjaxTilesView` now supports Tiles 3 in addition to Tiles 2.2. + +### 2.2.6. Minimum JSF 2.0 Requirement + +JavaServer Faces version 1.2 and earlier are no longer supported by Spring Web Flow; if you have not done so already, you will need to upgrade to JSF 2.0 or above. +In addition, the Spring Faces components that were previously provided with JSF 1.2 for progressive AJAX enhancements have been removed in this release. + +See [???](). + +### 2.2.7. Portlet API 2.0 and JSF 2.0 support + +The internal Portlet integration introduced in Spring Web Flow 2.2 has been upgraded for JSF 2.0 compatibility. +Some of the more advanced JSF 2.0 features, such as partial state saving, are not supported in a Portlet environment, however, existing applications can now upgrade to the minimum required JSF version. +Upgraded projects will need to ensure that the `` element is +included as part of their Spring configuration. + +### 2.2.8. Deprecations + +This release deprecates *Spring.js*. The deprecation includes the entire *spring-js-resources* module including *Spring.js* and *Spring-Dojo.js* and the bundled Dojo and CSS Framework. +Also deprecated is the `SpringJavascriptAjaxHandler` from the *spring-js* module. The rest of *spring-js*, +e.g. `AjaxHandler`, `AjaxTilesView`, will be +folded into *spring-webflow* in a future release. 
+ +OGNL support is now deprecated. + +## 2.3. Spring Web Flow 2.3 + +### 2.3.1. Embedding A Flow On A Page + +By default Web Flow does a client-side redirect upon entering every view state. +That makes it impossible to embed a flow on a page or within a modal dialog and execute more than one view state without causing a full-page refresh. +Web Flow now supports launching a flow in "embedded" mode. +In this mode a flow can transition to other view states without a client-side redirect during Ajax requests. +See [Section 11.7, “Embedding A Flow On A Page”](spring-mvc.html#spring-mvc-embedded-flow) and [Section 13.6, “Embedding a Flow On a Page”](spring-faces.html#spring-faces-embedded-mode). + +### 2.3.2. Support For JSR-303 Bean Validation + +Support for the JSR-303 Bean Validation API is now available building on equivalent support available in Spring MVC. +See [Section 5.10, “Validating a model”](views.html#view-validate) for more details. + +### 2.3.3. Flow-Managed Persistence Context Propagation + +Starting with Web Flow 2.3 a flow managed `PersistenceContext` is automatically extended (propagated) to sub-flows, assuming the subflow has the feature enabled as well. +See [Section 7.3, “Flow Managed Persistence And Sub-Flows”](flow-managed-persistence.html#flow-managed-persistence-propagation). + +### 2.3.4. Portlet 2.0 Resource Requests + +Support for Portlet 2.0 resource requests has now been added enabling Ajax requests with partial rendering. +URLs for such requests can be prepared with the `` tag in JSP pages. +Server-side processing is similar to an action and a render request combined in a single request. +Unlike a render request, the response from a resource request includes content from the target portlet only. + +### 2.3.5. Custom ConversationManager + +The `` element now provides a conversation-manager attribute accepting a reference to a ConversationManager instance. + +### 2.3.6. 
Redirect In Same State + +By default Web Flow does a client-side redirect when remaining in the same view state as long as the current request is not an Ajax request. +This is useful after form validation failure. +Hitting Refresh or Back won't result in browser warnings. Hence this behavior is usually desirable. +However a new flow execution attribute makes it possible to disable it and that may also be necessary in some cases specific to JSF applications. +See [Section 13.7, “Redirect In Same State”](spring-faces.html#spring-faces-redirect-in-same-state). + +### 2.3.7. Samples + +The process for building the samples included with the distribution has been simplified. +Maven can be used to build all samples in one step. +Eclipse settings include source code references to simplify debugging. + +Additional samples can be accessed as follows: + +``` +mkdir spring-samples +cd spring-samples +svn co https://src.springframework.org/svn/spring-samples/webflow-primefaces-showcase +cd webflow-primefaces-showcase +mvn package +# import into Eclipse + +``` + +``` +mkdir spring-samples +cd spring-samples +svn co https://src.springframework.org/svn/spring-samples/webflow-showcase +cd webflow-showcase +mvn package +# import into Eclipse + +``` + +## 2.4. Spring Web Flow 2.2 + +### 2.4.1. JSF 2 Support + +#### Comprehensive JSF 2 Support + +Building on 2.1, Spring Web Flow version 2.2 adds support for core JSF 2 features. +The following features that were not supported in 2.1 are now available: +partial state saving, JSF 2 resource request handling, and JSF 2 Ajax requests. +At this point support for JSF 2 is considered +comprehensive although not covering every JSF 2 feature -- +excluded are mostly features that overlap with the core value Web Flow provides +such as those relating to navigation and state management. + +See [Section 13.3, “Configuring Web Flow for use with JSF”](spring-faces.html#spring-faces-webflow-config) for important configuration changes. 
+Note that partial state saving is only supported with Sun Mojarra 2.0.3 or later. +It is not yet supported with Apache MyFaces. This is due to the +fact MyFaces was not as easy to customize with regards to how component state is stored. +We will work with Apache MyFaces to provide this support. In the meantime you will need to use +the `javax.faces.PARTIAL_STATE_SAVING` context parameter in `web.xml` to disable partial state saving with Apache MyFaces. + +#### Travel Sample With the PrimeFaces Components + +The main Spring Travel sample demonstrating Spring Web Flow and JSF support +is now built on JSF 2 and components from the PrimeFaces component library. +Please check out the booking-faces sample in the distribution. + +Additional samples can be found at the Spring Web Flow - Prime Faces [Showcase](https://src.springframework.org/svn/spring-samples/webflow-primefaces-showcase), an SVN repository within the [spring-samples](https://src.springframework.org/svn/spring-samples) repository. Use these commands to check out and build: + +``` +svn co https://src.springframework.org/svn/spring-samples/webflow-primefaces-showcase + cd webflow-primefaces-showcase + mvn package + +``` + +### 2.4.2. Spring Security Facelets Tag Library + +A new Spring Security tag library is available for use with JSF 2.0 or with JSF 1.2 Facelets views. +It provides an \ tag as well as several EL functions. +See [Section 13.9, “Using the Spring Security Facelets Tag Library”](spring-faces.html#spring-faces-security-taglib) for more details. + +### 2.4.3. Spring JavaScript Updates + +#### Deprecated ResourcesServlet + +Starting with Spring 3.0.4, the Spring Framework includes +a replacement for the ResourcesServlet. Please see +the Spring Framework documentation for details on the custom mvc namespace, +specifically the new ["resources"](http://static.springsource.org/spring/docs/3.0.x/spring-framework-reference/html/mvc.html#mvc-static-resources) element. 
+ +#### Dojo 1.5 and dojox + +The bundled custom Dojo build is upgraded to version 1.5. It now includes dojox. + +Note that applications are generally encouraged to prepare their own custom +Dojo build for optimized performance depending on what parts of Dojo are +commonly used together. For examples see the[scripts](https://src.springframework.org/svn/spring-webflow/branches/spring-webflow-2.2-maintenance/spring-js-resources/scripts/dojo)used by Spring Web Flow to prepare its own custom Dojo build. + +#### Two Spring JS artifacts + +The `spring-js` artifact has been split in two -- the new artifact +(`spring-js-resources`) contains client side resource (.js, .css, etc.) while +the existing artifact (`spring-js`) contains server-side Java code only. + +Applications preparing their own custom Dojo build have an option now to +avoid including `spring-js-resources` and put `Spring.js` and`Spring-Dojo.js` directly under the root of their web application. + +#### Client resources moved into META-INF/web-resources + +Bundled client resources (.js, .css, etc.) +have been moved to `META-INF/web-resources` from their previous location +under `META-INF`. This change is transparent for applications but will result +in simpler and safer configuration when using the new resource handling +mechanism available in Spring 3.0.4. + +### 2.4.4. JSF Portlet Support + +#### Portlet API 2.0 and JSF 1.2 support + +In previous versions of Spring Web Flow support for JSF Portlets relied on +a Portlet Bridge for JSF implementation and was considered experimental. +Spring Web Flow 2.2 adds support for JSF Portlets based on its own internal +Portlet integration targeting Portlet API 2.0 and JSF 1.2 environments. +See [???]() for more details. +The Spring Web Flow Travel JSF Portlets sample has been successfully +tested on the Apache Pluto portal container. 
\ No newline at end of file diff --git a/docs/en/spring-web-services/READEME.md b/docs/en/spring-web-services/READEME.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-web-services/spring-web-service.md b/docs/en/spring-web-services/spring-web-service.md new file mode 100644 index 0000000000000000000000000000000000000000..977b1794e5c0af2624a990ee6761862cc7e406e3 --- /dev/null +++ b/docs/en/spring-web-services/spring-web-service.md @@ -0,0 +1,3476 @@ +# Spring Web Services Reference Documentation + +## Preface + +In the current age of Service Oriented Architectures, more and more people use web services to connect previously unconnected systems. Initially, web services were considered to be just another way to do a Remote Procedure Call (RPC). Over time, however, people found out that there is a big difference between RPCs and web services. Especially when interoperability with other platforms is important, it is often better to send encapsulated XML documents that contain all the data necessary to process the request. Conceptually, XML-based web services are better compared to message queues than to remoting solutions. Overall, XML should be considered the platform-neutral representation of data, the *common language* of SOA. When developing or using web services, the focus should be on this XML and not on Java. + +Spring Web Services focuses on creating these document-driven web services. Spring Web Services facilitates contract-first SOAP service development, allowing for the creation of flexible web services by using one of the many ways to manipulate XML payloads. Spring-WS provides a powerful [message dispatching framework](#server), a [WS-Security](#security) solution that integrates with your existing application security solution, and a [Client-side API](#client) that follows the familiar Spring template pattern. + +# I. 
Introduction + +This first part of the reference documentation [is an overview](#what-is-spring-ws) of Spring Web Services and the underlying concepts. Spring-WS is then introduced, and [the concepts](#why-contract-first) behind contract-first web service development are explained. + +## 1. What is Spring Web Services? + +### 1.1. Introduction + +Spring Web Services (Spring-WS) is a product of the Spring community and is focused on creating document-driven web services. Spring Web Services aims to facilitate contract-first SOAP service development, allowing for the creation of flexible web services by using one of the many ways to manipulate XML payloads. The product is based on Spring itself, which means you can use the Spring concepts (such as dependency injection) as an integral part of your web service. + +People use Spring-WS for many reasons, but most are drawn to it after finding alternative SOAP stacks lacking when it comes to following web service best practices. Spring-WS makes the best practice an easy practice. This includes practices such as the WS-I basic profile, contract-first development, and having a loose coupling between contract and implementation. The other key features of Spring Web Services are: + +* [Powerful mappings](#features-powerful-mappings) + +* [XML API support](#features-xml-api-support) + +* [Flexible XML Marshalling](#features-flexible-xml-marshalling) + +* [Reusing Your Spring expertise](#features-reusing-your-spring-expertise) + +* [Support for WS-Security](#features-support-for-ws-security) + +* [Integration with Spring Security](#features-integration-with-spring-security) + +* [Apache license](#features-apache-license) + +#### 1.1.1. Powerful mappings + +You can distribute incoming XML requests to any object, depending on message payload, SOAP Action header, or an XPath expression. + +#### 1.1.2. 
XML API support + +Incoming XML messages can be handled not only with standard JAXP APIs such as DOM, SAX, and StAX, but also with JDOM, dom4j, XOM, or even marshalling technologies. + +#### 1.1.3. Flexible XML Marshalling + +Spring Web Services builds on the Object/XML Mapping module in the Spring Framework, which supports JAXB 1 and 2, Castor, XMLBeans, JiBX, and XStream. + +#### 1.1.4. Reusing Your Spring expertise + +Spring-WS uses Spring application contexts for all configuration, which should help Spring developers get up-to-speed quickly. Also, the architecture of Spring-WS resembles that of Spring-MVC. + +#### 1.1.5. Support for WS-Security + +WS-Security lets you sign SOAP messages, encrypt and decrypt them, or authenticate against them. + +#### 1.1.6. Integration with Spring Security + +The WS-Security implementation of Spring Web Services provides integration with Spring Security. This means you can use your existing Spring Security configuration for your SOAP service as well. + +#### 1.1.7. Apache license + +You can confidently use Spring-WS in your project. + +### 1.2. Runtime environment + +Spring Web Services requires a standard Java 8 Runtime Environment. Spring-WS is built on Spring Framework 4.0.9, but higher versions are supported. + +Spring-WS consists of a number of modules, which are described in the remainder of this section. + +* The XML module (`spring-xml.jar`) contains various XML support classes for Spring Web Services. This module is mainly intended for the Spring-WS framework itself and not web service developers. + +* The Core module (`spring-ws-core.jar`) is the central part of the Spring’s web services functionality. It provides the central [`WebServiceMessage`](#web-service-messages) and [`SoapMessage`](#soap-message) interfaces, the [server-side](#server) framework (with powerful message dispatching), the various support classes for implementing web service endpoints, and the [client-side](#client) `WebServiceTemplate`. 
+ +* The Support module (`spring-ws-support.jar`) contains additional transports (JMS, Email, and others). + +* The [Security](#security) package (`spring-ws-security.jar`) provides a WS-Security implementation that integrates with the core web service package. It lets you sign, decrypt and encrypt, and add principal tokens to SOAP messages. Additionally, it lets you use your existing Spring Security implementation for authentication and authorization. + +The following figure shows the dependencies between the Spring-WS modules. Arrows indicate dependencies (that is, Spring-WS Core depends on Spring-XML and the OXM module found in Spring 3 and higher). + +![spring deps](spring-deps.png) + +### 1.3. Supported standards + +Spring Web Services supports the following standards: + +* SOAP 1.1 and 1.2 + +* WSDL 1.1 and 2.0 (XSD-based generation is supported only for WSDL 1.1) + +* WS-I Basic Profile 1.0, 1.1, 1.2, and 2.0 + +* WS-Addressing 1.0 and the August 2004 draft + +* SOAP Message Security 1.1, Username Token Profile 1.1, X.509 Certificate Token Profile 1.1, SAML Token Profile 1.1, Kerberos Token Profile 1.1, Basic Security Profile 1.1 + +## 2. Why Contract First? + +When creating web services, there are two development styles: contract-last and contract-first. When you use a contract-last approach, you start with the Java code and let the web service contract (in WSDL — see sidebar) be generated from that. When using contract-first, you start with the WSDL contract and use Java to implement the contract. + +**What is WSDL?** + +WSDL stands for Web Service Description Language. A WSDL file is an XML document that describes a web service. It specifies the location of the service and the operations (or methods) the service exposes. For more information about WSDL, see the [WSDL specification](https://www.w3.org/TR/wsdl). + +Spring-WS supports only the contract-first development style, and this section explains why. + +### 2.1. 
Object/XML Impedance Mismatch + +Similar to the field of ORM, where we have an [Object/Relational impedance mismatch](https://en.wikipedia.org/wiki/Object-Relational_impedance_mismatch), converting Java objects to XML has a similar problem. At first glance, the O/X mapping problem appears simple: Create an XML element for each Java object to convert all Java properties and fields to sub-elements or attributes. However, things are not as simple as they appear, because there is a fundamental difference between hierarchical languages, such as XML (and especially XSD), and the graph model of Java. + +| |Most of the contents in this section were inspired by [[alpine]](#alpine) and [[effective-enterprise-java]](#effective-enterprise-java).| +|---|----------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.1. XSD Extensions + +In Java, the only way to change the behavior of a class is to subclass it to add the new behavior to that subclass. In XSD, you can extend a data type by restricting it — that is, constraining the valid values for the elements and attributes. For instance, consider the following example: + +``` + + + + + +``` + +This type restricts a XSD string by way of a regular expression, allowing only three upper case letters. If this type is converted to Java, we end up with an ordinary `java.lang.String`. The regular expression is lost in the conversion process, because Java does not allow for these sorts of extensions. + +#### 2.1.2. Unportable Types + +One of the most important goals of a web service is to be interoperable: to support multiple platforms such as Java, .NET, Python, and others. Because all of these languages have different class libraries, you must use some common, cross-language format to communicate between them. That format is XML, which is supported by all of these languages. 
+ +Because of this conversion, you must make sure that you use portable types in your service implementation. Consider, for example, a service that returns a `java.util.TreeMap`: + +``` +public Map getFlights() { + // use a tree map, to make sure it's sorted + TreeMap map = new TreeMap(); + map.put("KL1117", "Stockholm"); + ... + return map; +} +``` + +Undoubtedly, the contents of this map can be converted into some sort of XML, but since there is no standard way to describe a map in XML, it will be proprietary. Also, even if it can be converted to XML, many platforms do not have a data structure similar to the `TreeMap`. So when a .NET client accesses your web service, it probably ends up with a `System.Collections.Hashtable`, which has different semantics. + +This problem is also present when working on the client side. Consider the following XSD snippet, which describes a service contract: + +``` + + + + + + + + + +``` + +This contract defines a request that takes an `date`, which is a XSD datatype representing a year, month, and day. If we call this service from Java, we probably use either a `java.util.Date` or `java.util.Calendar`. However, both of these classes actually describe times, rather than dates. So, we actually end up sending data that represents the fourth of April 2007 at midnight (`2007-04-04T00:00:00`), which is not the same as `2007-04-04`. + +#### 2.1.3. Cyclic Graphs + +Imagine we have the following class structure: + +``` +public class Flight { + private String number; + private List passengers; + + // getters and setters omitted +} + +public class Passenger { + private String name; + private Flight flight; + + // getters and setters omitted +} +``` + +This is a cyclic graph: the `Flight` refers to the `Passenger`, which refers to the `Flight` again. Cyclic graphs like these are quite common in Java. 
If we take a naive approach to converting this to XML, we end up with something like: + +``` + + + + Arjen Poutsma + + + + Arjen Poutsma + + + + Arjen Poutsma + ... +``` + +Processing such a structure is likely to take a long time to finish, because there is no stop condition for this loop. + +One way to solve this problem is to use references to objects that were already marshalled: + +``` + + + + Arjen Poutsma + + + ... + + +``` + +This solves the recursion problem but introduces new ones. For one, you cannot use an XML validator to validate this structure. Another issue is that the standard way to use these references in SOAP (RPC/encoded) has been deprecated in favor of document/literal (see the WS-I [Basic Profile](http://www.ws-i.org/Profiles/BasicProfile-1.1.html#SOAP_encodingStyle_Attribute)). + +These are just a few of the problems when dealing with O/X mapping. It is important to respect these issues when writing web services. The best way to respect them is to focus on the XML completely, while using Java as an implementation language. This is what contract-first is all about. + +### 2.2. Contract-first Versus Contract-last + +Besides the Object/XML Mapping issues mentioned in the previous section, there are other reasons for preferring a contract-first development style. + +* [Fragility](#contract-first-fragility) + +* [Performance](#contract-first-performance) + +* [Reusability](#contract-first-reusability) + +* [Versioning](#contract-first-versioning) + +#### 2.2.1. Fragility + +As mentioned earlier, the contract-last development style results in your web service contract (WSDL and your XSD) being generated from your Java contract (usually an interface). If you use this approach, you have no guarantee that the contract stays constant over time. Each time you change your Java contract and redeploy it, there might be subsequent changes to the web service contract. 
+ +Additionally, not all SOAP stacks generate the same web service contract from a Java contract. This means that changing your current SOAP stack for a different one (for whatever reason) might also change your web service contract. + +When a web service contract changes, users of the contract have to be instructed to obtain the new contract and potentially change their code to accommodate for any changes in the contract. + +For a contract to be useful, it must remain constant for as long as possible. If a contract changes, you have to contact all the users of your service and instruct them to get the new version of the contract. + +#### 2.2.2. Performance + +When a Java object is automatically transformed into XML, there is no way to be sure as to what is sent across the wire. An object might reference another object, which refers to another, and so on. In the end, half of the objects on the heap in your virtual machine might be converted into XML, which results in slow response times. + +When using contract-first, you explicitly describe what XML is sent where, thus making sure that it is exactly what you want. + +#### 2.2.3. Reusability + +Defining your schema in a separate file lets you reuse that file in different scenarios. Consider the definition of an `AirportCode` in a file called `airline.xsd`: + +``` + + + + + +``` + +You can reuse this definition in other schemas, or even WSDL files, by using an `import` statement. + +#### 2.2.4. Versioning + +Even though a contract must remain constant for as long as possible, they do need to be changed sometimes. In Java, this typically results in a new Java interface, such as `AirlineService2`, and a (new) implementation of that interface. Of course, the old service must be kept around, because there might be clients who have not yet migrated. + +If using contract-first, we can have a looser coupling between contract and implementation. 
Such a looser coupling lets us implement both versions of the contract in one class. We could, for instance, use an XSLT stylesheet to convert any “old-style” messages to the “new-style” messages. + +## 3. Writing Contract-First Web Services + +This tutorial shows you how to write [contract-first web services](#why-contract-first) — that is, how to develop web services that start with the XML Schema or WSDL contract first followed by the Java code second. Spring-WS focuses on this development style, and this tutorial should help you get started. Note that the first part of this tutorial contains almost no Spring-WS specific information. It is mostly about XML, XSD, and WSDL. The [second part](#tutorial-creating-project) focuses on implementing this contract with Spring-WS. + +The most important thing when doing contract-first web service development is to think in terms of XML. This means that Java language concepts are of lesser importance. It is the XML that is sent across the wire, and you should focus on that. Java being used to implement the web service is an implementation detail. + +In this tutorial, we define a web service that is created by a Human Resources department. Clients can send holiday request forms to this service to book a holiday. + +### 3.1. Messages + +In this section, we focus on the actual XML messages that are sent to and from the web service. We start out by determining what these messages look like. + +#### 3.1.1. Holiday + +In the scenario, we have to deal with holiday requests, so it makes sense to determine what a holiday looks like in XML: + +``` + + 2006-07-03 + 2006-07-07 + +``` + +A holiday consists of a start date and an end date. We have also decided to use the standard [ISO 8601](https://www.cl.cam.ac.uk/~mgk25/iso-time.html) date format for the dates, because that saves a lot of parsing hassle. We have also added a namespace to the element, to make sure our elements can be used within other XML documents. + +#### 3.1.2. 
Employee + +There is also the notion of an employee in the scenario. Here is what it looks like in XML: + +``` + + 42 + Arjen + Poutsma + +``` + +We have used the same namespace as before. If this `` element could be used in other scenarios, it might make sense to use a different namespace, such as `[http://example.com/employees/schemas](http://example.com/employees/schemas)`. + +#### 3.1.3. HolidayRequest + +Both the `holiday` element and the `employee` element can be put in a ``: + +``` + + + 2006-07-03 + 2006-07-07 + + + 42 + Arjen + Poutsma + + +``` + +The order of the two elements does not matter: `` could have been the first element. What matters is that all of the data is there. In fact, the data is the only thing that is important: We take a data-driven approach. + +### 3.2. Data Contract + +Now that we have seen some examples of the XML data that we can use, it makes sense to formalize this into a schema. This data contract defines the message format we accept. There are four different ways of defining such a contract for XML: + +* DTDs + +* [XML Schema (XSD)](https://www.w3.org/XML/Schema) + +* [RELAX NG](http://www.relaxng.org/) + +* [Schematron](http://www.schematron.com/) + +DTDs have limited namespace support, so they are not suitable for web services. Relax NG and Schematron are easier than XML Schema. Unfortunately, they are not so widely supported across platforms. As a result, we use XML Schema. + +By far, the easiest way to create an XSD is to infer it from sample documents. Any good XML editor or Java IDE offers this functionality. Basically, these tools use some sample XML documents to generate a schema that validates them all. The end result certainly needs to be polished up, but it is a great starting point. + +Using the sample described earlier, we end up with the following generated schema: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +This generated schema can be improved. 
The first thing to notice is that every type has a root-level element declaration. This means that the web service should be able to accept all of these elements as data. This is not desirable: We want to accept only a ``. By removing the wrapping element tags (thus keeping the types) and inlining the results, we can accomplish this, as follows: + +``` + + + + + + + + + + + + + + + + + + + + + + + +``` + +The schema still has one problem: With a schema like this, you can expect the following message to validate: + +``` + + + this is not a date + neither is this + + PlainText Section qName:lineannotation level:4, chunks:[<, !-- ... --, >] attrs:[:] + +``` + +Clearly, we must make sure that the start and end date are really dates. XML Schema has an excellent built-in `date` type that we can use. We also change the `NCName` s to `string` instances. Finally, we change the `sequence` in `` to `all`. This tells the XML parser that the order of `` and `` is not significant. Our final XSD now looks like the following listing: + +``` + + + + + (1) + (1) + + + + + + (2) + (2) + + + + + + (3) + (3) + + + +``` + +|**1**| `all` tells the XML parser that the order of `` and `` is not significant. | +|-----|-----------------------------------------------------------------------------------------------------------------| +|**2**|We use the `xs:date` data type (which consist of a year, a month, and a day) for `` and ``.| +|**3**| `xs:string` is used for the first and last names. | + +We store this file as `hr.xsd`. + +### 3.3. Service Contract + +A service contract is generally expressed as a [WSDL](https://www.w3.org/TR/wsdl) file. Note that, in Spring-WS, writing the WSDL by hand is not required. Based on the XSD and some conventions, Spring-WS can create the WSDL for you, as explained in the section entitled [Implementing the Endpoint](#tutorial-implementing-endpoint). The remainder of this section shows how to write WSDL by hand. 
You may want to skip to [the next section](#tutorial-creating-project). + +We start our WSDL with the standard preamble and by importing our existing XSD. To separate the schema from the definition, we use a separate namespace for the WSDL definitions: `[http://mycompany.com/hr/definitions](http://mycompany.com/hr/definitions)`. The following listing shows the preamble: + +``` + + + + + + +``` + +Next, we add our messages based on the written schema types. We only have one message, the `` we put in the schema: + +``` + + + +``` + +We add the message to a port type as an operation: + +``` + + + + + +``` + +That message finishes the abstract part of the WSDL (the interface, as it were) and leaves the concrete part. The concrete part consists of a `binding` (which tells the client how to invoke the operations you have just defined) and a `service` (which tells the client where to invoke it). + +Adding a concrete part is pretty standard. To do so, refer to the abstract part you defined previously, make sure you use `document/literal` for the `soap:binding` elements (`rpc/encoded` is deprecated), pick a `soapAction` for the operation (in this case, `[http://mycompany.com/RequestHoliday](http://mycompany.com/RequestHoliday)`, but any URI works), and determine the `location` URL where you want the request to arrive (in this case, `[http://mycompany.com/humanresources](http://mycompany.com/humanresources)`): + +``` + + + + + + + (2) + (3) + + (4) + + (2) + + + (4)(5) + (7) + + (8) + + (6) + + + + + (5) + (9) + + + +``` + +|**1**| We import the schema defined in [Data Contract](#tutorial.xsd). | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| We define the `HolidayRequest` message, which gets used in the `portType`. | +|**3**| The `HolidayRequest` type is defined in the schema. | +|**4**| We define the `HumanResource` port type, which gets used in the `binding`. 
| +|**5**| We define the `HumanResourceBinding` binding, which gets used in the `port`. | +|**6**| We use a document/literal style. | +|**7**| The literal `[http://schemas.xmlsoap.org/soap/http](http://schemas.xmlsoap.org/soap/http)` signifies a HTTP transport. | +|**8**| The `soapAction` attribute signifies the `SOAPAction` HTTP header that will be sent with every request. | +|**9**|The `[http://localhost:8080/holidayService/](http://localhost:8080/holidayService/)` address is the URL where the web service can be invoked.| + +The preceding listing shows the final WSDL. We describe how to implement the resulting schema and WSDL in the next section. + +### 3.4. Creating the project + +In this section, we use [Maven](https://maven.apache.org/) to create the initial project structure for us. Doing so is not required but greatly reduces the amount of code we have to write to setup our HolidayService. + +The following command creates a Maven web application project for us by using the Spring-WS archetype (that is, project template): + +``` +mvn archetype:create -DarchetypeGroupId=org.springframework.ws \ + -DarchetypeArtifactId=spring-ws-archetype \ + -DarchetypeVersion= \ + -DgroupId=com.mycompany.hr \ + -DartifactId=holidayService +``` + +The preceding command creates a new directory called `holidayService`. In this directory is a `src/main/webapp` directory, which contains the root of the WAR file. You can find the standard web application deployment descriptor (`'WEB-INF/web.xml'`) here, which defines a Spring-WS `MessageDispatcherServlet` and maps all incoming requests to this servlet: + +``` + + + MyCompany HR Holiday Service + + + + spring-ws + org.springframework.ws.transport.http.MessageDispatcherServlet + + + + spring-ws + /* + + + +``` + +In addition to the preceding `WEB-INF/web.xml` file, you also need another, Spring-WS-specific, configuration file, named `WEB-INF/spring-ws-servlet.xml`. 
This file contains all of the Spring-WS-specific beans, such as `EndPoints` and `WebServiceMessageReceivers` and is used to create a new Spring container. The name of this file is derived from the name of the attendant servlet (in this case `'spring-ws'`) with `-servlet.xml` appended to it. So if you define a `MessageDispatcherServlet` with the name `'dynamite'`, the name of the Spring-WS-specific configuration file becomes `WEB-INF/dynamite-servlet.xml`. + +(You can see the contents of the `WEB-INF/spring-ws-servlet.xml` file for this example in [[tutorial.example.sws-conf-file]](#tutorial.example.sws-conf-file).) + +Once you had the project structure created, you can put the schema and the WSDL from the previous section into `'WEB-INF/'` folder. + +### 3.5. Implementing the Endpoint + +In Spring-WS, you implement endpoints to handle incoming XML messages. An endpoint is typically created by annotating a class with the `@Endpoint` annotation. In this endpoint class, you can create one or more methods that handle incoming request. The method signatures can be quite flexible. You can include almost any sort of parameter type related to the incoming XML message, as we explain later in this chapter. + +#### 3.5.1. Handling the XML Message + +In this sample application, we use [JDom 2](http://www.jdom.org/) to handle the XML message. We also use [XPath](https://www.w3.org/TR/xpath20/), because it lets us select particular parts of the XML JDOM tree without requiring strict schema conformance. 
+ +The following listing shows the class that defines our holiday endpoint: + +``` +package com.mycompany.hr.ws; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Date; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.ws.server.endpoint.annotation.Endpoint; +import org.springframework.ws.server.endpoint.annotation.PayloadRoot; +import org.springframework.ws.server.endpoint.annotation.RequestPayload; + +import com.mycompany.hr.service.HumanResourceService; +import org.jdom2.Element; +import org.jdom2.JDOMException; +import org.jdom2.Namespace; +import org.jdom2.filter.Filters; +import org.jdom2.xpath.XPathExpression; +import org.jdom2.xpath.XPathFactory; + +@Endpoint (1) +public class HolidayEndpoint { + + private static final String NAMESPACE_URI = "http://mycompany.com/hr/schemas"; + + private XPathExpression startDateExpression; + + private XPathExpression endDateExpression; + + private XPathExpression firstNameExpression; + + private XPathExpression lastNameExpression; + + private HumanResourceService humanResourceService; + + @Autowired (2) + public HolidayEndpoint(HumanResourceService humanResourceService) throws JDOMException { + this.humanResourceService = humanResourceService; + + Namespace namespace = Namespace.getNamespace("hr", NAMESPACE_URI); + XPathFactory xPathFactory = XPathFactory.instance(); + startDateExpression = xPathFactory.compile("//hr:StartDate", Filters.element(), null, namespace); + endDateExpression = xPathFactory.compile("//hr:EndDate", Filters.element(), null, namespace); + firstNameExpression = xPathFactory.compile("//hr:FirstName", Filters.element(), null, namespace); + lastNameExpression = xPathFactory.compile("//hr:LastName", Filters.element(), null, namespace); + } + + @PayloadRoot(namespace = NAMESPACE_URI, localPart = "HolidayRequest") (3) + public void handleHolidayRequest(@RequestPayload Element holidayRequest) throws 
Exception {(4) + Date startDate = parseDate(startDateExpression, holidayRequest); + Date endDate = parseDate(endDateExpression, holidayRequest); + String name = firstNameExpression.evaluateFirst(holidayRequest).getText() + " " + lastNameExpression.evaluateFirst(holidayRequest).getText(); + + humanResourceService.bookHoliday(startDate, endDate, name); + } + + private Date parseDate(XPathExpression expression, Element element) throws ParseException { + Element result = expression.evaluateFirst(element); + if (result != null) { + SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); + return dateFormat.parse(result.getText()); + } else { + throw new IllegalArgumentException("Could not evaluate [" + expression + "] on [" + element + "]"); + } + } + +} +``` + +|**1**| The `HolidayEndpoint` is annotated with `@Endpoint`. This marks the class as a special sort of `@Component`, suitable for handling XML messages in Spring-WS, and also makes it eligible for suitable for component scanning. 
| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| The `HolidayEndpoint` requires the `HumanResourceService` business service to operate, so we inject the dependency in the constructor and annotate it with `@Autowired`.
    Next, we set up XPath expressions by using the JDOM2 API. There are four expressions: `//hr:StartDate` for extracting the `` text value, `//hr:EndDate` for extracting the end date, and two for extracting the names of the employee. | +|**3**| The `@PayloadRoot` annotation tells Spring-WS that the `handleHolidayRequest` method is suitable for handling XML messages. The sort of message that this method can handle is indicated by the annotation values. In this case, it can
    handle XML elements that have the `HolidayRequest` local part and the `[http://mycompany.com/hr/schemas](http://mycompany.com/hr/schemas)` namespace.
    More information about mapping messages to endpoints is provided in the next section. | +|**4**|The `handleHolidayRequest(..)` method is the main handling method, which gets passed the ``element from the incoming XML message. The `@RequestPayload` annotation indicates that the `holidayRequest` parameter should be mapped to the payload of the
    request message. We use the XPath expressions to extract the string values from the XML messages and convert these values to `Date` objects by using a`SimpleDateFormat` (the `parseData` method). With these values, we invoke a method on the business service.
    Typically, this results in a database transaction being started and some records being altered in the database.
    Finally, we define a `void` return type, which indicates to Spring-WS that we do not want to send a response message.
    If we want a response message, we could return a JDOM Element to represent the payload of the response message.| + +Using JDOM is just one of the options to handle the XML. Other options include DOM, dom4j, XOM, SAX, and StAX, but also marshalling techniques like JAXB, Castor, XMLBeans, JiBX, and XStream, as explained in [the next chapter](#common). We chose JDOM because it gives us access to the raw XML and because it is based on classes (not interfaces and factory methods as with W3C DOM and dom4j), which makes the code less verbose. We use XPath because it is less fragile than marshalling technologies. We do not need strict schema conformance as long as we can find the dates and the name. + +Because we use JDOM, we must add some dependencies to the Maven `pom.xml`, which is in the root of our project directory. Here is the relevant section of the POM: + +``` + + + org.springframework.ws + spring-ws-core + + + + jdom + jdom + 2.0.1 + + + jaxen + jaxen + 1.1 + + +``` + +Here is how we would configure these classes in our `spring-ws-servlet.xml` Spring XML configuration file by using component scanning. We also instruct Spring-WS to use annotation-driven endpoints, with the `` element. + +``` + + + + + + + +``` + +#### 3.5.2. Routing the Message to the Endpoint + +As part of writing the endpoint, we also used the `@PayloadRoot` annotation to indicate which sort of messages can be handled by the `handleHolidayRequest` method. In Spring-WS, this process is the responsibility of an `EndpointMapping`. Here, we route messages based on their content by using a `PayloadRootAnnotationMethodEndpointMapping`. 
The following listing shows the annotation we used earlier: + +``` +@PayloadRoot(namespace = "http://mycompany.com/hr/schemas", localPart = "HolidayRequest") +``` + +The annotation shown in the preceding example basically means that whenever an XML message is received with the namespace `[http://mycompany.com/hr/schemas](http://mycompany.com/hr/schemas)` and the `HolidayRequest` local name, it is routed to the `handleHolidayRequest` method. By using the `` element in our configuration, we enable the detection of the `@PayloadRoot` annotations. It is possible (and quite common) to have multiple, related handling methods in an endpoint, each of them handling different XML messages. + +There are also other ways to map endpoints to XML messages, which is described in [the next chapter](#common). + +#### 3.5.3. Providing the Service and Stub implementation + +Now that we have the endpoint, we need `HumanResourceService` and its implementation for use by `HolidayEndpoint`. The following listing shows the `HumanResourceService` interface: + +``` +package com.mycompany.hr.service; + +import java.util.Date; + +public interface HumanResourceService { + void bookHoliday(Date startDate, Date endDate, String name); +} +``` + +For tutorial purposes, we use a simple stub implementation of the `HumanResourceService`: + +``` +package com.mycompany.hr.service; + +import java.util.Date; + +import org.springframework.stereotype.Service; + +@Service (1) +public class StubHumanResourceService implements HumanResourceService { + public void bookHoliday(Date startDate, Date endDate, String name) { + System.out.println("Booking holiday for [" + startDate + "-" + endDate + "] for [" + name + "] "); + } +} +``` + +|**1**|The `StubHumanResourceService` is annotated with `@Service`. 
This marks the class as a business facade, which makes this a candidate for injection by `@Autowired` in `HolidayEndpoint`.| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 3.6. Publishing the WSDL + +Finally, we need to publish the WSDL. As stated in [Service Contract](#tutorial-service-contract), we do not need to write a WSDL ourselves. Spring-WS can generate one based on some conventions. Here is how we define the generation: + +``` + (5) + (2) + +``` + +|**1**| The `id` determines the URL where the WSDL can be retrieved. In this case, the `id` is `holiday`, which means that the WSDL can be retrieved
    as `holiday.wsdl` in the servlet context. The full URL is `[http://localhost:8080/holidayService/holiday.wsdl](http://localhost:8080/holidayService/holiday.wsdl)`. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Next, we set the WSDL port type to be `HumanResource`. | +|**3**|We set the location where the service can be reached: `/holidayService/`. We use a relative URI, and we instruct the framework to transform it
    dynamically to an absolute URI. Hence, if the service is deployed to different contexts, we do not have to change the URI manually.
    For more information, see [the section called “Automatic WSDL exposure”](#server-automatic-wsdl-exposure). For the location transformation to work, we need to add an init parameter to `spring-ws`servlet in `web.xml` (shown in the next listing).| +|**4**| We define the target namespace for the WSDL definition itself. Setting this attribute is not required. If not set, the WSDL has the same namespace as the XSD schema. | +|**5**| The `xsd` element refers to the human resource schema we defined in [Data Contract](#tutorial.xsd). We placed the schema in the `WEB-INF` directory of the application. | + +The following listing shows how to add the init parameter: + +``` + + transformWsdlLocations + true + +``` + +You can create a WAR file by using `mvn install`. If you deploy the application (to Tomcat, Jetty, and so on) and point your browser at [this location](http://localhost:8080/holidayService/holiday.wsdl), you see the generated WSDL. This WSDL is ready to be used by clients, such as [soapUI](http://www.soapui.org/) or other SOAP frameworks. + +That concludes this tutorial. The tutorial code can be found in the full distribution of Spring-WS. If you wish to continue, look at the echo sample application that is part of the distribution. After that, look at the airline sample, which is a bit more complicated, because it uses JAXB, WS-Security, Hibernate, and a transactional service layer. Finally, you can read the rest of the reference documentation. + +# II. Reference + +This part of the reference documentation details the various components that comprise Spring Web Services. This includes [a chapter](#common) that discusses the parts common to both client- and server-side WS, a chapter devoted to the specifics of [writing server-side web services](#server), a chapter about using web services on [the client-side](#client), and a chapter on using [WS-Security](#security). + +## 4. 
Shared components + +This chapter explores the components that are shared between client- and server-side Spring-WS development. These interfaces and classes represent the building blocks of Spring-WS, so you need to understand what they do, even if you do not use them directly. + +### 4.1. Web Service Messages + +This section describes the messages and message factories that Spring-WS uses. + +#### 4.1.1. `WebServiceMessage` + +One of the core interfaces of Spring Web Services is the `WebServiceMessage`. This interface represents a protocol-agnostic XML message. The interface contains methods that provide access to the payload of the message, in the form of a `javax.xml.transform.Source` or a `javax.xml.transform.Result`. `Source` and `Result` are tagging interfaces that represent an abstraction over XML input and output. Concrete implementations wrap various XML representations, as indicated in the following table: + +| Source or Result implementation | Wrapped XML representation | +|-----------------------------------------|-----------------------------------------------------------| +| `javax.xml.transform.dom.DOMSource` | `org.w3c.dom.Node` | +| `javax.xml.transform.dom.DOMResult` | `org.w3c.dom.Node` | +| `javax.xml.transform.sax.SAXSource` | `org.xml.sax.InputSource` and `org.xml.sax.XMLReader` | +| `javax.xml.transform.sax.SAXResult` | `org.xml.sax.ContentHandler` | +|`javax.xml.transform.stream.StreamSource`|`java.io.File`, `java.io.InputStream`, or `java.io.Reader` | +|`javax.xml.transform.stream.StreamResult`|`java.io.File`, `java.io.OutputStream`, or `java.io.Writer`| + +In addition to reading from and writing to the payload, a web service message can write itself to an output stream. + +#### 4.1.2. `SoapMessage` + +`SoapMessage` is a subclass of `WebServiceMessage`. It contains SOAP-specific methods, such as getting SOAP Headers, SOAP Faults, and so on. 
Generally, your code should not be dependent on `SoapMessage`, because the content of the SOAP Body (the payload of the message) can be obtained by using `getPayloadSource()` and `getPayloadResult()` in the `WebServiceMessage`. Only when it is necessary to perform SOAP-specific actions (such as adding a header, getting an attachment, and so on) should you need to cast `WebServiceMessage` to `SoapMessage`. + +#### 4.1.3. Message Factories + +Concrete message implementations are created by a `WebServiceMessageFactory`. This factory can create an empty message or read a message from an input stream. There are two concrete implementations of `WebServiceMessageFactory`. One is based on SAAJ, the SOAP with Attachments API for Java. The other is based on Axis 2’s AXIOM (AXis Object Model). + +##### `SaajSoapMessageFactory` + +The `SaajSoapMessageFactory` uses the SOAP with Attachments API for Java (SAAJ) to create `SoapMessage` implementations. SAAJ is part of J2EE 1.4, so it should be supported under most modern application servers. Here is an overview of the SAAJ versions supplied by common application servers: + +| Application Server | SAAJ Version | +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------| +| BEA WebLogic 8 | 1.1 | +| BEA WebLogic 9 |1.1/1.21| +| IBM WebSphere 6 | 1.2 | +| SUN Glassfish 1 | 1.3 | +|1Weblogic 9 has a known bug in the SAAJ 1.2 implementation: it implements all the 1.2 interfaces but throws an `UnsupportedOperationException` when called. Spring Web Services has a workaround: It uses SAAJ 1.1 when operating on WebLogic 9.| | + +Additionally, Java SE 6 includes SAAJ 1.3. You can wire up a `SaajSoapMessageFactory` as follows: + +``` + +``` + +| |SAAJ is based on DOM, the Document Object Model. 
This means that all SOAP messages are stored in memory. For larger SOAP messages, this may not be performant. In that case, the `AxiomSoapMessageFactory` might be more applicable.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### `AxiomSoapMessageFactory` + +The `AxiomSoapMessageFactory` uses the AXis 2 Object Model (AXIOM) to create `SoapMessage` implementations. AXIOM is based on StAX, the Streaming API for XML. StAX provides a pull-based mechanism for reading XML messages, which can be more efficient for larger messages. + +To increase reading performance on the `AxiomSoapMessageFactory`, you can set the `payloadCaching` property to false (default is true). Doing so causesthe contents of the SOAP body to be read directly from the socket stream. When this setting is enabled, the payload can be read only once. This means that you have to make sure that any pre-processing (logging or other work) of the message does not consume it. + +You can use the `AxiomSoapMessageFactory` as follows: + +``` + + + +``` + +In addition to payload caching, AXIOM supports full streaming messages, as defined in the `StreamingWebServiceMessage`. This means that you can directly set the payload on the response message, rather than writing it to a DOM tree or buffer. + +Full streaming for AXIOM is used when a handler method returns a JAXB2-supported object. It automatically sets this marshalled object into the response message and writes it out to the outgoing socket stream when the response is going out. + +For more information about full streaming, see the class-level Javadoc for `StreamingWebServiceMessage` and `StreamingPayload`. 
+ +##### SOAP 1.1 or 1.2 + +Both the `SaajSoapMessageFactory` and the `AxiomSoapMessageFactory` have a `soapVersion` property, where you can inject a `SoapVersion` constant. By default, the version is 1.1, but you can set it to 1.2: + +``` + + + + + + + + + +``` + +In the preceding example, we define a `SaajSoapMessageFactory` that accepts only SOAP 1.2 messages. + +| |Even though both versions of SOAP are quite similar in format, the 1.2 version is not backwards compatible with 1.1, because it uses a different XML namespace. Other major differences between SOAP 1.1 and 1.2 include the different structure of a fault and the fact that `SOAPAction` HTTP headers are effectively deprecated, though they still work.

    One important thing to note with SOAP version numbers (or WS-\* specification version numbers in general) is that the latest version of a specification is generally not the most popular version. For SOAP, this means that (currently) the best version to use is 1.1. Version 1.2 might become more popular in the future, but 1.1 is currently the safest bet.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.4. `MessageContext` + +Typically, messages come in pairs: a request and a response. A request is created on the client-side, which is sent over some transport to the server-side, where a response is generated. This response gets sent back to the client, where it is read. + +In Spring Web Services, such a conversation is contained in a `MessageContext`, which has properties to get request and response messages. On the client-side, the message context is created by the [`WebServiceTemplate`](#client-web-service-template). On the server-side, the message context is read from the transport-specific input stream. For example, in HTTP, it is read from the `HttpServletRequest`, and the response is written back to the `HttpServletResponse`. + +### 4.2. `TransportContext` + +One of the key properties of the SOAP protocol is that it tries to be transport-agnostic. 
This is why, for instance, Spring-WS does not support mapping messages to endpoints by HTTP request URL but rather by message content. + +However, it is sometimes necessary to get access to the underlying transport, either on the client or the server side. For this, Spring Web Services has the `TransportContext`. The transport context allows access to the underlying `WebServiceConnection`, which typically is a `HttpServletConnection` on the server side or a `HttpUrlConnection` or `CommonsHttpConnection` on the client side. For example, you can obtain the IP address of the current request in a server-side endpoint or interceptor: + +``` +TransportContext context = TransportContextHolder.getTransportContext(); +HttpServletConnection connection = (HttpServletConnection )context.getConnection(); +HttpServletRequest request = connection.getHttpServletRequest(); +String ipAddress = request.getRemoteAddr(); +``` + +### 4.3. Handling XML With XPath + +One of the best ways to handle XML is to use XPath. Quoting [[effective-xml]](#effective-xml), item 35: + +> XPath is a fourth generation declarative language that allows you to specify which nodes you want to process without specifying exactly how the processor is supposed to navigate to those nodes. XPath’s data model is very well designed to support exactly what almost all developers want from XML. For instance, it merges all adjacent text including that in CDATA sections, allows values to be calculated that skip over comments and processing instructions` and include text from child and descendant elements, and requires all external entity references to be resolved. In practice, XPath expressions tend to be much more robust against unexpected but perhaps insignificant changes in the input document. + +— Elliotte Rusty Harold + +Spring Web Services has two ways to use XPath within your application: the faster `XPathExpression` or the more flexible `XPathTemplate`. + +#### 4.3.1. 
`XPathExpression` + +The `XPathExpression` is an abstraction over a compiled XPath expression, such as the Java 5 `javax.xml.xpath.XPathExpression` interface or the Jaxen `XPath` class. To construct an expression in an application context, you can use `XPathExpressionFactoryBean`. The following example uses this factory bean: + +``` + + + + + + + + + + + +``` + +The preceding expression does not use namespaces, but we could set those by using the `namespaces` property of the factory bean. The expression can be used in the code as follows: + +``` +package sample; + +public class MyXPathClass { + + private final XPathExpression nameExpression; + + public MyXPathClass(XPathExpression nameExpression) { + this.nameExpression = nameExpression; + } + + public void doXPath(Document document) { + String name = nameExpression.evaluateAsString(document.getDocumentElement()); + System.out.println("Name: " + name); + } + +} +``` + +For a more flexible approach, you can use a `NodeMapper`, which is similar to the `RowMapper` in Spring’s JDBC support. 
The following example shows how to use it: + +``` +package sample; + +public class MyXPathClass { + + private final XPathExpression contactExpression; + + public MyXPathClass(XPathExpression contactExpression) { + this.contactExpression = contactExpression; + } + + public void doXPath(Document document) { + List contacts = contactExpression.evaluate(document, + new NodeMapper() { + public Object mapNode(Node node, int nodeNum) throws DOMException { + Element contactElement = (Element) node; + Element nameElement = (Element) contactElement.getElementsByTagName("Name").item(0); + Element phoneElement = (Element) contactElement.getElementsByTagName("Phone").item(0); + return new Contact(nameElement.getTextContent(), phoneElement.getTextContent()); + } + }); + PlainText Section qName; // do something with the list of Contact objects + } +} +``` + +Similar to mapping rows in Spring JDBC’s `RowMapper`, each result node is mapped by using an anonymous inner class. In this case, we create a `Contact` object, which we use later on. + +#### 4.3.2. `XPathTemplate` + +The `XPathExpression` lets you evaluate only a single, pre-compiled expression. A more flexible, though slower, alternative is the `XpathTemplate`. This class follows the common template pattern used throughout Spring (`JdbcTemplate`, `JmsTemplate`, and others). The following listing shows an example: + +``` +package sample; + +public class MyXPathClass { + + private XPathOperations template = new Jaxp13XPathTemplate(); + + public void doXPath(Source source) { + String name = template.evaluateAsString("/Contacts/Contact/Name", request); + // do something with name + } + +} +``` + +### 4.4. Message Logging and Tracing + +When developing or debugging a web service, it can be quite useful to look at the content of a (SOAP) message when it arrives or before it is sent. Spring Web Services offer this functionality, through the standard Commons Logging interface. 
+ +| |Make sure to use Commons Logging version 1.1 or higher. Earlier versions have class loading issues and do not integrate with the Log4J TRACE level.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------| + +To log all server-side messages, set the `org.springframework.ws.server.MessageTracing` logger level to `DEBUG` or `TRACE`. On the `DEBUG` level, only the payload root element is logged. On the `TRACE` level, the entire message content is logged. If you want to log only sent messages, use the `org.springframework.ws.server.MessageTracing.sent` logger. Similarly, you can use `org.springframework.ws.server.MessageTracing.received` to log only received messages. + +On the client-side, similar loggers exist: `org.springframework.ws.client.MessageTracing.sent` and `org.springframework.ws.client.MessageTracing.received`. + +The following example of a `log4j.properties` configuration file logs the full content of sent messages on the client side and only the payload root element for client-side received messages. 
On the server-side, the payload root is logged for both sent and received messages: + +``` +log4j.rootCategory=INFO, stdout +log4j.logger.org.springframework.ws.client.MessageTracing.sent=TRACE +log4j.logger.org.springframework.ws.client.MessageTracing.received=DEBUG + +log4j.logger.org.springframework.ws.server.MessageTracing=DEBUG + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%p [%c{3}] %m%n +``` + +With this configuration, a typical output is: + +``` +TRACE [client.MessageTracing.sent] Sent request [ +``` + +Alternatively, it can be a `@Bean` method in a `@Configuration` class: + +``` +@Bean +public SimpleWsdl11Definition orders() { + return new SimpleWsdl11Definition(new ClassPathResource("orders.wsdl")); +} +``` + +You can access the WSDL defined in the `orders.wsdl` file on the classpath through `GET` requests to a URL of the following form (substitute the host, port and servlet context path as appropriate): + +``` +http://localhost:8080/spring-ws/orders.wsdl +``` + +| |All `WsdlDefinition` bean definitions are exposed by the `MessageDispatcherServlet` under their bean name with a suffix of`.wsdl`. 
So, if the bean name is `echo`, the host name is `server`, and the Servlet context (war name) is `spring-ws`, the WSDL can be found at `[http://server/spring-ws/echo.wsdl](http://server/spring-ws/echo.wsdl)`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Another nice feature of the `MessageDispatcherServlet` (or more correctly the `WsdlDefinitionHandlerAdapter`) is that it can transform the value of the `location` of all the WSDL that it exposes to reflect the URL of the incoming request. + +Note that this `location` transformation feature is off by default. To switch this feature on, you need to specify an initialization parameter to the `MessageDispatcherServlet`: + +``` + + + + spring-ws + org.springframework.ws.transport.http.MessageDispatcherServlet + + transformWsdlLocations + true + + + + + spring-ws + /* + + + +``` + +If you use `AbstractAnnotationConfigMessageDispatcherServletInitializer`, enabling transformation is as simple as overriding the `isTransformWsdlLocations()` method to return `true`. + +Consult the class-level Javadoc on the [`WsdlDefinitionHandlerAdapter`](https://docs.spring.io/spring-ws/docs/current/org/springframework/ws/transport/http/WsdlDefinitionHandlerAdapter.html) class to learn more about the whole transformation process. + +As an alternative to writing the WSDL by hand and exposing it with ``, Spring Web Services can also generate a WSDL from an XSD schema. This is the approach shown in [Publishing the WSDL](#tutorial-publishing-wsdl). 
The next application context snippet shows how to create such a dynamic WSDL file: + +``` + + + +``` + +Alternatively, you can use the Java `@Bean` method: + +``` +@Bean +public DefaultWsdl11Definition orders() { + DefaultWsdl11Definition definition = new DefaultWsdl11Definition(); + definition.setPortTypeName("Orders"); + definition.setLocationUri("http://localhost:8080/ordersService/"); + definition.setSchema(new SimpleXsdSchema(new ClassPathResource("echo.xsd"))); + + return definition; +} +``` + +The `` element depends on the `DefaultWsdl11Definition` class. This definition class uses WSDL providers in the [`org.springframework.ws.wsdl.wsdl11.provider`](https://docs.spring.io/spring-ws/sites/1.5/apidocs/org/springframework/ws/wsdl/wsdl11/provider/package-summary.html) package and the [`ProviderBasedWsdl4jDefinition`](https://docs.spring.io/spring-ws/docs/current/org/springframework/ws/wsdl/wsdl11/ProviderBasedWsdl4jDefinition.html) class to generate a WSDL the first time it is requested. See the class-level Javadoc of these classes to see how you can extend this mechanism, if necessary. + +The `DefaultWsdl11Definition` (and therefore, the `` tag) builds a WSDL from an XSD schema by using conventions. It iterates over all `element` elements found in the schema and creates a `message` for all elements. Next, it creates a WSDL `operation` for all messages that end with the defined request or response suffix. The default request suffix is `Request`. The default response suffix is `Response`, though these can be changed by setting the `requestSuffix` and `responseSuffix` attributes on ``, respectively. It also builds a `portType`, `binding`, and `service` based on the operations. + +For instance, if our `Orders.xsd` schema defines the `GetOrdersRequest` and `GetOrdersResponse` elements, `` creates a `GetOrdersRequest` and `GetOrdersResponse` message and a `GetOrders` operation, which is put in a `Orders` port type. 
+ +To use multiple schemas, either by includes or imports, you can put Commons XMLSchema on the class path. If Commons XMLSchema is on the class path, the `` element follows all XSD imports and includes and inlines them in the WSDL as a single XSD. This greatly simplifies the deployment of the schemas, while still making it possible to edit them separately. + +| |Even though it can be handy to create the WSDL at runtime from your XSDs, there are a couple of drawbacks to this approach. First, though we try to keep the WSDL generation process consistent between releases, there is still the possibility that it changes (slightly). Second, the generation is a bit slow, though, once generated, the WSDL is cached for later reference.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Therefore, you should use `` only during the development stages of your project. We recommend using your browser to download the generated WSDL, store it in the project, and expose it with ``. This is the only way to be really sure that the WSDL does not change over time. + +#### 5.2.2. Wiring up Spring-WS in a `DispatcherServlet` + +As an alternative to the `MessageDispatcherServlet`, you can wire up a `MessageDispatcher` in a standard, Spring-Web MVC `DispatcherServlet`. By default, the `DispatcherServlet` can delegate only to `Controllers`, but we can instruct it to delegate to a `MessageDispatcher` by adding a `WebServiceMessageReceiverHandlerAdapter` to the servlet’s web application context: + +``` + + + + + + + + + ... 
+ + + + +``` + +Note that, by explicitly adding the `WebServiceMessageReceiverHandlerAdapter`, the dispatcher servlet does not load the default adapters and is unable to handle standard Spring-MVC `@Controllers`. Therefore, we add the `RequestMappingHandlerAdapter` at the end. + +In a similar fashion, you can wire a `WsdlDefinitionHandlerAdapter` to make sure the `DispatcherServlet` can handle implementations of the `WsdlDefinition` interface: + +``` + + + + + + + + + + myServiceDefinition + + + + + + + + + + + + ... + + +``` + +#### 5.2.3. JMS transport + +Spring Web Services supports server-side JMS handling through the JMS functionality provided in the Spring framework. Spring Web Services provides the `WebServiceMessageListener` to plug in to a `MessageListenerContainer`. This message listener requires a `WebServiceMessageFactory` and `MessageDispatcher` to operate. The following configuration example shows this: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +#### 5.2.4. Email Transport + +In addition to HTTP and JMS, Spring Web Services also provides server-side email handling. This functionality is provided through the `MailMessageReceiver` class. This class monitors a POP3 or IMAP folder, converts the email to a `WebServiceMessage`, and sends any response by using SMTP. You can configure the host names through the `storeUri`, which indicates the mail folder to monitor for requests (typically a POP3 or IMAP folder), and a `transportUri`, which indicates the server to use for sending responses (typically an SMTP server). + +You can configure how the `MailMessageReceiver` monitors incoming messages with a pluggable strategy: the `MonitoringStrategy`. By default, a polling strategy is used, where the incoming folder is polled for new messages every five minutes. You can change this interval by setting the `pollingInterval` property on the strategy. By default, all `MonitoringStrategy` implementations delete the handled messages. 
You can change this setting by setting the `deleteMessages` property. + +As an alternative to the polling approaches, which are quite inefficient, there is a monitoring strategy that uses IMAP IDLE. The IDLE command is an optional expansion of the IMAP email protocol that lets the mail server send new message updates to the `MailMessageReceiver` asynchronously. If you use an IMAP server that supports the IDLE command, you can plug the `ImapIdleMonitoringStrategy` into the `monitoringStrategy` property. In addition to a supporting server, you need to use JavaMail version 1.4.1 or higher. + +The following piece of configuration shows how to use the server-side email support, overriding the default polling interval to check every 30 seconds (30.000 milliseconds): + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +#### 5.2.5. Embedded HTTP Server transport + +Spring Web Services provides a transport based on Sun’s JRE 1.6 [HTTP server](http://java.sun.com/javase/6/docs/jre/api/net/httpserver/spec/index.html). The embedded HTTP Server is a standalone server that is simple to configure. It offers a lighter alternative to conventional servlet containers. + +When using the embedded HTTP server, you need no external deployment descriptor (`web.xml`). You need only define an instance of the server and configure it to handle incoming requests. The remoting module in the Core Spring Framework contains a convenient factory bean for the HTTP server: the `SimpleHttpServerFactoryBean`. The most important property is `contexts`, which maps context paths to corresponding `HttpHandler` instances. 
+ +Spring Web Services provides two implementations of the `HttpHandler` interface: [`WsdlDefinitionHttpHandler`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/transport/http/WsdlDefinitionHttpHandler.html) and [`WebServiceMessageReceiverHttpHandler`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/transport/http/WebServiceMessageReceiverHttpHandler.html). The former maps an incoming GET request to a `WsdlDefinition`. The latter is responsible for handling POST requests for web services messages and, thus, needs a `WebServiceMessageFactory` (typically a `SaajSoapMessageFactory`) and a `WebServiceMessageReceiver` (typically the `SoapMessageDispatcher`) to accomplish its task. + +To draw parallels with the servlet world, the `contexts` property plays the role of servlet mappings in `web.xml` and the `WebServiceMessageReceiverHttpHandler` is the equivalent of a `MessageDispatcherServlet`. + +The following snippet shows a configuration example of the HTTP server transport: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +For more information on the `SimpleHttpServerFactoryBean`, see the [Javadoc](http://static.springframework.org/spring/docs/2.5.x/api/org/springframework/remoting/support/SimpleHttpServerFactoryBean.html). + +#### 5.2.6. XMPP transport + +Spring Web Services 2.0 introduced support for XMPP, otherwise known as Jabber. The support is based on the [Smack](https://www.igniterealtime.org/projects/smack/index.jsp) library. + +Spring Web Services support for XMPP is very similar to the other transports: There is a a `XmppMessageSender` for the `WebServiceTemplate` and a `XmppMessageReceiver` to use with the `MessageDispatcher`. + +The following example shows how to set up the server-side XMPP components: + +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +#### 5.2.7. 
MTOM + +[MTOM](https://en.wikipedia.org/wiki/Message_Transmission_Optimization_Mechanism) is the mechanism for sending binary data to and from Web Services. You can look at how to implement this with Spring WS through the [MTOM sample](https://github.com/spring-projects/spring-ws-samples/tree/main/mtom). + +### 5.3. Endpoints + +Endpoints are the central concept in Spring-WS’s server-side support. Endpoints provide access to the application behavior, which is typically defined by a business service interface. An endpoint interprets the XML request message and uses that input to (typically) invoke a method on the business service. The result of that service invocation is represented as a response message. Spring-WS has a wide variety of endpoints and uses various ways to handle the XML message and to create a response. + +You can create an endpoint by annotating a class with the `@Endpoint` annotation. In the class, you define one or more methods that handle the incoming XML request, by using a wide variety of parameter types (such as DOM elements, JAXB2 objects, and others). You can indicate the sort of messages a method can handle by using another annotation (typically `@PayloadRoot`). 
+ +Consider the following sample endpoint: + +``` +package samples; + +import org.w3c.dom.Element; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.ws.server.endpoint.annotation.Endpoint; +import org.springframework.ws.server.endpoint.annotation.PayloadRoot; +import org.springframework.ws.soap.SoapHeader; + +@Endpoint (1) +public class AnnotationOrderEndpoint { + + private final OrderService orderService; + + @Autowired (2) + public AnnotationOrderEndpoint(OrderService orderService) { + this.orderService = orderService; + } + + @PayloadRoot(localPart = "order", namespace = "http://samples") (5) + public void order(@RequestPayload Element orderElement) { (3) + Order order = createOrder(orderElement); + orderService.createOrder(order); + } + + @PayloadRoot(localPart = "orderRequest", namespace = "http://samples") (5) + @ResponsePayload + public Order getOrder(@RequestPayload OrderRequest orderRequest, SoapHeader header) { (4) + checkSoapHeaderForSomething(header); + return orderService.getOrder(orderRequest.getId()); + } + + ... + +} +``` + +|**1**| The class is annotated with `@Endpoint`, marking it as a Spring-WS endpoint. | +|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| The constructor is marked with `@Autowired` so that the `OrderService` business service is injected into this endpoint. 
| +|**3**| The `order` method takes an `Element` (annotated with `@RequestPayload`) as a parameter. This means that the payload of the message is passed to this method as a DOM element. The method has a `void` return type, indicating that no response message is sent.
    For more information about endpoint methods, see [`@Endpoint` handling methods](#server-atEndpoint-methods). | +|**4**|The `getOrder` method takes an `OrderRequest` (also annotated with `@RequestPayload`) as a parameter. This parameter is a JAXB2-supported object (it is annotated with `@XmlRootElement`). This means that the payload of the message is passed to this method as an unmarshalled object. The `SoapHeader` type is also given as a parameter. On invocation, this parameter contains the SOAP header of the request message. The method is also annotated with `@ResponsePayload`, indicating that the return value (the `Order`) is used as the payload of the response message.
    For more information about endpoint methods, see [`@Endpoint` handling methods](#server-atEndpoint-methods).| +|**5**| The two handling methods of this endpoint are marked with `@PayloadRoot`, indicating what sort of request messages can be handled by the method: the `getOrder` method is invoked for requests with an `orderRequest` local name and a `[http://samples](http://samples)` namespace URI. The `order` method is invoked for requests with an `order` local name.
    For more information about `@PayloadRoot`, see [Endpoint mappings](#server-endpoint-mapping). | + +To enable the support for `@Endpoint` and related Spring-WS annotations, you need to add the following to your Spring application context: + +``` + + + * + + +``` + +Alternatively, if you use `@Configuration` classes instead of Spring XML, you can annotate your configuration class with `@EnableWs`: + +``` +@EnableWs +@Configuration +public class EchoConfig { + + // @Bean definitions go here + +} +``` + +To customize the `@EnableWs` configuration, you can implement `WsConfigurer` or, better yet, extend the `WsConfigurerAdapter`: + +``` +@Configuration +@EnableWs +@ComponentScan(basePackageClasses = { MyConfiguration.class }) +public class MyConfiguration extends WsConfigurerAdapter { + + @Override + public void addInterceptors(List interceptors) { + interceptors.add(new MyInterceptor()); + } + + @Override + public void addArgumentResolvers(List argumentResolvers) { + argumentResolvers.add(new MyArgumentResolver()); + } + + // More overridden methods ... +} +``` + +In the next couple of sections, a more elaborate description of the `@Endpoint` programming model is given. + +| |Endpoints, like any other Spring Bean, are scoped as a singleton by default. That is, one instance of the bean definition is created per container. Being a singleton implies that more than one thread can use it at the same time, so the endpoint has to be thread safe. 
If you want to use a different scope, such as prototype, see the [Spring Reference documentation](https://docs.spring.io/spring/docs/current/spring-framework-reference/core.html#beans-factory-scopes).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +``` +Note that all abstract base classes provided in Spring-WS are thread safe, unless otherwise indicated in the class-level Javadoc. +``` + +#### 5.3.1. `@Endpoint` handling methods + +For an endpoint to actually handle incoming XML messages, it needs to have one or more handling methods. Handling methods can take wide range of parameters and return types. However, they typically have one parameter that contains the message payload, and they return the payload of the response message (if any). This section covers which parameter and return types are supported. + +To indicate what sort of messages a method can handle, the method is typically annotated with either the `@PayloadRoot` or the `@SoapAction` annotation. You can learn more about these annotations in [Endpoint mappings](#server-endpoint-mapping). + +The following example shows a handling method: + +``` +@PayloadRoot(localPart = "order", namespace = "http://samples") +public void order(@RequestPayload Element orderElement) { + Order order = createOrder(orderElement); + orderService.createOrder(order); +} +``` + +The `order` method takes an `Element` (annotated with `@RequestPayload`) as a parameter. This means that the payload of the message is passed on this method as a DOM element. 
The method has a `void` return type, indicating that no response message is sent. + +##### Handling Method Parameters + +The handling method typically has one or more parameters that refer to various parts of the incoming XML message. Most commonly, the handling method has a single parameter that maps to the payload of the message, but it can also map to other parts of the request message, such as a SOAP header. This section describes the parameters you can use in your handling method signatures. + +To map a parameter to the payload of the request message, you need to annotate this parameter with the `@RequestPayload` annotation. This annotation tells Spring-WS that the parameter needs to be bound to the request payload. + +The following table describes the supported parameter types. It shows the supported types, whether the parameter should be annotated with `@RequestPayload`, and any additional notes. + +| Name | Supported parameter types |`@RequestPayload` required?| Additional notes | +|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------|-------------------------------------------------------------------------------------| +| TrAX | `javax.xml.transform.Source` and sub-interfaces (`DOMSource`, `SAXSource`, `StreamSource`, and `StAXSource`) | Yes | Enabled by default. | +| W3C DOM | `org.w3c.dom.Element` | Yes | Enabled by default | +| dom4j | `org.dom4j.Element` | Yes | Enabled when dom4j is on the classpath. | +| JDOM | `org.jdom.Element` | Yes | Enabled when JDOM is on the classpath. | +| XOM | `nu.xom.Element` | Yes | Enabled when XOM is on the classpath. 
| +| StAX | `javax.xml.stream.XMLStreamReader` and `javax.xml.stream.XMLEventReader` | Yes | Enabled when StAX is on the classpath. | +| XPath |Any boolean, double, `String`, `org.w3c.Node`, `org.w3c.dom.NodeList`, or type that can be converted from a `String` by a Spring [conversion service](https://docs.spring.io/spring/docs/current/spring-framework-reference/core.html#core-convert-ConversionService-API), and that is annotated with `@XPathParam`.| No | Enabled by default, see [the section called `XPathParam`](#server-xpath-param). | +|Message context| `org.springframework.ws.context.MessageContext` | No | Enabled by default. | +| SOAP | `org.springframework.ws.soap.SoapMessage`, `org.springframework.ws.soap.SoapBody`, `org.springframework.ws.soap.SoapEnvelope`, `org.springframework.ws.soap.SoapHeader`, and `org.springframework.ws.soap.SoapHeaderElement`s when used in combination with the `@SoapHeader` annotation. | No | Enabled by default. | +| JAXB2 | Any type that is annotated with `javax.xml.bind.annotation.XmlRootElement`, and `javax.xml.bind.JAXBElement`. | Yes | Enabled when JAXB2 is on the classpath. | +| OXM | Any type supported by a Spring OXM [`Unmarshaller`](https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#oxm-marshaller-unmarshaller). | Yes |Enabled when the `unmarshaller` attribute of `` is specified.| + +The next few examples show possible method signatures. The following method is invoked with the payload of the request message as a DOM `org.w3c.dom.Element`: + +``` +public void handle(@RequestPayload Element element) +``` + +The following method is invoked with the payload of the request message as a `javax.xml.transform.dom.DOMSource`. The `header` parameter is bound to the SOAP header of the request message. 
+ +``` +public void handle(@RequestPayload DOMSource domSource, SoapHeader header) +``` + +The following method is invoked with the payload of the request message unmarshalled into a `MyJaxb2Object` (which is annotated with `@XmlRootElement`). The payload of the message is also given as a DOM `Element`. The whole [message context](#message-context) is passed on as the third parameter. + +``` +public void handle(@RequestPayload MyJaxb2Object requestObject, @RequestPayload Element element, Message messageContext) +``` + +As you can see, there are a lot of possibilities when it comes to defining how to handle method signatures. You can even extend this mechanism to support your own parameter types. See the Javadoc of [`DefaultMethodEndpointAdapter`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/server/endpoint/adapter/DefaultMethodEndpointAdapter.html) and [`MethodArgumentResolver`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/server/endpoint/adapter/method/MethodArgumentResolver.html) to see how. + +###### `@XPathParam` + +One parameter type needs some extra explanation: `@XPathParam`. The idea here is that you annotate one or more method parameters with an XPath expression and that each such annotated parameter is bound to the evaluation of the expression. 
The following example shows how to do so: + +``` +package samples; + +import javax.xml.transform.Source; + +import org.springframework.ws.server.endpoint.annotation.Endpoint; +import org.springframework.ws.server.endpoint.annotation.Namespace; +import org.springframework.ws.server.endpoint.annotation.PayloadRoot; +import org.springframework.ws.server.endpoint.annotation.XPathParam; + +@Endpoint +public class AnnotationOrderEndpoint { + + private final OrderService orderService; + + public AnnotationOrderEndpoint(OrderService orderService) { + this.orderService = orderService; + } + + @PayloadRoot(localPart = "orderRequest", namespace = "http://samples") + @Namespace(prefix = "s", uri="http://samples") + public Order getOrder(@XPathParam("/s:orderRequest/@id") int orderId) { + Order order = orderService.getOrder(orderId); + // create Source from order and return it + } + +} +``` + +Since we use the `s` prefix in our XPath expression, we must bind it to the `[http://samples](http://samples)` namespace. This is accomplished with the `@Namespace` annotation. Alternatively, we could have placed this annotation on the type-level to use the same namespace mapping for all handler methods or even the package-level (in `package-info.java`) to use it for multiple endpoints. + +By using the `@XPathParam`, you can bind to all the data types supported by XPath: + +* `boolean` or `Boolean` + +* `double` or `Double` + +* `String` + +* `Node` + +* `NodeList` + +In addition to this list, you can use any type that can be converted from a `String` by a Spring [conversion service](https://docs.spring.io/spring/docs/current/spring-framework-reference/core.html#core-convert-ConversionService-API). + +##### Handling method return types + +To send a response message, the handling needs to specify a return type. If no response message is required, the method can declare a `void` return type. Most commonly, the return type is used to create the payload of the response message. 
However, you can also map to other parts of the response message. This section describes the return types you can use in your handling method signatures. + +To map the return value to the payload of the response message, you need to annotate the method with the `@ResponsePayload` annotation. This annotation tells Spring-WS that the return value needs to be bound to the response payload. + +The following table describes the supported return types. It shows the supported types, whether the parameter should be annotated with `@ResponsePayload`, and any additional notes. + +| Name | Supported return types |`@ResponsePayload` required?| Additional notes | +|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------|-----------------------------------------------------------------------------------| +|No response| `void` | No | Enabled by default. | +| TrAX | `javax.xml.transform.Source` and sub-interfaces (`DOMSource`, `SAXSource`, `StreamSource`, and `StAXSource`) | Yes | Enabled by default. | +| W3C DOM | `org.w3c.dom.Element` | Yes | Enabled by default | +| dom4j | `org.dom4j.Element` | Yes | Enabled when dom4j is on the classpath. | +| JDOM | `org.jdom.Element` | Yes | Enabled when JDOM is on the classpath. | +| XOM | `nu.xom.Element` | Yes | Enabled when XOM is on the classpath. | +| JAXB2 | Any type that is annotated with `javax.xml.bind.annotation.XmlRootElement`, and `javax.xml.bind.JAXBElement`. | Yes | Enabled when JAXB2 is on the classpath. | +| OXM |Any type supported by a Spring OXM [`Marshaller`](https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#oxm-marshaller-unmarshaller).| Yes |Enabled when the `marshaller` attribute of `` is specified.| + +There are a lot of possibilities when it comes to defining handling method signatures. 
It is even possible to extend this mechanism to support your own parameter types. See the class-level Javadoc of [`DefaultMethodEndpointAdapter`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/server/endpoint/adapter/DefaultMethodEndpointAdapter.html) and [`MethodReturnValueHandler`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/server/endpoint/adapter/method/MethodReturnValueHandler.html) to see how. + +### 5.4. Endpoint mappings + +The endpoint mapping is responsible for mapping incoming messages to appropriate endpoints. Some endpoint mappings are enabled by default — for example, the `PayloadRootAnnotationMethodEndpointMapping` or the `SoapActionAnnotationMethodEndpointMapping`. However, we first need to examine the general concept of an `EndpointMapping`. + +An `EndpointMapping` delivers a `EndpointInvocationChain`, which contains the endpoint that matches the incoming request and may also contain a list of endpoint interceptors that are applied to the request and response. When a request comes in, the `MessageDispatcher` hands it over to the endpoint mapping to let it inspect the request and come up with an appropriate `EndpointInvocationChain`. Then the `MessageDispatcher` invokes the endpoint and any interceptors in the chain. + +The concept of configurable endpoint mappings that can optionally contain interceptors (which can, in turn, manipulate the request, the response, or both) is extremely powerful. A lot of supporting functionality can be built into custom `EndpointMapping` implementations. For example, a custom endpoint mapping could choose an endpoint based not only on the contents of a message but also on a specific SOAP header (or, indeed, multiple SOAP headers). + +Most endpoint mappings inherit from the `AbstractEndpointMapping`, which offers an ‘interceptors’ property, which is the list of interceptors to use. 
`EndpointInterceptors` are discussed in [Intercepting Requests — the `EndpointInterceptor` Interface](#server-endpoint-interceptor). Additionally, there is the `defaultEndpoint`, which is the default endpoint to use when this endpoint mapping does not result in a matching endpoint. + +As explained in [Endpoints](#server-endpoints), the `@Endpoint` style lets you handle multiple requests in one endpoint class. This is the responsibility of the `MethodEndpointMapping`. This mapping determines which method is to be invoked for an incoming request message. + +There are two endpoint mappings that can direct requests to methods: the `PayloadRootAnnotationMethodEndpointMapping` and the `SoapActionAnnotationMethodEndpointMapping`. You can enable both methods by using `` in your application context. + +The `PayloadRootAnnotationMethodEndpointMapping` uses the `@PayloadRoot` annotation, with the `localPart` and `namespace` elements, to mark methods with a particular qualified name. Whenever a message comes in with this qualified name for the payload root element, the method is invoked. For an example, see [above](#server-payload-root-annotation). + +Alternatively, the `SoapActionAnnotationMethodEndpointMapping` uses the `@SoapAction` annotation to mark methods with a particular SOAP Action. Whenever a message comes in with this `SOAPAction` header, the method is invoked. + +#### 5.4.1. WS-Addressing + +WS-Addressing specifies a transport-neutral routing mechanism. It is based on the `To` and `Action` SOAP headers, which indicate the destination and intent of the SOAP message, respectively. Additionally, WS-Addressing lets you define a return address (for normal messages and for faults) and a unique message identifier, which can be used for correlation. For more information on WS-Addressing, see [https://en.wikipedia.org/wiki/WS-Addressing](https://en.wikipedia.org/wiki/WS-Addressing). 
The following example shows a WS-Addressing message: + +``` + + + urn:uuid:21363e0d-2645-4eb7-8afd-2f5ee1bb25cf + + http://example.com/business/client1 + + http://example/com/fabrikam + http://example.com/fabrikam/mail/Delete + + + + 42 + + + +``` + +In the preceding example, the destination is set to `[http://example/com/fabrikam](http://example/com/fabrikam)`, while the action is set to `[http://example.com/fabrikam/mail/Delete](http://example.com/fabrikam/mail/Delete)`. Additionally, there is a message identifier and a reply-to address. By default, this address is the “anonymous” address, indicating that a response should be sent by using the same channel as the request (that is, the HTTP response), but it can also be another address, as indicated in this example. + +In Spring Web Services, WS-Addressing is implemented as an endpoint mapping. By using this mapping, you associate WS-Addressing actions with endpoints, similar to the `SoapActionAnnotationMethodEndpointMapping` described earlier. + +##### Using `AnnotationActionEndpointMapping` + +The `AnnotationActionEndpointMapping` is similar to the `SoapActionAnnotationMethodEndpointMapping` but uses WS-Addressing headers instead of the SOAP Action transport header. + +To use the `AnnotationActionEndpointMapping`, annotate the handling methods with the `@Action` annotation, similar to the `@PayloadRoot` and `@SoapAction` annotations described in [`@Endpoint` handling methods](#server-atEndpoint-methods) and [Endpoint mappings](#server-endpoint-mapping). 
The following example shows how to do so: + +``` +package samples; + +import org.springframework.ws.server.endpoint.annotation.Endpoint; +import org.springframework.ws.soap.addressing.server.annotation.Action; + +@Endpoint +public class AnnotationOrderEndpoint { + private final OrderService orderService; + + public AnnotationOrderEndpoint(OrderService orderService) { + this.orderService = orderService; + } + + @Action("http://samples/RequestOrder") + public Order getOrder(OrderRequest orderRequest) { + return orderService.getOrder(orderRequest.getId()); + } + + @Action("http://samples/CreateOrder") + public void order(Order order) { + orderService.createOrder(order); + } + +} +``` + +The preceding mapping routes requests that have a WS-Addressing `Action` of `[http://samples/RequestOrder](http://samples/RequestOrder)` to the `getOrder` method. Requests with `[http://samples/CreateOrder](http://samples/CreateOrder)` are routed to the `order` method. + +By default, the `AnnotationActionEndpointMapping` supports both the 1.0 (May 2006), and the August 2004 editions of WS-Addressing. These two versions are most popular and are interoperable with Axis 1 and 2, JAX-WS, XFire, Windows Communication Foundation (WCF), and Windows Services Enhancements (WSE) 3.0. If necessary, specific versions of the spec can be injected into the `versions` property. + +In addition to the `@Action` annotation, you can annotate the class with the `@Address` annotation. If set, the value is compared to the `To` header property of the incoming message. + +Finally, there is the `messageSenders` property, which is required for sending response messages to non-anonymous, out-of-band addresses. You can set `MessageSender` implementations in this property, the same as you would on the `WebServiceTemplate`. See [URIs and Transports](#client-transports). + +#### 5.4.2. 
Intercepting Requests — the `EndpointInterceptor` Interface + +The endpoint mapping mechanism has the notion of endpoint interceptors. These can be extremely useful when you want to apply specific functionality to certain requests — for example, dealing with security-related SOAP headers or the logging of request and response messages. + +Endpoint interceptors are typically defined by using a `` element in your application context. In this element, you can define endpoint interceptor beans that apply to all endpoints defined in that application context. Alternatively, you can use `` or `` elements to specify for which payload root name or SOAP action the interceptor should apply. The following example shows how to do so: + +``` + + + + + + + + + + + + +``` + +In the preceding example, we define one “global” interceptor (`MyGlobalInterceptor`) that intercepts all requests and responses. We also define an interceptor that applies only to XML messages that have the `[http://www.example.com](http://www.example.com)` as a payload root namespace. We could have defined a `localPart` attribute in addition to the `namespaceUri` to further limit the messages to which the interceptor applies. Finally, we define two interceptors that apply when the message has a `[http://www.example.com/SoapAction](http://www.example.com/SoapAction)` SOAP action. Notice how the second interceptor is actually a reference to a bean definition outside of the `` element. You can use bean references anywhere inside the `` element. + +When you use `@Configuration` classes, you can extend from `WsConfigurerAdapter` to add interceptors: + +``` +@Configuration +@EnableWs +public class MyWsConfiguration extends WsConfigurerAdapter { + + @Override + public void addInterceptors(List interceptors) { + interceptors.add(new MyPayloadRootInterceptor()); + } + +} +``` + +Interceptors must implement the `EndpointInterceptor` interface from the `org.springframework.ws.server` package. 
This interface defines three methods, one that can be used for handling the request message **before** the actual endpoint is processed, one that can be used for handling a normal response message, and one that can be used for handling fault messages. The second two are called **after** the endpoint is processed. These three methods should provide enough flexibility to do all kinds of pre- and post-processing. + +The `handleRequest(..)` method on the interceptor returns a boolean value. You can use this method to interrupt or continue the processing of the invocation chain. When this method returns `true`, the endpoint processing chain will continue. When it returns `false`, the `MessageDispatcher` interprets this to mean that the interceptor itself has taken care of things and does not continue processing the other interceptors and the actual endpoint in the invocation chain. The `handleResponse(..)` and `handleFault(..)` methods also have a boolean return value. When these methods return `false`, the response will not be sent back to the client. + +There are a number of standard `EndpointInterceptor` implementations that you can use in your Web service. Additionally, there is the `XwsSecurityInterceptor`, which is described in [`XwsSecurityInterceptor`](#security-xws-security-interceptor). + +##### `PayloadLoggingInterceptor` and `SoapEnvelopeLoggingInterceptor` + +When developing a web service, it can be useful to log the incoming and outgoing XML messages. Spring WS facilitates this with the `PayloadLoggingInterceptor` and `SoapEnvelopeLoggingInterceptor` classes. The former logs only the payload of the message to the Commons Logging Log. The latter logs the entire SOAP envelope, including SOAP headers. 
The following example shows how to define the `PayloadLoggingInterceptor` in an endpoint mapping: + +``` + + + +``` + +Both of these interceptors have two properties, `logRequest` and `logResponse`, which can be set to `false` to disable logging for either request or response messages. + +You could use the `WsConfigurerAdapter` approach, as described earlier, for the `PayloadLoggingInterceptor` as well. + +##### `PayloadValidatingInterceptor` + +One of the benefits of using a contract-first development style is that we can use the schema to validate incoming and outgoing XML messages. Spring-WS facilitates this with the `PayloadValidatingInterceptor`. This interceptor requires a reference to one or more W3C XML or RELAX NG schemas and can be set to validate requests, responses, or both. + +| |Note that request validation may sound like a good idea, but it makes the resulting Web service very strict. Usually, it is not really important whether the request validates, only if the endpoint can get sufficient information to fulfill a request. Validating the response is a good idea, because the endpoint should adhere to its schema. Remember Postel’s Law:
    "Be conservative in what you do; be liberal in what you accept from others."| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example uses the `PayloadValidatingInterceptor`. In this example, we use the schema in `/WEB-INF/orders.xsd` to validate the response but not the request. Note that the `PayloadValidatingInterceptor` can also accept multiple schemas by setting the `schemas` property. + +``` + + + + + +``` + +Of course, you could use the `WsConfigurerAdapter` approach, as described earlier, for the `PayloadValidatingInterceptor` as well. + +##### Using `PayloadTransformingInterceptor` + +To transform the payload to another XML format, Spring Web Services offers the `PayloadTransformingInterceptor`. This endpoint interceptor is based on XSLT style sheets and is especially useful when supporting multiple versions of a web service, because you can transform the older message format to the newer format. The following example uses the `PayloadTransformingInterceptor`: + +``` + + + + +``` + +In the preceding example, we transform requests by using `/WEB-INF/oldRequests.xslt` and response messages by using `/WEB-INF/oldResponses.xslt`. Note that, since endpoint interceptors are registered at the endpoint-mapping level, you can create an endpoint mapping that applies to the “old style” messages and add the interceptor to that mapping. Hence, the transformation applies only to these “old style” message. + +You could use the `WsConfigurerAdapter` approach, as described earlier, for the `PayloadTransformingInterceptor` as well. + +### 5.5. 
Handling Exceptions + +Spring-WS provides `EndpointExceptionResolvers` to ease the pain of unexpected exceptions occurring while your message is being processed by an endpoint that matched the request. Endpoint exception resolvers somewhat resemble the exception mappings that can be defined in the web application descriptor `web.xml`. However, they provide a more flexible way to handle exceptions. They provide information about what endpoint was invoked when the exception was thrown. Furthermore, a programmatic way of handling exceptions gives you many more options for how to respond appropriately. Rather than expose the innards of your application by giving an exception and stack trace, you can handle the exception any way you want — for example, by returning a SOAP fault with a specific fault code and string. + +Endpoint exception resolvers are automatically picked up by the `MessageDispatcher`, so no explicit configuration is necessary. + +Besides implementing the `EndpointExceptionResolver` interface, which is only a matter of implementing the `resolveException(MessageContext, endpoint, Exception)` method, you may also use one of the provided implementations. The simplest implementation is the `SimpleSoapExceptionResolver`, which creates a SOAP 1.1 Server or SOAP 1.2 Receiver fault and uses the exception message as the fault string. The `SimpleSoapExceptionResolver` is the default, but it can be overridden by explicitly adding another resolver. + +#### 5.5.1. `SoapFaultMappingExceptionResolver` + +The `SoapFaultMappingExceptionResolver` is a more sophisticated implementation. This resolver lets you take the class name of any exception that might be thrown and map it to a SOAP Fault: + +``` + + + + + + org.springframework.oxm.ValidationFailureException=CLIENT,Invalid request + + + + +``` + +The key values and default endpoint use a format of `faultCode,faultString,locale`, where only the fault code is required. 
If the fault string is not set, it defaults to the exception message. If the language is not set, it defaults to English. The preceding configuration maps exceptions of type `ValidationFailureException` to a client-side SOAP fault with a fault string of `Invalid request`, as follows: + +``` + + + + SOAP-ENV:Client + Invalid request + + + +``` + +If any other exception occurs, it returns the default fault: a server-side fault with the exception message as the fault string. + +#### 5.5.2. Using `SoapFaultAnnotationExceptionResolver` + +You can also annotate exception classes with the `@SoapFault` annotation, to indicate the SOAP fault that should be returned whenever that exception is thrown. For these annotations to be picked up, you need to add the `SoapFaultAnnotationExceptionResolver` to your application context. The elements of the annotation include a fault code enumeration, fault string or reason, and language. The following example shows such an exception: + +``` +package samples; + +import org.springframework.ws.soap.server.endpoint.annotation.FaultCode; +import org.springframework.ws.soap.server.endpoint.annotation.SoapFault; + +@SoapFault(faultCode = FaultCode.SERVER) +public class MyBusinessException extends Exception { + + public MyBusinessException(String message) { + super(message); + } +} +``` + +Whenever the `MyBusinessException` is thrown with the constructor string `"Oops!"` during endpoint invocation, it results in the following response: + +``` + + + + SOAP-ENV:Server + Oops! + + + +``` + +### 5.6. Server-side Testing + +When it comes to testing your Web service endpoints, you have two possible approaches: + +* Write Unit Tests, where you provide (mock) arguments for your endpoint to consume. + + The advantage of this approach is that it is quite easy to accomplish (especially for classes annotated with `@Endpoint`). The disadvantage is that you are not really testing the exact content of the XML messages that are sent over the wire. 
+ +* Write Integration Tests, which do test the contents of the message. + +The first approach can easily be accomplished with mocking frameworks such as EasyMock, JMock, and others. The next section focuses on writing integration tests, using the test features introduced in Spring Web Services 2.0. + +#### 5.6.1. Writing server-side integration tests + +Spring Web Services 2.0 introduced support for creating endpoint integration tests. In this context, an endpoint is a class that handles (SOAP) messages (see [Endpoints](#server-endpoints)). + +The integration test support lives in the `org.springframework.ws.test.server` package. The core class in that package is the `MockWebServiceClient`. The underlying idea is that this client creates a request message and then sends it over to the endpoints that are configured in a standard `MessageDispatcherServlet` application context (see [`MessageDispatcherServlet`](#message-dispatcher-servlet)). These endpoints handle the message and create a response. The client then receives this response and verifies it against registered expectations. + +The typical usage of the `MockWebServiceClient` is: + +1. Create a `MockWebServiceClient` instance by calling `MockWebServiceClient.createClient(ApplicationContext)` or `MockWebServiceClient.createClient(WebServiceMessageReceiver, WebServiceMessageFactory)`. + +2. Send request messages by calling `sendRequest(RequestCreator)`, possibly by using the default `RequestCreator` implementations provided in `RequestCreators` (which can be statically imported). + +3. Set up response expectations by calling `andExpect(ResponseMatcher)`, possibly by using the default `ResponseMatcher` implementations provided in `ResponseMatchers` (which can be statically imported). Multiple expectations can be set up by chaining `andExpect(ResponseMatcher)` calls. 
+ +| |Note that the `MockWebServiceClient` (and related classes) offers a “fluent” API, so you can typically use the code-completion features in your IDE to guide you through the process of setting up the mock server.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Also note that you can rely on the standard logging features available in Spring Web Services in your unit tests. Sometimes, it might be useful to inspect the request or response message to find out why a particular tests failed. See [Message Logging and Tracing](#logging) for more information.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Consider, for example, the following web service endpoint class: + +``` +import org.springframework.ws.server.endpoint.annotation.Endpoint; +import org.springframework.ws.server.endpoint.annotation.RequestPayload; +import org.springframework.ws.server.endpoint.annotation.ResponsePayload; + +@Endpoint (1) +public class CustomerEndpoint { + + @ResponsePayload (2) + public CustomerCountResponse getCustomerCount( (2) + @RequestPayload CustomerCountRequest request) { (2) + CustomerCountResponse response = new CustomerCountResponse(); + response.setCustomerCount(10); + return response; + } + +} +``` + +|**1**| The `CustomerEndpoint` in annotated with `@Endpoint`. See [Endpoints](#server-endpoints). 
| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|The `getCustomerCount()` method takes a `CustomerCountRequest` as its argument and returns a `CustomerCountResponse`. Both of these classes are objects supported by a marshaller. For instance, they can have a `@XmlRootElement` annotation to be supported by JAXB2.| + +The following example shows a typical test for `CustomerEndpoint`: + +``` +import javax.xml.transform.Source; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.ApplicationContext; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.xml.transform.StringSource; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +import org.springframework.ws.test.server.MockWebServiceClient; (1) +import static org.springframework.ws.test.server.RequestCreators.*; (1) +import static org.springframework.ws.test.server.ResponseMatchers.*; (1) + +@RunWith(SpringJUnit4ClassRunner.class) (2) +@ContextConfiguration("spring-ws-servlet.xml") (2) +public class CustomerEndpointIntegrationTest { + + @Autowired + private ApplicationContext applicationContext; (3) + + private MockWebServiceClient mockClient; + + @Before + public void createClient() { + mockClient = MockWebServiceClient.createClient(applicationContext); (4) + } + + @Test + public void customerEndpoint() throws Exception { + Source requestPayload = new StringSource( + "" + + "John Doe" + + ""); + Source responsePayload = new StringSource( + "" + + "10" + + ""); + + mockClient.sendRequest(withPayload(requestPayload)). 
(5) + andExpect(payload(responsePayload)); (5) + } +} +``` + +|**1**| The `CustomerEndpointIntegrationTest` imports the `MockWebServiceClient` and statically imports `RequestCreators` and `ResponseMatchers`. | +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| This test uses the standard testing facilities provided in the Spring Framework. This is not required but is generally the easiest way to set up the test. | +|**3**| The application context is a standard Spring-WS application context (see [`MessageDispatcherServlet`](#message-dispatcher-servlet)), read from `spring-ws-servlet.xml`. In this case, the application context contains a bean definition for `CustomerEndpoint` (or perhaps a `` is used). | +|**4**| In a `@Before` method, we create a `MockWebServiceClient` by using the `createClient` factory method. | +|**5**|We send a request by calling `sendRequest()` with a `withPayload()` `RequestCreator` provided by the statically imported `RequestCreators` (see [Using `RequestCreator` and `RequestCreators`](#server-test-request-creator)).

    We also set up response expectations by calling `andExpect()` with a `payload()` `ResponseMatcher` provided by the statically imported `ResponseMatchers` (see [Using `ResponseMatcher` and `ResponseMatchers`](#server-test-response-matcher)).

    This part of the test might look a bit confusing, but the code completion features of your IDE are of great help. After typing `sendRequest(`, your IDE can provide you with a list of possible request creating strategies, provided you statically imported `RequestCreators`. The same applies to `andExpect()`, provided you statically imported `ResponseMatchers`.| + +#### 5.6.2. Using `RequestCreator` and `RequestCreators` + +Initially, the `MockWebServiceClient` needs to create a request message for the endpoint to consume. The client uses the `RequestCreator` strategy interface for this purpose: + +``` +public interface RequestCreator { + + WebServiceMessage createRequest(WebServiceMessageFactory messageFactory) + throws IOException; + +} +``` + +You can write your own implementations of this interface, creating a request message by using the message factory, but you certainly do not have to. The `RequestCreators` class provides a way to create a `RequestCreator` based on a given payload in the `withPayload()` method. You typically statically import `RequestCreators`. + +#### 5.6.3. Using `ResponseMatcher` and `ResponseMatchers` + +When the request message has been processed by the endpoint and a response has been received, the `MockWebServiceClient` can verify whether this response message meets certain expectations. The client uses the `ResponseMatcher` strategy interface for this purpose: + +``` +public interface ResponseMatcher { + + void match(WebServiceMessage request, + WebServiceMessage response) + throws IOException, AssertionError; + +} +``` + +Once again, you can write your own implementations of this interface, throwing `AssertionError` instances when the message does not meet your expectations, but you certainly do not have to, as the `ResponseMatchers` class provides standard `ResponseMatcher` implementations for you to use in your tests. You typically statically import this class. 
+ +The `ResponseMatchers` class provides the following response matchers: + +| `ResponseMatchers` method | Description | +|---------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------| +| `payload()` | Expects a given response payload. | +| `validPayload()` | Expects the response payload to validate against given XSD schemas. | +| `xpath()` |Expects a given XPath expression to exist, not exist, or evaluate to a given value.| +| `soapHeader()` | Expects a given SOAP header to exist in the response message. | +| `noFault()` | Expects that the response message does not contain a SOAP Fault. | +|`mustUnderstandFault()`, `clientOrSenderFault()`, `serverOrReceiverFault()`, and `versionMismatchFault()`| Expects the response message to contain a specific SOAP Fault. | + +You can set up multiple response expectations by chaining `andExpect()` calls: + +``` +mockClient.sendRequest(...). + andExpect(payload(expectedResponsePayload)). + andExpect(validPayload(schemaResource)); +``` + +For more information on the response matchers provided by `ResponseMatchers`, see the [Javadoc](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/test/server/ResponseMatchers.html). + +## 6. Using Spring Web Services on the Client + +Spring-WS provides a client-side Web service API that allows for consistent, XML-driven access to web services. It also caters to the use of marshallers and unmarshallers so that your service-tier code can deal exclusively with Java objects. + +The `org.springframework.ws.client.core` package provides the core functionality for using the client-side access API. It contains template classes that simplify the use of Web services, much like the core Spring `JdbcTemplate` does for JDBC. 
The design principle common to Spring template classes is to provide helper methods to perform common operations and, for more sophisticated usage, delegate to user implemented callback interfaces. The web service template follows the same design. The classes offer various convenience methods for + +* Sending and receiving of XML messages + +* Marshalling objects to XML before sending + +* Allowing for multiple transport options + +### 6.1. Using the Client-side API + +This section describes how to use the client-side API. For how to use the server-side API, see [Creating a Web service with Spring-WS](#server). + +#### 6.1.1. `WebServiceTemplate` + +The `WebServiceTemplate` is the core class for client-side web service access in Spring-WS. It contains methods for sending `Source` objects and receiving response messages as either `Source` or `Result`. Additionally, it can marshal objects to XML before sending them across a transport and unmarshal any response XML into an object again. + +##### URIs and Transports + +The `WebServiceTemplate` class uses a URI as the message destination. You can either set a `defaultUri` property on the template itself or explicitly supply a URI when calling a method on the template. The URI is resolved into a `WebServiceMessageSender`, which is responsible for sending the XML message across a transport layer. You can set one or more message senders by using the `messageSender` or `messageSenders` properties of the `WebServiceTemplate` class. + +###### HTTP transports + +There are two implementations of the `WebServiceMessageSender` interface for sending messages over HTTP. The default implementation is the `HttpUrlConnectionMessageSender`, which uses the facilities provided by Java itself. The alternative is the `HttpComponentsMessageSender`, which uses the [Apache HttpComponents HttpClient](https://hc.apache.org/httpcomponents-client-ga). 
Use the latter if you need more advanced and easy-to-use functionality (such as authentication, HTTP connection pooling, and so forth). + +To use the HTTP transport, either set the `defaultUri` to something like `[http://example.com/services](http://example.com/services)` or supply the `uri` parameter for one of the methods. + +The following example shows how to use default configuration for HTTP transports: + +``` + + + + + + + + + + +``` + +The following example shows how to override the default configuration and how to use Apache HttpClient to authenticate with HTTP authentication: + +``` + + + + + + + + + + + + + +``` + +###### JMS transport + +For sending messages over JMS, Spring Web Services provides `JmsMessageSender`. This class uses the facilities of the Spring framework to transform the `WebServiceMessage` into a JMS `Message`, send it on its way on a `Queue` or `Topic`, and receive a response (if any). + +To use `JmsMessageSender`, you need to set the `defaultUri` or `uri` parameter to a JMS URI, which — at a minimum — consists of the `jms:` prefix and a destination name. Some examples of JMS URIs are: `jms:SomeQueue`, `jms:SomeTopic?priority=3&deliveryMode=NON_PERSISTENT`, and `jms:RequestQueue?replyToName=ResponseName`. For more information on this URI syntax, see the [Javadoc for `JmsMessageSender`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/transport/jms/JmsMessageSender.html). + +By default, the `JmsMessageSender` sends JMS `BytesMessage`, but you can override this to use `TextMessages` by using the `messageType` parameter on the JMS URI — for example, `jms:Queue?messageType=TEXT_MESSAGE`. Note that `BytesMessages` are the preferred type, because `TextMessages` do not support attachments and character encodings reliably. 
+ +The following example shows how to use the JMS transport in combination with an ActiveMQ connection factory: + +``` + + + + + + + + + + + + + + + + + + + +``` + +###### Email Transport + +Spring Web Services also provides an email transport, which you can use to send web service messages over SMTP and retrieve them over either POP3 or IMAP. The client-side email functionality is contained in the `MailMessageSender` class. This class creates an email message from the request `WebServiceMessage` and sends it over SMTP. It then waits for a response message to arrive at the incoming POP3 or IMAP server. + +To use the `MailMessageSender`, set the `defaultUri` or `uri` parameter to a `mailto` URI — for example, `mailto:[[email protected]](/cdn-cgi/l/email-protection)` or `mailto:[[email protected]](/cdn-cgi/l/email-protection)?subject=SOAP%20Test`. Make sure that the message sender is properly configured with a `transportUri`, which indicates the server to use for sending requests (typically a SMTP server), and a `storeUri`, which indicates the server to poll for responses (typically a POP3 or IMAP server). + +The following example shows how to use the email transport: + +``` + + + + + + + + + + + + + + + + + +``` + +###### XMPP Transport + +Spring Web Services 2.0 introduced an XMPP (Jabber) transport, which you can use to send and receive web service messages over XMPP. The client-side XMPP functionality is contained in the `XmppMessageSender` class. This class creates an XMPP message from the request `WebServiceMessage` and sends it over XMPP. It then listens for a response message to arrive. + +To use the `XmppMessageSender`, set the `defaultUri` or `uri` parameter to a `xmpp` URI — for example, `xmpp:[[email protected]](/cdn-cgi/l/email-protection)`. The sender also requires an `XMPPConnection` to work, which can be conveniently created by using the `org.springframework.ws.transport.xmpp.support.XmppConnectionFactoryBean`. 
+ +The following example shows how to use the XMPP transport: + +``` + + + + + + + + + + + + + + + + + + + + + +``` + +##### Message factories + +In addition to a message sender, the `WebServiceTemplate` requires a web service message factory. There are two message factories for SOAP: `SaajSoapMessageFactory` and `AxiomSoapMessageFactory`. If no message factory is specified (by setting the `messageFactory` property), Spring-WS uses the `SaajSoapMessageFactory` by default. + +#### 6.1.2. Sending and Receiving a `WebServiceMessage` + +The `WebServiceTemplate` contains many convenience methods to send and receive web service messages. There are methods that accept and return a `Source` and those that return a `Result`. Additionally, there are methods that marshal and unmarshal objects to XML. The following example sends a simple XML message to a web service: + +``` +import java.io.StringReader; +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.stream.StreamSource; + +import org.springframework.ws.WebServiceMessageFactory; +import org.springframework.ws.client.core.WebServiceTemplate; +import org.springframework.ws.transport.WebServiceMessageSender; + +public class WebServiceClient { + + private static final String MESSAGE = + "Hello, Web Service World"; + + private final WebServiceTemplate webServiceTemplate = new WebServiceTemplate(); + + public void setDefaultUri(String defaultUri) { + webServiceTemplate.setDefaultUri(defaultUri); + } + + // send to the configured default URI + public void simpleSendAndReceive() { + StreamSource source = new StreamSource(new StringReader(MESSAGE)); + StreamResult result = new StreamResult(System.out); + webServiceTemplate.sendSourceAndReceiveToResult(source, result); + } + + // send to an explicit URI + public void customSendAndReceive() { + StreamSource source = new StreamSource(new StringReader(MESSAGE)); + StreamResult result = new StreamResult(System.out); + 
webServiceTemplate.sendSourceAndReceiveToResult("http://localhost:8080/AnotherWebService", + source, result); + } + +} +``` + +``` + + + + + + + +``` + +The preceding example uses the `WebServiceTemplate` to send a “Hello, World” message to the web service located at `[http://localhost:8080/WebService](http://localhost:8080/WebService)` (in the case of the `simpleSendAndReceive()` method) and writes the result to the console. The `WebServiceTemplate` is injected with the default URI, which is used because no URI was supplied explicitly in the Java code. + +Note that the `WebServiceTemplate` class is thread-safe once configured (assuming that all of its dependencies are also thread-safe, which is the case for all of the dependencies that ship with Spring-WS), so multiple objects can use the same shared `WebServiceTemplate` instance. The `WebServiceTemplate` exposes a zero-argument constructor and `messageFactory` and `messageSender` bean properties that you can use to construct the instance (by using a Spring container or plain Java code). Alternatively, consider deriving from Spring-WS’s `WebServiceGatewaySupport` convenience base class, which exposes convenient bean properties to enable easy configuration. (You do not have to extend this base class. It is provided as a convenience class only.) + +#### 6.1.3. Sending and Receiving POJOs — Marshalling and Unmarshalling + +To facilitate the sending of plain Java objects, the `WebServiceTemplate` has a number of `send(..)` methods that take an `Object` as an argument for a message’s data content. The method `marshalSendAndReceive(..)` in the `WebServiceTemplate` class delegates the conversion of the request object to XML to a `Marshaller` and the conversion of the response XML to an object to an `Unmarshaller`. 
(For more information about marshalling and unmarshalling, see
If not specified, the `To` header defaults to the URL of the connection being made. + +The following example sets the `Action` header to `[http://samples/RequestOrder](http://samples/RequestOrder)`: + +``` +webServiceTemplate.marshalSendAndReceive(o, new ActionCallback("http://samples/RequestOrder")); +``` + +#### 6.1.5. Using `WebServiceMessageExtractor` + +The `WebServiceMessageExtractor` interface is a low-level callback interface that you have full control over the process to extract an `Object` from a received `WebServiceMessage`. The `WebServiceTemplate` invokes the `extractData(..)` method on a supplied `WebServiceMessageExtractor` while the underlying connection to the serving resource is still open. The following example shows the `WebServiceMessageExtractor` in action: + +``` +public void marshalWithSoapActionHeader(final Source s) { + final Transformer transformer = transformerFactory.newTransformer(); + webServiceTemplate.sendAndReceive(new WebServiceMessageCallback() { + public void doWithMessage(WebServiceMessage message) { + transformer.transform(s, message.getPayloadResult()); + }, + new WebServiceMessageExtractor() { + public Object extractData(WebServiceMessage message) throws IOException { + // do your own transforms with message.getPayloadResult() + // or message.getPayloadSource() + } + } + }); +} +``` + +### 6.2. Client-side Testing + +When it comes to testing your Web service clients (that is, classes that use the `WebServiceTemplate` to access a Web service), you have two possible approaches: + +* Write unit tests, which mock away the `WebServiceTemplate` class, `WebServiceOperations` interface, or the complete client class. + + The advantage of this approach is that it s easy to accomplish. The disadvantage is that you are not really testing the exact content of the XML messages that are sent over the wire, especially when mocking out the entire client class. + +* Write integrations tests, which do test the contents of the message. 
+ +The first approach can easily be accomplished with mocking frameworks, such as EasyMock, JMock, and others. The next section focuses on writing integration tests, using the test features introduced in Spring Web Services 2.0. + +#### 6.2.1. Writing Client-side Integration Tests + +Spring Web Services 2.0 introduced support for creating Web service client integration tests. In this context, a client is a class that uses the `WebServiceTemplate` to access a web service. + +The integration test support lives in the `org.springframework.ws.test.client` package. The core class in that package is the `MockWebServiceServer`. The underlying idea is that the web service template connects to this mock server and sends it a request message, which the mock server then verifies against the registered expectations. If the expectations are met, the mock server then prepares a response message, which is sent back to the template. + +The typical usage of the `MockWebServiceServer` is: . + +1. Create a `MockWebServiceServer` instance by calling `MockWebServiceServer.createServer(WebServiceTemplate)`, `MockWebServiceServer.createServer(WebServiceGatewaySupport)`, or `MockWebServiceServer.createServer(ApplicationContext)`. + +2. Set up request expectations by calling `expect(RequestMatcher)`, possibly by using the default `RequestMatcher` implementations provided in `RequestMatchers` (which can be statically imported). Multiple expectations can be set up by chaining `andExpect(RequestMatcher)` calls. + +3. Create an appropriate response message by calling `andRespond(ResponseCreator)`, possibly by using the default `ResponseCreator` implementations provided in `ResponseCreators` (which can be statically imported). + +4. Use the `WebServiceTemplate` as normal, either directly of through client code. + +5. Call `MockWebServiceServer.verify()` to make sure that all expectations have been met. 
+ +| |Note that the `MockWebServiceServer` (and related classes) offers a 'fluent' API, so you can typically use the code-completion features in your IDE to guide you through the process of setting up the mock server.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Also note that you can rely on the standard logging features available in Spring Web Services in your unit tests. Sometimes, it might be useful to inspect the request or response message to find out why a particular tests failed. See [Message Logging and Tracing](#logging) for more information.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Consider, for example, the following Web service client class: + +``` +import org.springframework.ws.client.core.support.WebServiceGatewaySupport; + +public class CustomerClient extends WebServiceGatewaySupport { (1) + + public int getCustomerCount() { + CustomerCountRequest request = new CustomerCountRequest(); (2) + request.setCustomerName("John Doe"); + + CustomerCountResponse response = + (CustomerCountResponse) getWebServiceTemplate().marshalSendAndReceive(request); (3) + + return response.getCustomerCount(); + } + +} +``` + +|**1**| The `CustomerClient` extends `WebServiceGatewaySupport`, which provides it with a `webServiceTemplate` property. 
| +|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| `CustomerCountRequest` is an object supported by a marshaller. For instance, it can have an `@XmlRootElement` annotation to be supported by JAXB2. | +|**3**|The `CustomerClient` uses the `WebServiceTemplate` offered by `WebServiceGatewaySupport` to marshal the request object into a SOAP message and sends that to the web service. The response object is unmarshalled into a `CustomerCountResponse`.| + +The following example shows a typical test for `CustomerClient`: + +``` +import javax.xml.transform.Source; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.xml.transform.StringSource; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; + +import static org.junit.Assert.assertEquals; + +import org.springframework.ws.test.client.MockWebServiceServer; (1) +import static org.springframework.ws.test.client.RequestMatchers.*; (1) +import static org.springframework.ws.test.client.ResponseCreators.*; (1) + +@RunWith(SpringJUnit4ClassRunner.class) (2) +@ContextConfiguration("integration-test.xml") (2) +public class CustomerClientIntegrationTest { + + @Autowired + private CustomerClient client; (3) + + private MockWebServiceServer mockServer; (4) + + @Before + public void createServer() throws Exception { + mockServer = MockWebServiceServer.createServer(client); + } + + @Test + public void customerClient() throws Exception { + Source requestPayload = new StringSource( + "" + + "John Doe" + + ""); + Source responsePayload = new StringSource( + "" + + "10" + + ""); + + 
mockServer.expect(payload(requestPayload)).andRespond(withPayload(responsePayload));(5) + + int result = client.getCustomerCount(); (6) + assertEquals(10, result); (6) + + mockServer.verify(); (7) + } + +} +``` + +|**1**| The `CustomerClientIntegrationTest` imports the `MockWebServiceServer` and statically imports `RequestMatchers` and `ResponseCreators`. | +|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| This test uses the standard testing facilities provided in the Spring Framework. This is not required but is generally the easiest way to set up the test. | +|**3**| The `CustomerClient` is configured in `integration-test.xml` and wired into this test using `@Autowired`. | +|**4**| In a `@Before` method, we create a `MockWebServiceServer` by using the `createServer` factory method. | +|**5**|We define expectations by calling `expect()` with a `payload()` `RequestMatcher` provided by the statically imported `RequestMatchers` (see [Using `RequestMatcher` and `RequestMatchers`](#client-test-request-matcher)).

    We also set up a response by calling `andRespond()` with a `withPayload()` `ResponseCreator` provided by the statically imported `ResponseCreators` (see [Using `ResponseCreator` and `ResponseCreators`](#client-test-response-creator)).

    This part of the test might look a bit confusing, but the code-completion features of your IDE are of great help. After you type `expect(`, your IDE can provide you with a list of possible request matching strategies, provided you statically imported `RequestMatchers`. The same applies to `andRespond(`, provided you statically imported `ResponseCreators`.| +|**6**| We call `getCustomerCount()` on the `CustomerClient`, thus using the `WebServiceTemplate`. The template has been set up for “testing mode” by now, so no real (HTTP) connection is made by this method call. We also make some JUnit assertions based on the result of the method call. | +|**7**| We call `verify()` on the `MockWebServiceServer`, verifying that the expected message was actually received. | + +#### 6.2.2. Using `RequestMatcher` and `RequestMatchers` + +To verify whether the request message meets certain expectations, the `MockWebServiceServer` uses the `RequestMatcher` strategy interface. The contract defined by this interface is as follows: + +``` +public interface RequestMatcher { + + void match(URI uri, + WebServiceMessage request) + throws IOException, + AssertionError; +} +``` + +You can write your own implementations of this interface, throwing `AssertionError` exceptions when the message does not meet your expectations, but you certainly do not have to. The `RequestMatchers` class provides standard `RequestMatcher` implementations for you to use in your tests. You typically statically import this class. + +The `RequestMatchers` class provides the following request matchers: + +|`RequestMatchers` method| Description | +|------------------------|-----------------------------------------------------------------------------------| +| `anything()` | Expects any sort of request. | +| `payload()` | Expects a given request payload. | +| `validPayload()` | Expects the request payload to validate against given XSD schemas. 
| +| `xpath()` |Expects a given XPath expression to exist, not exist, or evaluate to a given value.| +| `soapHeader()` | Expects a given SOAP header to exist in the request message. | +| `connectionTo()` | Expects a connection to the given URL. | + +You can set up multiple request expectations by chaining `andExpect()` calls: + +``` +mockServer.expect(connectionTo("http://example.com")). + andExpect(payload(expectedRequestPayload)). + andExpect(validPayload(schemaResource)). + andRespond(...); +``` + +For more information on the request matchers provided by `RequestMatchers`, see the [Javadoc](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/test/client/RequestMatchers.html). + +#### 6.2.3. Using `ResponseCreator` and `ResponseCreators` + +When the request message has been verified and meets the defined expectations, the `MockWebServiceServer` creates a response message for the `WebServiceTemplate` to consume. The server uses the `ResponseCreator` strategy interface for this purpose: + +``` +public interface ResponseCreator { + + WebServiceMessage createResponse(URI uri, + WebServiceMessage request, + WebServiceMessageFactory messageFactory) + throws IOException; + +} +``` + +Once again, you can write your own implementations of this interface, creating a response message by using the message factory, but you certainly do not have to, as the `ResponseCreators` class provides standard `ResponseCreator` implementations for you to use in your tests. You typically statically import this class. + +The `ResponseCreators` class provides the following responses: + +| `ResponseCreators` method | Description | +|------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------| +| `withPayload()` | Creates a response message with a given payload. 
| +| `withError()` | Creates an error in the response connection. This method gives you the opportunity to test your error handling. | +| `withException()` |Throws an exception when reading from the response connection. This method gives you the opportunity to test your exception handling.| +|`withMustUnderstandFault()`, `withClientOrSenderFault()`, `withServerOrReceiverFault()`, or `withVersionMismatchFault()`| Creates a response message with a given SOAP fault. This method gives you the opportunity to test your Fault handling. | + +For more information on the request matchers provided by `RequestMatchers`, see the [Javadoc](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/test/client/RequestMatchers.html). + +## 7. Securing Your Web services with Spring-WS + +This chapter explains how to add WS-Security aspects to your Web services. We focus on the three different areas of WS-Security: + +* **Authentication**: This is the process of determining whether a principal is who they claim to be. In this context, a “principal” generally means a user, device or some other system that can perform an action in your application. + +* **Digital signatures**: The digital signature of a message is a piece of information based on both the document and the signer’s private key. It is created through the use of a hash function and a private signing function (encrypting with the signer’s private key). + +* **Encryption and Decryption**: Encryption is the process of transforming data into a form that is impossible to read without the appropriate key. It is mainly used to keep information hidden from anyone for whom it is not intended. Decryption is the reverse of encryption. It is the process of transforming encrypted data back into an readable form. 
+ +These three areas are implemented by using the `XwsSecurityInterceptor` or `Wss4jSecurityInterceptor`, which we describe in [`XwsSecurityInterceptor`](#security-xws-security-interceptor) and [Using `Wss4jSecurityInterceptor`](#security-wss4j-security-interceptor), respectively + +| |Note that WS-Security (especially encryption and signing) requires substantial amounts of memory and can decrease performance. If performance is important to you, you might want to consider not using WS-Security or using HTTP-based security.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 7.1. `XwsSecurityInterceptor` + +The `XwsSecurityInterceptor` is an `EndpointInterceptor` (see [Intercepting Requests — the `EndpointInterceptor` Interface](#server-endpoint-interceptor)) that is based on SUN’s XML and Web Services Security package (XWSS). This WS-Security implementation is part of the Java Web Services Developer Pack ([Java WSDP](http://java.sun.com/webservices/)). + +Like any other endpoint interceptor, it is defined in the endpoint mapping (see [Endpoint mappings](#server-endpoint-mapping)). This means that you can be selective about adding WS-Security support. Some endpoint mappings require it, while others do not. + +| |Note that XWSS requires both a SUN 1.5 JDK and the SUN SAAJ reference implementation. The WSS4J interceptor does not have these requirements (see [Using `Wss4jSecurityInterceptor`](#security-wss4j-security-interceptor)).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `XwsSecurityInterceptor` requires a security policy file to operate. 
This XML file tells the interceptor what security aspects to require from incoming SOAP messages and what aspects to add to outgoing messages. The basic format of the policy file is explained in the following sections, but you can find a more in-depth tutorial [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp564887). You can set the policy with the `policyConfiguration` property, which requires a Spring resource. The policy file can contain multiple elements — for example, require a username token on incoming messages and sign all outgoing messages. It contains a `SecurityConfiguration` element (not a `JAXRPCSecurity` element) as its root. + +Additionally, the security interceptor requires one or more `CallbackHandler` instances to operate. These handlers are used to retrieve certificates, private keys, validate user credentials, and so on. Spring-WS offers handlers for most common security concerns — for example, authenticating against a Spring Security authentication manager and signing outgoing messages based on a X509 certificate. The following sections indicate what callback handler to use for which security concern. You can set the callback handlers by using the `callbackHandler` or `callbackHandlers` property. + +The following example that shows how to wire up the `XwsSecurityInterceptor`: + +``` + + + + + + + + + + + ... + +``` + +This interceptor is configured by using the `securityPolicy.xml` file on the classpath. It uses two callback handlers that are defined later in the file. + +#### 7.1.1. Keystores + +For most cryptographic operations, you an use the standard `java.security.KeyStore` objects. These operations include certificate verification, message signing, signature verification, and encryption. They exclude username and time-stamp verification. This section aims to give you some background knowledge on keystores and the Java tools that you can use to store keys and certificates in a keystore file. 
This information is mostly not related to Spring-WS but to the general cryptographic features of Java. + +The `java.security.KeyStore` class represents a storage facility for cryptographic keys and certificates. It can contain three different sort of elements: + +* **Private Keys**: These keys are used for self-authentication. The private key is accompanied by a certificate chain for the corresponding public key. Within the field of WS-Security, this accounts for message signing and message decryption. + +* **Symmetric Keys**: Symmetric (or secret) keys are also used for message encryption and decryption — the difference being that both sides (sender and recipient) share the same secret key. + +* **Trusted certificates**: These X509 certificates are called a “trusted certificate” because the keystore owner trusts that the public key in the certificates does indeed belong to the owner of the certificate. Within WS-Security, these certificates are used for certificate validation, signature verification, and encryption. + +##### Using `keytool` + +The `keytool` program, a key and certificate management utility, is supplied with your Java Virtual Machine. You can use this tool to create new keystores, add new private keys and certificates to them, and so on. It is beyond the scope of this document to provide a full reference of the `keytool` command, but you can find a reference [here](http://java.sun.com/j2se/1.5.0/docs/tooldocs/windows/keytool.html) or by using the `keytool -help` command on the command line. + +##### Using `KeyStoreFactoryBean` + +To easily load a keystore by using Spring configuration, you can use the `KeyStoreFactoryBean`. It has a resource location property, which you can set to point to the path of the keystore to load. A password may be given to check the integrity of the keystore data. If a password is not given, integrity checking is not performed. 
The following listing configures a `KeyStoreFactoryBean`: + +``` + + + + +``` + +| |If you do not specify the location property, a new, empty keystore is created, which is most likely not what you want.| +|---|----------------------------------------------------------------------------------------------------------------------| + +##### KeyStoreCallbackHandler + +To use the keystores within a `XwsSecurityInterceptor`, you need to define a `KeyStoreCallbackHandler`. This callback has three properties with type `keystore`: (`keyStore`,`trustStore`, and `symmetricStore`). The exact stores used by the handler depend on the cryptographic operations that are to be performed by this handler. For private key operation, the `keyStore` is used. For symmetric key operations, the `symmetricStore` is used. For determining trust relationships, the `trustStore` is used. The following table indicates this: + +| Cryptographic operation | Keystore used | +|------------------------------------------|-----------------------------------| +| Certificate validation |First `keyStore`, then `trustStore`| +| Decryption based on private key | `keyStore` | +| Decryption based on symmetric key | `symmetricStore` | +|Encryption based on public key certificate| `trustStore` | +| Encryption based on symmetric key | `symmetricStore` | +| Signing | `keyStore` | +| Signature verification | `trustStore` | + +Additionally, the `KeyStoreCallbackHandler` has a `privateKeyPassword` property, which should be set to unlock the private keys contained in the`keyStore`. + +If the `symmetricStore` is not set, it defaults to the `keyStore`. If the key or trust store is not set, the callback handler uses the standard Java mechanism to load or create it. See the JavaDoc of the `KeyStoreCallbackHandler` to know how this mechanism works. 
+ +For instance, if you want to use the `KeyStoreCallbackHandler` to validate incoming certificates or signatures, you can use a trust store: + +``` + + + + + + + + + + +``` + +If you want to use it to decrypt incoming certificates or sign outgoing messages, you can use a key store: + +``` + + + + + + + + + + + +``` + +The following sections indicate where the `KeyStoreCallbackHandler` can be used and which properties to set for particular cryptographic operations. + +#### 7.1.2. Authentication + +As stated in the [introduction to this chapter](#security), authentication is the task of determining whether a principal is who they claim to be. Within WS-Security, authentication can take two forms: using a username and password token (using either a plain text password or a password digest) or using a X509 certificate. + +##### Plain Text Username Authentication + +The simplest form of username authentication uses plain text passwords. In this scenario, the SOAP message contains a `UsernameToken` element, which itself contains a `Username` element and a `Password` element which contains the plain text password. Plain text authentication can be compared to the basic authentication provided by HTTP servers. + +| |Note that plain text passwords are not very secure. Therefore, you should always add additional security measures to your transport layer if you use them (using HTTPS instead of plain HTTP, for instance).| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To require that every incoming message contains a `UsernameToken` with a plain text password, the security policy file should contain a `RequireUsernameToken` element, with the `passwordDigestRequired` attribute set to `false`. 
You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp567459). The following listing shows how to include a `RequireUsernameToken` element: + +``` + + ... + + ... + +``` + +If the username token is not present, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If it is present, it fires a `PasswordValidationCallback` with a `PlainTextPasswordRequest` to the registered handlers. Within Spring-WS, there are three classes that handle this particular callback. + +* [`SimplePasswordValidationCallbackHandler`](#security-simple-password-validation-callback-handler) + +* [`SpringPlainTextPasswordValidationCallbackHandler`](#using-springplaintextpasswordvalidationcallbackhandler) + +* [`JaasPlainTextPasswordValidationCallbackHandler`](#using-jaasplaintextpasswordvalidationcallbackhandler) + +###### Using `SimplePasswordValidationCallbackHandler` + +The simplest password validation handler is the `SimplePasswordValidationCallbackHandler`. This handler validates passwords against an in-memory `Properties` object, which you can specify byusing the `users` property: + +``` + + + + Ernie + + + +``` + +In this case, we are allowing only the user, "Bert", to log in by using the password, "Ernie". + +###### Using `SpringPlainTextPasswordValidationCallbackHandler` + +The `SpringPlainTextPasswordValidationCallbackHandler` uses [Spring Security](https://spring.io/projects/spring-security) to authenticate users. It is beyond the scope of this document to describe Spring Security, but it is a full-fledged security framework. You can read more about it in the [Spring Security reference documentation](https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/). + +The `SpringPlainTextPasswordValidationCallbackHandler` requires an `AuthenticationManager` to operate. It uses this manager to authenticate against a `UsernamePasswordAuthenticationToken` that it creates. 
If authentication is successful, the token is stored in the `SecurityContextHolder`. You can set the authentication manager by using the `authenticationManager` property: + +``` + + + + + + + + + + + + + + + ... + +``` + +###### Using `JaasPlainTextPasswordValidationCallbackHandler` + +The `JaasPlainTextPasswordValidationCallbackHandler` is based on the standard [Java Authentication and Authorization Service](http://java.sun.com/products/jaas/). It is beyond the scope of this document to provide a full introduction into JAAS, but a [good tutorial](http://www.javaworld.com/javaworld/jw-09-2002/jw-0913-jaas.html) is available. + +The `JaasPlainTextPasswordValidationCallbackHandler` requires only a `loginContextName` to operate. It creates a new JAAS `LoginContext` by using this name and handles the standard JAAS `NameCallback` and `PasswordCallback` by using the username and password provided in the SOAP message. This means that this callback handler integrates with any JAAS `LoginModule` that fires these callbacks during the `login()` phase, which is standard behavior. + +You can wire up a `JaasPlainTextPasswordValidationCallbackHandler` as follows: + +``` + + + +``` + +In this case, the callback handler uses the `LoginContext` named `MyLoginModule`. This module should be defined in your `jaas.config` file, as explained in the [tutorial mentioned earlier](http://www.javaworld.com/javaworld/jw-09-2002/jw-0913-jaas.html). + +##### Digest Username Authentication + +When using password digests, the SOAP message also contains a `UsernameToken` element, which itself contains a `Username` element and a `Password` element. The difference is that the password is not sent as plain text, but as a digest. The recipient compares this digest to the digest he calculated from the known password of the user, and, if they are the same, the user is authenticated. This method is comparable to the digest authentication provided by HTTP servers. 
+ +To require that every incoming message contains a `UsernameToken` element with a password digest, the security policy file should contain a `RequireUsernameToken` element, with the `passwordDigestRequired` attribute set to `true`. Additionally, the `nonceRequired` attribute should be set to `true`: You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp567459). The following listing shows how to define a `RequireUsernameToken` element: + +``` + + ... + + ... + +``` + +If the username token is not present, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If it is present, it fires a `PasswordValidationCallback` with a `DigestPasswordRequest` to the registered handlers. Within Spring-WS, two classes handle this particular callback: `SimplePasswordValidationCallbackHandler` and `SpringDigestPasswordValidationCallbackHandler`. + +###### Using `SimplePasswordValidationCallbackHandler` + +The `SimplePasswordValidationCallbackHandler` can handle both plain text passwords as well as password digests. It is described in [Using `SimplePasswordValidationCallbackHandler`](#security-simple-password-validation-callback-handler). + +###### Using `SpringDigestPasswordValidationCallbackHandler` + +The `SpringDigestPasswordValidationCallbackHandler` requires a Spring Security `UserDetailService` to operate. It uses this service to retrieve the password of the user specified in the token. The digest of the password contained in this details object is then compared with the digest in the message. If they are equal, the user has successfully authenticated, and a `UsernamePasswordAuthenticationToken` is stored in the `SecurityContextHolder`. You can set the service by using the `userDetailsService` property. Additionally, you can set a `userCache` property, to cache loaded user details. The following example shows how to do so: + +``` + + + + + + + ... 
+ +``` + +##### Certificate Authentication + +A more secure way of authentication uses X509 certificates. In this scenario, the SOAP message contains a`BinarySecurityToken`, which contains a Base 64-encoded version of a X509 certificate. The certificate is used by the recipient to authenticate. The certificate stored in the message is also used to sign the message (see [Verifying Signatures](#security-verifying-signatures)). + +To make sure that all incoming SOAP messages carry a`BinarySecurityToken`, the security policy file should contain a `RequireSignature` element. This element can further carry other elements, which are covered in [Verifying Signatures](#security-verifying-signatures). You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565769). The following listing shows how to define a `RequireSignature` element: + +``` + + ... + + ... + +``` + +When a message arrives that carries no certificate, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If it is present, it fires a `CertificateValidationCallback`. Three handlers within Spring-WS handle this callback for authentication purposes: + +* [`KeyStoreCallbackHandler`](#using-keystorecallbackhandler) + +* [`SpringCertificateValidationCallbackHandler`](#using-springcertificatevalidationcallbackhandler) + +* [`JaasCertificateValidationCallbackHandler`](#using-jaascertificatevalidationcallbackhandler) + +| |In most cases, certificate authentication should be preceded by certificate validation, since you want to authenticate against only valid certificates. Invalid certificates, such as certificates for which the expiration date has passed or which are not in your store of trusted certificates, should be ignored.

    In Spring-WS terms, this means that the `SpringCertificateValidationCallbackHandler` or `JaasCertificateValidationCallbackHandler` should be preceded by `KeyStoreCallbackHandler`. This can be accomplished by setting the order of the `callbackHandlers` property in the configuration of the `XwsSecurityInterceptor`:

    ```
    class="org.springframework.ws.soap.security.xwss.XwsSecurityInterceptor">








    ```

    Using this setup, the interceptor first determines if the certificate in the message is valid by using the keystore and then authenticates against it.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +###### Using `KeyStoreCallbackHandler` + +The `KeyStoreCallbackHandler` uses a standard Java keystore to validate certificates. This certificate validation process consists of the following steps: + +1. The handler checks whether the certificate is in the private `keyStore`. If it is, it is valid. + +2. If the certificate is not in the private keystore, the handler checks whether the current date and time are within the validity period given in the certificate. If they are not, the certificate is invalid. If it is, it continues with the final step. + +3. 
A certification path for the certificate is created. This basically means that the handler determines whether the certificate has been issued by any of the certificate authorities in the `trustStore`. If a certification path can be built successfully, the certificate is valid. Otherwise, the certificate is not valid. + +To use the `KeyStoreCallbackHandler` for certificate validation purposes, you most likely need to set only the `trustStore` property: + +``` + + + + + + + + + + +``` + +Using the setup shown in the preceding example, the certificate that is to be validated must be in the trust store itself or the trust store must contain a certificate authority that issued the certificate. + +###### Using `SpringCertificateValidationCallbackHandler` + +The `SpringCertificateValidationCallbackHandler` requires a Spring Security `AuthenticationManager` to operate. It uses this manager to authenticate against an `X509AuthenticationToken` that it creates. The configured authentication manager is expected to supply a provider that can handle this token (usually an instance of `X509AuthenticationProvider`). If authentication is successful, the token is stored in the `SecurityContextHolder`. You can set the authentication manager by using the `authenticationManager` property: + +``` + + + + + + + + + + + + + + + + + + + ... + +``` + +In this case, we use a custom user details service to obtain authentication details based on the certificate. See the [Spring Security reference documentation](http://www.springframework.org/security) for more information about authentication against X509 certificates. + +###### Using `JaasCertificateValidationCallbackHandler` + +The `JaasCertificateValidationCallbackHandler` requires a `loginContextName` to operate. It creates a new JAAS `LoginContext` by using this name and the `X500Principal` of the certificate. This means that this callback handler integrates with any JAAS `LoginModule` that handles X500 principals. 
+ +You can wire up a `JaasCertificateValidationCallbackHandler` as follows: + +``` + + MyLoginModule + +``` + +In this case, the callback handler uses the `LoginContext` named `MyLoginModule`. This module should be defined in your `jaas.config` file and should be able to authenticate against X500 principals. + +#### 7.1.3. Digital Signatures + +The digital signature of a message is a piece of information based on both the document and the signer’s private key. Two main tasks are related to signatures in WS-Security: verifying signatures and signing messages. + +##### Verifying Signatures + +As with [certificate-based authentication](#security-certificate-authentication), a signed message contains a `BinarySecurityToken`, which contains the certificate used to sign the message. Additionally, it contains a `SignedInfo` block, which indicates what part of the message was signed. + +To make sure that all incoming SOAP messages carry a `BinarySecurityToken`, the security policy file should contain a `RequireSignature` element. It can also contain a `SignatureTarget` element, which specifies the target message part that was expected to be signed and various other subelements. You can also define the private key alias to use, whether to use a symmetric instead of a private key, and many other properties. You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565769). The following listing configures a `RequireSignature` element: + +``` + + + +``` + +If the signature is not present, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If it is present, it fires a `SignatureVerificationKeyCallback` to the registered handlers. Within Spring-WS, one class handles this particular callback: `KeyStoreCallbackHandler`. 
+ +###### Using `KeyStoreCallbackHandler` + +As described in [KeyStoreCallbackHandler](#security-key-store-callback-handler), `KeyStoreCallbackHandler` uses a `java.security.KeyStore` for handling various cryptographic callbacks, including signature verification. For signature verification, the handler uses the `trustStore` property: + +``` + + + + + + + + + + +``` + +##### Signing Messages + +When signing a message, the `XwsSecurityInterceptor` adds the `BinarySecurityToken` to the message. It also adds a `SignedInfo` block, which indicates what part of the message was signed. + +To sign all outgoing SOAP messages, the security policy file should contain a `Sign` element. It can also contain a `SignatureTarget` element, which specifies the target message part that was expected to be signed and various other subelements. You can also define the private key alias to use, whether to use a symmetric instead of a private key, and many other properties. You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565497). The following example includes a `Sign` element: + +``` + + + +``` + +The `XwsSecurityInterceptor` fires a `SignatureKeyCallback` to the registered handlers. Within Spring-WS, the `KeyStoreCallbackHandler` class handles this particular callback. + +###### Using `KeyStoreCallbackHandler` + +As described in [KeyStoreCallbackHandler](#security-key-store-callback-handler), the `KeyStoreCallbackHandler` uses a `java.security.KeyStore` to handle various cryptographic callbacks, including signing messages. For adding signatures, the handler uses the `keyStore` property. Additionally, you must set the `privateKeyPassword` property to unlock the private key used for signing. The following example uses a `KeyStoreCallbackHandler`: + +``` + + + + + + + + + + + +``` + +#### 7.1.4. 
Decryption and Encryption + +When encrypting, the message is transformed into a form that can be read only with the appropriate key. The message can be decrypted to reveal the original, readable message. + +##### Decryption + +To decrypt incoming SOAP messages, the security policy file should contain a `RequireEncryption` element. This element can further carry an `EncryptionTarget` element that indicates which part of the message should be encrypted and a `SymmetricKey` to indicate that a shared secret instead of the regular private key should be used to decrypt the message. You can read a description of the other elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565951). The following example uses a `RequireEncryption` element: + +``` + + + +``` + +If an incoming message is not encrypted, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If it is present, it fires a `DecryptionKeyCallback` to the registered handlers. Within Spring-WS, the `KeyStoreCallbackHandler` class handles this particular callback. + +###### Using `KeyStoreCallbackHandler` + +As described in [KeyStoreCallbackHandler](#security-key-store-callback-handler), the `KeyStoreCallbackHandler` uses a `java.security.KeyStore` to handle various cryptographic callbacks, including decryption. For decryption, the handler uses the `keyStore` property. Additionally, you must set the `privateKeyPassword` property to unlock the private key used for decryption. For decryption based on symmetric keys, it uses the `symmetricStore`. The following example uses `KeyStoreCallbackHandler`: + +``` + + + + + + + + + + + +``` + +##### Encryption + +To encrypt outgoing SOAP messages, the security policy file should contain an `Encrypt` element. 
This element can further carry a `EncryptionTarget` element that indicates which part of the message should be encrypted and a `SymmetricKey` to indicate that a shared secret instead of the regular public key should be used to encrypt the message. You can read a description of the other elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565951). The following example uses an `Encrypt` element: + +``` + + + +``` + +The `XwsSecurityInterceptor` fires an `EncryptionKeyCallback` to the registered handlers to retrieve the encryption information. Within Spring-WS, the `KeyStoreCallbackHandler` class handles this particular callback. + +###### Using `KeyStoreCallbackHandler` + +As described in [KeyStoreCallbackHandler](#security-key-store-callback-handler), the `KeyStoreCallbackHandler` uses a `java.security.KeyStore` to handle various cryptographic callbacks, including encryption. For encryption based on public keys, the handler uses the `trustStore` property. For encryption based on symmetric keys, it uses `symmetricStore`. The following example uses `KeyStoreCallbackHandler`: + +``` + + + + + + + + + + +``` + +#### 7.1.5. Security Exception Handling + +When a securement or validation action fails, the `XwsSecurityInterceptor` throws a `WsSecuritySecurementException` or `WsSecurityValidationException` respectively. These exceptions bypass the [standard exception handling mechanism](#server-endpoint-exception-resolver) but are handled by the interceptor itself. + +`WsSecuritySecurementException` exceptions are handled by the `handleSecurementException` method of the `XwsSecurityInterceptor`. By default, this method logs an error and stops further processing of the message. + +Similarly, `WsSecurityValidationException` exceptions are handled by the `handleValidationException` method of the `XwsSecurityInterceptor`. By default, this method creates a SOAP 1.1 Client or SOAP 1.2 sender fault and sends that back as a response. 
+ +| |Both `handleSecurementException` and `handleValidationException` are protected methods, which you can override to change their default behavior.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------| + +### 7.2. Using `Wss4jSecurityInterceptor` + +The `Wss4jSecurityInterceptor` is an `EndpointInterceptor` (see [Intercepting Requests — the `EndpointInterceptor` Interface](#server-endpoint-interceptor)) that is based on [Apache’s WSS4J](https://ws.apache.org/wss4j/). + +WSS4J implements the following standards: + +* OASIS Web Services Security: SOAP Message Security 1.0 Standard 200401, March 2004 + +* Username Token profile V1.0 + +* X.509 Token Profile V1.0 + +This interceptor supports messages created by the `AxiomSoapMessageFactory` and the `SaajSoapMessageFactory`. + +#### 7.2.1. Configuring `Wss4jSecurityInterceptor` + +WSS4J uses no external configuration file. The interceptor is entirely configured by properties. The validation and securement actions invoked by this interceptor are specified via `validationActions` and `securementActions` properties, respectively. Actions are passed as a space-separated strings. The following listing shows an example configuration: + +``` + + + ... + + ... 
+ +``` + +The following table shows the available validation actions: + +|Validation action| Description | +|-----------------|------------------------| +| `UsernameToken` |Validates username token| +| `Timestamp` |Validates the timestamp | +| `Encrypt` | Decrypts the message | +| `Signature` |Validates the signature | +| `NoSecurity` | No action performed | + +The following table shows the available securement actions: + +| Securement action | Description | +|------------------------|---------------------------------------------------------------| +| `UsernameToken` | Adds a username token | +|`UsernameTokenSignature`|Adds a username token and a signature username token secret key| +| `Timestamp` | Adds a timestamp | +| `Encrypt` | Encrypts the response | +| `Signature` | Signs the response | +| `NoSecurity` | No action performed | + +The order of the actions is significant and is enforced by the interceptor. If its security actions were performed in a different order than the one specified by`validationActions`, the interceptor rejects an incoming SOAP message. + +#### 7.2.2. Handling Digital Certificates + +For cryptographic operations that require interaction with a keystore or certificate handling (signature, encryption, and decryption operations), WSS4J requires an instance of`org.apache.ws.security.components.crypto.Crypto`. + +`Crypto` instances can be obtained from WSS4J’s `CryptoFactory` or more conveniently with the Spring-WS`CryptoFactoryBean`. + +##### CryptoFactoryBean + +Spring-WS provides a convenient factory bean, `CryptoFactoryBean`, that constructs and configures `Crypto` instances through strongly typed properties (preferred) or through a `Properties` object. + +By default, `CryptoFactoryBean` returns instances of `org.apache.ws.security.components.crypto.Merlin`. You can change this by setting the `cryptoProvider` property (or its equivalent `org.apache.ws.security.crypto.provider` string property). 
+ +The following example configuration uses `CryptoFactoryBean`: + +``` + + + + +``` + +#### 7.2.3. Authentication + +This section addresses how to do authentication with `Wss4jSecurityInterceptor`. + +##### Validating Username Token + +Spring-WS provides a set of callback handlers to integrate with Spring Security. Additionally, a simple callback handler, `SimplePasswordValidationCallbackHandler`, is provided to configure users and passwords with an in-memory `Properties` object. + +Callback handlers are configured through the `validationCallbackHandler` property of the `Wss4jSecurityInterceptor`. + +###### Using `SimplePasswordValidationCallbackHandler` + +`SimplePasswordValidationCallbackHandler` validates plain text and digest username tokens against an in-memory `Properties` object. You can configure it as follows: + +``` + + + + Ernie + + + +``` + +###### Using `SpringSecurityPasswordValidationCallbackHandler` + +The `SpringSecurityPasswordValidationCallbackHandler` validates plain text and digest passwords by using a Spring Security `UserDetailsService` to operate. It uses this service to retrieve the password (or a digest of the password) of the user specified in the token. The password (or a digest of the password) contained in this details object is then compared with the digest in the message. If they are equal, the user has successfully authenticated, and a `UsernamePasswordAuthenticationToken` is stored in the `SecurityContextHolder`. You can set the service by using the `userDetailsService`. Additionally, you can set a `userCache` property, to cache loaded user details, as follows: + +``` + + + + + + + ... + +``` + +##### Adding Username Token + +Adding a username token to an outgoing message is as simple as adding `UsernameToken` to the `securementActions` property of the `Wss4jSecurityInterceptor` and specifying `securementUsername` and `securementPassword`. + +The password type can be set by setting the `securementPasswordType` property. 
Possible values are `PasswordText` for plain text passwords or `PasswordDigest` for digest passwords, which is the default. + +The following example generates a username token with a digest password: + +``` + + + + + +``` + +If the plain text password type is chosen, it is possible to instruct the interceptor to add `Nonce` and `Created` elements by setting the `securementUsernameTokenElements` property. The value must be a list that contains the desired elements' names separated by spaces (case sensitive). + +The following example generates a username token with a plain text password, a `Nonce`, and a `Created` element: + +``` + + + + + + + +``` + +##### Certificate Authentication + +As certificate authentication is akin to digital signatures, WSS4J handles it as part of the signature validation and securement. Specifically, the `securementSignatureKeyIdentifier` property must be set to `DirectReference` in order to instruct WSS4J to generate a `BinarySecurityToken` element containing the X509 certificate and to include it in the outgoing message. The certificate’s name and password are passed through the `securementUsername` and `securementPassword` properties, respectively, as the following example shows: + +``` + + + + + + + + + + + + +``` + +For the certificate validation, regular signature validation applies: + +``` + + + + + + + + + +``` + +At the end of the validation, the interceptor automatically verifies the validity of the certificate by delegating to the default WSS4J implementation. If needed, you can change this behavior by redefining the `verifyCertificateTrust` method. + +For more detail, refer to [Digital Signatures](#security-wss4j-digital-signatures). + +#### 7.2.4. Security Timestamps + +This section describes the various timestamp options available in the `Wss4jSecurityInterceptor`. + +##### Validating Timestamps + +To validate timestamps, add `Timestamp` to the `validationActions` property. 
You can override timestamp semantics specified by the initiator of the SOAP message by setting `timestampStrict` to `true` and specifying a server-side time-to-live in seconds (default: 300) by setting the `timeToLive` property. The interceptor always rejects already expired timestamps, whatever the value of `timeToLive` is. + +In the following example, the interceptor limits the timestamp validity window to 10 seconds, rejecting any valid timestamp token outside that window: + +``` + + + + + +``` + +##### Adding Timestamps + +Adding `Timestamp` to the `securementActions` property generates a timestamp header in outgoing messages. The `timestampPrecisionInMilliseconds` property specifies whether the precision of the generated timestamp is in milliseconds. The default value is `true`. The following listing adds a timestamp: + +``` + + + + +``` + +#### 7.2.5. Digital Signatures + +This section describes the various signature options available in the `Wss4jSecurityInterceptor`. + +##### Verifying Signatures + +To instruct the `Wss4jSecurityInterceptor`, `validationActions` must contain the `Signature` action. Additionally, the `validationSignatureCrypto` property must point to the keystore containing the public certificates of the initiator: + +``` + + + + + + + + + +``` + +##### Signing Messages + +Signing outgoing messages is enabled by adding the `Signature` action to the `securementActions`. The alias and the password of the private key to use are specified by the `securementUsername` and `securementPassword` properties, respectively. `securementSignatureCrypto` must point to the keystore that contains the private key: + +``` + + + + + + + + + + + +``` + +Furthermore, you can define the signature algorithm by setting the `securementSignatureAlgorithm` property. + +You can customize the key identifier type to use by setting the `securementSignatureKeyIdentifier` property. Only `IssuerSerial` and `DirectReference` are valid for the signature. 
+ +The `securementSignatureParts` property controls which part of the message is signed. The value of this property is a list of semicolon-separated element names that identify the elements to sign. The general form of a signature part is `{}{namespace}Element`. Note that the first empty brackets are used for encryption parts only. The default behavior is to sign the SOAP body. + +The following example shows how to sign the `echoResponse` element in the Spring Web Services echo sample: + +``` + +``` + +To specify an element without a namespace, use the string, `Null` (case sensitive), as the namespace name. + +If no other element in the request has a local name of `Body`, the SOAP namespace identifier can be empty (`{}`). + +##### Signature Confirmation + +Signature confirmation is enabled by setting `enableSignatureConfirmation` to `true`. Note that the signature confirmation action spans over the request and the response. This implies that `secureResponse` and `validateRequest` must be set to `true` (which is the default value) even if there are no corresponding security actions. The following example sets the `enableSignatureConfirmation` property to `true`: + +``` + + + + + + + + + + +``` + +#### 7.2.6. Decryption and Encryption + +This section describes the various decryption and encryption options available in the `Wss4jSecurityInterceptor`. + +##### Decryption + +Decryption of incoming SOAP messages requires that the `Encrypt` action be added to the `validationActions` property. The rest of the configuration depends on the key information that appears in the message. (This is because WSS4J needs only a Crypto for encrypted keys, whereas embedded key name validation is delegated to a callback handler.) + +To decrypt messages with an embedded encrypted symmetric key (the `xenc:EncryptedKey` element), `validationDecryptionCrypto` needs to point to a keystore that contains the decryption private key. 
Additionally, `validationCallbackHandler` has to be injected with a `org.springframework.ws.soap.security.wss4j.callback.KeyStoreCallbackHandler` that specifies the key’s password: + +``` + + + + + + + + + + + + + + +``` + +To support decryption of messages with an embedded key name ( `ds:KeyName` element), you can configure a `KeyStoreCallbackHandler` that points to the keystore with the symmetric secret key. The `symmetricKeyPassword` property indicates the key’s password, the key name being the one specified by `ds:KeyName` element: + +``` + + + + + + + + + + + + + + + +``` + +##### Encryption + +Adding `Encrypt` to the `securementActions` enables encryption of outgoing messages. You can set the certificate’s alias to use for the encryption by setting the `securementEncryptionUser` property. The keystore where the certificate resides is accessed through the `securementEncryptionCrypto` property. As encryption relies on public certificates, no password needs to be passed. The following example uses the `securementEncryptionCrypto` property: + +``` + + + + + + + + + + +``` + +You can customize encryption in several ways: The key identifier type to use is defined by the `securementEncryptionKeyIdentifier` property. Possible values are `IssuerSerial`,`X509KeyIdentifier`, `DirectReference`,`Thumbprint`, `SKIKeyIdentifier`, and `EmbeddedKeyName`. + +If you choose the `EmbeddedKeyName` type, you need to specify the secret key to use for the encryption. The alias of the key is set in the `securementEncryptionUser` property, as for the other key identifier types. However, WSS4J requires a callback handler to fetch the secret key. Thus, you must provide `securementCallbackHandler` with a `KeyStoreCallbackHandler` that points to the appropriate keystore. By default, the `ds:KeyName` element in the resulting WS-Security header takes the value of the `securementEncryptionUser` property. 
To indicate a different name, you can set the `securementEncryptionEmbeddedKeyName` with the desired value. In the next example, the outgoing message is encrypted with a key aliased `secretKey`, whereas `myKey` appears in `ds:KeyName` element: + +``` + + + + + + + + + + + + + + + + + + +``` + +The `securementEncryptionKeyTransportAlgorithm` property defines which algorithm to use to encrypt the generated symmetric key. Supported values are `[http://www.w3.org/2001/04/xmlenc#rsa-1_5](https://www.w3.org/2001/04/xmlenc#rsa-1_5)`, which is the default, and `[http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p](https://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p)`. + +You can set the symmetric encryption algorithm to use by setting the `securementEncryptionSymAlgorithm` property. Supported values are `[http://www.w3.org/2001/04/xmlenc#aes128-cbc](https://www.w3.org/2001/04/xmlenc#aes128-cbc)` (default), `[http://www.w3.org/2001/04/xmlenc#tripledes-cbc](https://www.w3.org/2001/04/xmlenc#tripledes-cbc)`, `[http://www.w3.org/2001/04/xmlenc#aes256-cbc](https://www.w3.org/2001/04/xmlenc#aes256-cbc)`, and `[http://www.w3.org/2001/04/xmlenc#aes192-cbc](https://www.w3.org/2001/04/xmlenc#aes192-cbc)`. + +Finally, the `securementEncryptionParts` property defines which parts of the message are encrypted. The value of this property is a list of semicolon-separated element names that identify the elements to encrypt. An encryption mode specifier and a namespace identification, each inside a pair of curly brackets, may precede each element name. The encryption mode specifier is either `{Content}` or `{Element}`. See the W3C XML Encryption specification about the differences between Element and Content encryption. The following example identifies the `echoResponse` from the echo sample: + +``` + +``` + +Be aware that the element name, the namespace identifier, and the encryption modifier are case-sensitive. You can omit the encryption modifier and the namespace identifier. 
If you do, the encryption mode defaults to `Content`, and the namespace is set to the SOAP namespace. + +To specify an element without a namespace, use the value, `Null` (case sensitive), as the namespace name. If no list is specified, the handler encrypts the SOAP Body in `Content` mode by default. + +#### 7.2.7. Security Exception Handling + +The exception handling of the `Wss4jSecurityInterceptor` is identical to that of the `XwsSecurityInterceptor`. See [Security Exception Handling](#security-xws-exception-handling) for more information. + +# III. Other Resources + +In addition to this reference documentation, a number of other resources may help you learn how to use Spring Web Services. These additional, third-party resources are enumerated in this section. + +## Bibliography + +* [waldo-94] Jim Waldo, Ann Wollrath, and Sam Kendall. *A Note on Distributed Computing*. Springer Verlag. 1994 + +* [alpine] Steve Loughran & Edmund Smith. *Rethinking the Java SOAP Stack*. May 17, 2005. © 2005 IEEE Telephone Laboratories, Inc. + +* [effective-enterprise-java] Ted Neward. Scott Meyers. *Effective Enterprise Java*. Addison-Wesley. 2004 + +* [effective-xml] Elliotte Rusty Harold. Scott Meyers. *Effective XML*. Addison-Wesley. 2004 \ No newline at end of file diff --git a/docs/en/spring/README.md b/docs/en/spring/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring/consuming-rest.md b/docs/en/spring/consuming-rest.md new file mode 100644 index 0000000000000000000000000000000000000000..9bc25ad24d81eb533ff953a07f8f99d252a6e3e5 --- /dev/null +++ b/docs/en/spring/consuming-rest.md @@ -0,0 +1,261 @@ +# Consuming a RESTful Web Service + +This guide walks you through the process of creating an application that consumes a RESTful web service. 
+ +## What You Will Build + +You will build an application that uses Spring’s `RestTemplate` to retrieve a random Spring Boot quotation at https://quoters.apps.pcfone.io/api/random. + +## What You Need + +- About 15 minutes +- A favorite text editor or IDE +- [JDK 1.8](http://www.oracle.com/technetwork/java/javase/downloads/index.html) or later +- [Gradle 4+](http://www.gradle.org/downloads) or [Maven 3.2+](https://maven.apache.org/download.cgi) +- You can also import the code straight into your IDE: + - [Spring Tool Suite (STS)](https://spring.io/guides/gs/sts) + - [IntelliJ IDEA](https://spring.io/guides/gs/intellij-idea/) + +## How to complete this guide + +Like most Spring [Getting Started guides](https://spring.io/guides), you can start from scratch and complete each step or you can bypass basic setup steps that are already familiar to you. Either way, you end up with working code. + +To **start from scratch**, move on to [Starting with Spring Initializr](https://spring.io/guides/gs/consuming-rest/#scratch). + +To **skip the basics**, do the following: + +- [Download](https://github.com/spring-guides/gs-consuming-rest/archive/main.zip) and unzip the source repository for this guide, or clone it using [Git](https://spring.io/understanding/Git): `git clone https://github.com/spring-guides/gs-consuming-rest.git` +- cd into `gs-consuming-rest/initial` +- Jump ahead to [Fetching a REST Resource](https://spring.io/guides/gs/consuming-rest/#initial). + +**When you finish**, you can check your results against the code in `gs-consuming-rest/complete`. + +## Starting with Spring Initializr + +You can use this [pre-initialized project](https://start.spring.io/#!type=maven-project&language=java&platformVersion=2.5.5&packaging=jar&jvmVersion=11&groupId=com.example&artifactId=consuming-rest&name=consuming-rest&description=Demo project for Spring Boot&packageName=com.example.consuming-rest&dependencies=web) and click Generate to download a ZIP file. 
This project is configured to fit the examples in this tutorial. + +To manually initialize the project: + +1. Navigate to [https://start.spring.io](https://start.spring.io/). This service pulls in all the dependencies you need for an application and does most of the setup for you. +2. Choose either Gradle or Maven and the language you want to use. This guide assumes that you chose Java. +3. Click **Dependencies** and select **Spring Web**. +4. Click **Generate**. +5. Download the resulting ZIP file, which is an archive of a web application that is configured with your choices. + +If your IDE has the Spring Initializr integration, you can complete this process from your IDE. + +You can also fork the project from Github and open it in your IDE or other editor. + +## Fetching a REST Resource + +With project setup complete, you can create a simple application that consumes a RESTful service. + +A RESTful service has been stood up at https://quoters.apps.pcfone.io/api/random. It randomly fetches quotations about Spring Boot and returns them as JSON documents. + +If you request that URL through a web browser or curl, you receive a JSON document that looks something like this: + +``` +{ + type: "success", + value: { + id: 10, + quote: "Really loving Spring Boot, makes stand alone Spring apps easy." + } +} +``` + +That is easy enough but not terribly useful when fetched through a browser or through curl. + +A more useful way to consume a REST web service is programmatically. To help you with that task, Spring provides a convenient template class called [`RestTemplate`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/web/client/RestTemplate.html). `RestTemplate` makes interacting with most RESTful services a one-line incantation. And it can even bind that data to custom domain types. + +First, you need to create a domain class to contain the data that you need. 
The following listing shows the `Quote` class, which you can use as your domain class: + +``` +src/main/java/com/example/consumingrest/Quote.java +``` + +``` +package com.example.consumingrest; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class Quote { + + private String type; + private Value value; + + public Quote() { + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public Value getValue() { + return value; + } + + public void setValue(Value value) { + this.value = value; + } + + @Override + public String toString() { + return "Quote{" + + "type='" + type + '\'' + + ", value=" + value + + '}'; + } +} +``` + +This simple Java class has a handful of properties and matching getter methods. It is annotated with `@JsonIgnoreProperties` from the Jackson JSON processing library to indicate that any properties not bound in this type should be ignored. + +To directly bind your data to your custom types, you need to specify the variable name to be exactly the same as the key in the JSON document returned from the API. In case your variable name and key in JSON doc do not match, you can use `@JsonProperty` annotation to specify the exact key of the JSON document. (This example matches each variable name to a JSON key, so you do not need that annotation here.) + +You also need an additional class, to embed the inner quotation itself. 
The `Value` class fills that need and is shown in the following listing (at `src/main/java/com/example/consumingrest/Value.java`): + +``` +package com.example.consumingrest; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; + +@JsonIgnoreProperties(ignoreUnknown = true) +public class Value { + + private Long id; + private String quote; + + public Value() { + } + + public Long getId() { + return this.id; + } + + public String getQuote() { + return this.quote; + } + + public void setId(Long id) { + this.id = id; + } + + public void setQuote(String quote) { + this.quote = quote; + } + + @Override + public String toString() { + return "Value{" + + "id=" + id + + ", quote='" + quote + '\'' + + '}'; + } +} +``` + +This uses the same annotations but maps onto other data fields. + +## Finishing the Application + +The Initializr creates a class with a `main()` method. The following listing shows the class the Initializr creates (at `src/main/java/com/example/consumingrest/ConsumingRestApplication.java`): + +``` +package com.example.consumingrest; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class ConsumingRestApplication { + + public static void main(String[] args) { + SpringApplication.run(ConsumingRestApplication.class, args); + } + +} +``` + +Now you need to add a few other things to the `ConsumingRestApplication` class to get it to show quotations from our RESTful source. You need to add: + +- A logger, to send output to the log (the console, in this example). +- A `RestTemplate`, which uses the Jackson JSON processing library to process the incoming data. +- A `CommandLineRunner` that runs the `RestTemplate` (and, consequently, fetches our quotation) on startup. 
+ +The following listing shows the finished `ConsumingRestApplication` class (at `src/main/java/com/example/consumingrest/ConsumingRestApplication.java`): + +``` +package com.example.consumingrest; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.web.client.RestTemplateBuilder; +import org.springframework.context.annotation.Bean; +import org.springframework.web.client.RestTemplate; + +@SpringBootApplication +public class ConsumingRestApplication { + + private static final Logger log = LoggerFactory.getLogger(ConsumingRestApplication.class); + + public static void main(String[] args) { + SpringApplication.run(ConsumingRestApplication.class, args); + } + + @Bean + public RestTemplate restTemplate(RestTemplateBuilder builder) { + return builder.build(); + } + + @Bean + public CommandLineRunner run(RestTemplate restTemplate) throws Exception { + return args -> { + Quote quote = restTemplate.getForObject( + "https://quoters.apps.pcfone.io/api/random", Quote.class); + log.info(quote.toString()); + }; + } +} +``` + +## Running the Application + +You can run the application from the command line with Gradle or Maven. You can also build a single executable JAR file that contains all the necessary dependencies, classes, and resources and run that. Building an executable jar makes it easy to ship, version, and deploy the service as an application throughout the development lifecycle, across different environments, and so forth. + +If you use Gradle, you can run the application by using `./gradlew bootRun`. Alternatively, you can build the JAR file by using `./gradlew build` and then run the JAR file, as follows: + +``` +java -jar build/libs/gs-consuming-rest-0.1.0.jar +``` + +If you use Maven, you can run the application by using `./mvnw spring-boot:run`. 
Alternatively, you can build the JAR file with `./mvnw clean package` and then run the JAR file, as follows: + +``` +java -jar target/gs-consuming-rest-0.1.0.jar +``` + +The steps described here create a runnable JAR. You can also build a classic WAR file. + +You should see output similar to the following but with a random quotation: + +``` +2019-08-22 14:06:46.506 INFO 42940 --- [ main] c.e.c.ConsumingRestApplication : Quote{type='success', value=Value{id=1, quote='Working with Spring Boot is like pair-programming with the Spring developers.'}} +``` + +If you see an error that reads, `Could not extract response: no suitable HttpMessageConverter found for response type [class com.example.consumingrest.Quote]`, it is possible that you are in an environment that cannot connect to the backend service (which sends JSON if you can reach it). Maybe you are behind a corporate proxy. Try setting the `http.proxyHost` and `http.proxyPort` system properties to values appropriate for your environment. + +## Summary + +Congratulations! You have just developed a simple REST client by using Spring Boot. + +原文链接: https://spring.io/guides/gs/consuming-rest/ + diff --git a/docs/en/spring/getting-started_first-application.md b/docs/en/spring/getting-started_first-application.md new file mode 100644 index 0000000000000000000000000000000000000000..28e8188ede5a3547251cf253fe21c0772120378e --- /dev/null +++ b/docs/en/spring/getting-started_first-application.md @@ -0,0 +1,239 @@ +## Developing Your First Spring Boot Application + +This section describes how to develop a small “Hello World!” web application that highlights some of Spring Boot’s key features. We use Maven to build this project, since most IDEs support it. + +Tip: + +The [spring.io](https://spring.io/) web site contains many “Getting Started” [guides](https://spring.io/guides) that use Spring Boot. If you need to solve a specific problem, check there first. 
+ +You can shortcut the steps below by going to [start.spring.io](https://start.spring.io/) and choosing the "Web" starter from the dependencies searcher. Doing so generates a new project structure so that you can [start coding right away](https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/getting-started.html#getting-started.first-application.code). Check the [start.spring.io user guide](https://github.com/spring-io/start.spring.io/blob/main/USING.adoc) for more details. + +Before we begin, open a terminal and run the following commands to ensure that you have valid versions of Java and Maven installed: + +``` +$ java -version +java version "1.8.0_102" +Java(TM) SE Runtime Environment (build 1.8.0_102-b14) +Java HotSpot(TM) 64-Bit Server VM (build 25.102-b14, mixed mode) +``` + +``` +$ mvn -v +Apache Maven 3.5.4 (1edded0938998edf8bf061f1ceb3cfdeccf443fe; 2018-06-17T14:33:14-04:00) +Maven home: /usr/local/Cellar/maven/3.3.9/libexec +Java version: 1.8.0_102, vendor: Oracle Corporation +``` + +Note: + +This sample needs to be created in its own directory. Subsequent instructions assume that you have created a suitable directory and that it is your current directory. + +### Creating the POM + +We need to start by creating a Maven `pom.xml` file. The `pom.xml` is the recipe that is used to build your project. Open your favorite text editor and add the following: + +```xml + + + 4.0.0 + + com.example + myproject + 0.0.1-SNAPSHOT + + + org.springframework.boot + spring-boot-starter-parent + 2.6.4 + + + + + +``` + +The preceding listing should give you a working build. You can test it by running `mvn package` (for now, you can ignore the “jar will be empty - no content was marked for inclusion!” warning). + +Note: + +At this point, you could import the project into an IDE (most modern Java IDEs include built-in support for Maven). For simplicity, we continue to use a plain text editor for this example. 
+ +### Adding Classpath Dependencies + +Spring Boot provides a number of “Starters” that let you add jars to your classpath. Our applications for smoke tests use the `spring-boot-starter-parent` in the `parent` section of the POM. The `spring-boot-starter-parent` is a special starter that provides useful Maven defaults. It also provides a [`dependency-management`](https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/using.html#using.build-systems.dependency-management) section so that you can omit `version` tags for “blessed” dependencies. + +Other “Starters” provide dependencies that you are likely to need when developing a specific type of application. Since we are developing a web application, we add a `spring-boot-starter-web` dependency. Before that, we can look at what we currently have by running the following command: + +```shell +$ mvn dependency:tree + +[INFO] com.example:myproject:jar:0.0.1-SNAPSHOT +``` + +The `mvn dependency:tree` command prints a tree representation of your project dependencies. You can see that `spring-boot-starter-parent` provides no dependencies by itself. To add the necessary dependencies, edit your `pom.xml` and add the `spring-boot-starter-web` dependency immediately below the `parent` section: + +```xml + + + org.springframework.boot + spring-boot-starter-web + + +``` + +If you run `mvn dependency:tree` again, you see that there are now a number of additional dependencies, including the Tomcat web server and Spring Boot itself. + +### Writing the Code + +To finish our application, we need to create a single Java file. 
By default, Maven compiles sources from `src/main/java`, so you need to create that directory structure and then add a file named `src/main/java/MyApplication.java` to contain the following code: + +```java +@RestController +@EnableAutoConfiguration +public class MyApplication { + + @RequestMapping("/") + String home() { + return "Hello World!"; + } + + public static void main(String[] args) { + SpringApplication.run(MyApplication.class, args); + } + +} +``` + +Although there is not much code here, quite a lot is going on. We step through the important parts in the next few sections. + +#### The @RestController and @RequestMapping Annotations + +The first annotation on our `MyApplication` class is `@RestController`. This is known as a *stereotype* annotation. It provides hints for people reading the code and for Spring that the class plays a specific role. In this case, our class is a web `@Controller`, so Spring considers it when handling incoming web requests. + +The `@RequestMapping` annotation provides “routing” information. It tells Spring that any HTTP request with the `/` path should be mapped to the `home` method. The `@RestController` annotation tells Spring to render the resulting string directly back to the caller. + +Tip: + +The `@RestController` and `@RequestMapping` annotations are Spring MVC annotations (they are not specific to Spring Boot). See the [MVC section](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/web.html#mvc) in the Spring Reference Documentation for more details. + +#### The @EnableAutoConfiguration Annotation + +The second class-level annotation is `@EnableAutoConfiguration`. This annotation tells Spring Boot to “guess” how you want to configure Spring, based on the jar dependencies that you have added. Since `spring-boot-starter-web` added Tomcat and Spring MVC, the auto-configuration assumes that you are developing a web application and sets up Spring accordingly. 
+ +``` +Starters and Auto-configuration + +Auto-configuration is designed to work well with “Starters”, but the two concepts are not directly tied. You are free to pick and choose jar dependencies outside of the starters. Spring Boot still does its best to auto-configure your application. +``` + +#### The “main” Method + +The final part of our application is the `main` method. This is a standard method that follows the Java convention for an application entry point. Our main method delegates to Spring Boot’s `SpringApplication` class by calling `run`. `SpringApplication` bootstraps our application, starting Spring, which, in turn, starts the auto-configured Tomcat web server. We need to pass `MyApplication.class` as an argument to the `run` method to tell `SpringApplication` which is the primary Spring component. The `args` array is also passed through to expose any command-line arguments. + +### Running the Example + +At this point, your application should work. Since you used the `spring-boot-starter-parent` POM, you have a useful `run` goal that you can use to start the application. Type `mvn spring-boot:run` from the root project directory to start the application. You should see output similar to the following: + +```shell +$ mvn spring-boot:run + + . ____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v2.6.4) +....... . . . +....... . . . (log output here) +....... . . . +........ Started MyApplication in 2.222 seconds (JVM running for 6.514) +``` + +If you open a web browser to `localhost:8080`, you should see the following output: + +``` +Hello World! +``` + +To gracefully exit the application, press `ctrl-c`. + +### Creating an Executable Jar + +We finish our example by creating a completely self-contained executable jar file that we could run in production. 
Executable jars (sometimes called “fat jars”) are archives containing your compiled classes along with all of the jar dependencies that your code needs to run. + +Executable jars and Java + +Java does not provide a standard way to load nested jar files (jar files that are themselves contained within a jar). This can be problematic if you are looking to distribute a self-contained application. + +To solve this problem, many developers use “uber” jars. An uber jar packages all the classes from all the application’s dependencies into a single archive. The problem with this approach is that it becomes hard to see which libraries are in your application. It can also be problematic if the same filename is used (but with different content) in multiple jars. + +Spring Boot takes a [different approach](https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/executable-jar.html#appendix.executable-jar) and lets you actually nest jars directly. + +To create an executable jar, we need to add the `spring-boot-maven-plugin` to our `pom.xml`. To do so, insert the following lines just below the `dependencies` section: + +```xml + + + + org.springframework.boot + spring-boot-maven-plugin + + + +``` + +Note: + +The `spring-boot-starter-parent` POM includes `` configuration to bind the `repackage` goal. If you do not use the parent POM, you need to declare this configuration yourself. See the [plugin documentation](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#getting-started) for details. + +Save your `pom.xml` and run `mvn package` from the command line, as follows: + +```shell +$ mvn package + +[INFO] Scanning for projects... +[INFO] +[INFO] ------------------------------------------------------------------------ +[INFO] Building myproject 0.0.1-SNAPSHOT +[INFO] ------------------------------------------------------------------------ +[INFO] .... .. 
+[INFO] --- maven-jar-plugin:2.4:jar (default-jar) @ myproject --- +[INFO] Building jar: /Users/developer/example/spring-boot-example/target/myproject-0.0.1-SNAPSHOT.jar +[INFO] +[INFO] --- spring-boot-maven-plugin:2.6.4:repackage (default) @ myproject --- +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +``` + +If you look in the `target` directory, you should see `myproject-0.0.1-SNAPSHOT.jar`. The file should be around 10 MB in size. If you want to peek inside, you can use `jar tvf`, as follows: + +```shell +$ jar tvf target/myproject-0.0.1-SNAPSHOT.jar +``` + +You should also see a much smaller file named `myproject-0.0.1-SNAPSHOT.jar.original` in the `target` directory. This is the original jar file that Maven created before it was repackaged by Spring Boot. + +To run that application, use the `java -jar` command, as follows: + +```shell +$ java -jar target/myproject-0.0.1-SNAPSHOT.jar + + . ____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v2.6.4) +....... . . . +....... . . . (log output here) +....... . . . +........ Started MyApplication in 2.536 seconds (JVM running for 2.864) +``` + +As before, to exit the application, press `ctrl-c`. 
+ +原文链接: https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/getting-started.html#getting-started.first-application \ No newline at end of file diff --git a/docs/en/spring/initializr.md b/docs/en/spring/initializr.md new file mode 100644 index 0000000000000000000000000000000000000000..5f6a427b7d19bca0c8603a74e52513543240d0e0 --- /dev/null +++ b/docs/en/spring/initializr.md @@ -0,0 +1,5 @@ +# Spring initializr + +![2022-03-01-18-11-18](./initializr/2022-03-01-18-11-18.png) + +原文链接: https://start.spring.io/ diff --git a/docs/en/spring/initializr/2022-03-01-18-11-18.png b/docs/en/spring/initializr/2022-03-01-18-11-18.png new file mode 100644 index 0000000000000000000000000000000000000000..fb8e05f9961e5868a61a2674607c0e2287d08ece Binary files /dev/null and b/docs/en/spring/initializr/2022-03-01-18-11-18.png differ diff --git a/docs/en/spring/initializr/2022-03-03-13-33.png b/docs/en/spring/initializr/2022-03-03-13-33.png new file mode 100644 index 0000000000000000000000000000000000000000..00ca73a5b751a556483354fb7255ea995c059d3c Binary files /dev/null and b/docs/en/spring/initializr/2022-03-03-13-33.png differ diff --git a/docs/en/spring/installing.md b/docs/en/spring/installing.md new file mode 100644 index 0000000000000000000000000000000000000000..91cb74459131782eea21e187a12aad69aaa3433d --- /dev/null +++ b/docs/en/spring/installing.md @@ -0,0 +1,182 @@ +## Installing Spring Boot + +Spring Boot can be used with “classic” Java development tools or installed as a command line tool. Either way, you need [Java SDK v1.8](https://www.java.com/) or higher. Before you begin, you should check your current Java installation by using the following command: + +``` +$ java -version +``` + +If you are new to Java development or if you want to experiment with Spring Boot, you might want to try the [Spring Boot CLI](https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/getting-started.html#getting-started.installing.cli) (Command Line Interface) first. 
Otherwise, read on for “classic” installation instructions. + +### Installation Instructions for the Java Developer + +You can use Spring Boot in the same way as any standard Java library. To do so, include the appropriate `spring-boot-*.jar` files on your classpath. Spring Boot does not require any special tools integration, so you can use any IDE or text editor. Also, there is nothing special about a Spring Boot application, so you can run and debug a Spring Boot application as you would any other Java program. + +Although you *could* copy Spring Boot jars, we generally recommend that you use a build tool that supports dependency management (such as Maven or Gradle). + +#### Maven Installation + +Spring Boot is compatible with Apache Maven 3.3 or above. If you do not already have Maven installed, you can follow the instructions at [maven.apache.org](https://maven.apache.org/). + +Tip: + +On many operating systems, Maven can be installed with a package manager. If you use OSX Homebrew, try `brew install maven`. Ubuntu users can run `sudo apt-get install maven`. Windows users with [Chocolatey](https://chocolatey.org/) can run `choco install maven` from an elevated (administrator) prompt. + +Spring Boot dependencies use the `org.springframework.boot` `groupId`. Typically, your Maven POM file inherits from the `spring-boot-starter-parent` project and declares dependencies to one or more [“Starters”](https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/using.html#using.build-systems.starters). Spring Boot also provides an optional [Maven plugin](https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/build-tool-plugins.html#build-tool-plugins.maven) to create executable jars. + +More details on getting started with Spring Boot and Maven can be found in the [Getting Started section](https://docs.spring.io/spring-boot/docs/2.6.4/maven-plugin/reference/htmlsingle/#getting-started) of the Maven plugin’s reference guide. 
+ +#### Gradle Installation + +Spring Boot is compatible with Gradle 6.8, 6.9, and 7.x. If you do not already have Gradle installed, you can follow the instructions at [gradle.org](https://gradle.org/). + +Spring Boot dependencies can be declared by using the `org.springframework.boot` `group`. Typically, your project declares dependencies to one or more [“Starters”](https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/using.html#using.build-systems.starters). Spring Boot provides a useful [Gradle plugin](https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/build-tool-plugins.html#build-tool-plugins.gradle) that can be used to simplify dependency declarations and to create executable jars. + +``` +Gradle Wrapper +The Gradle Wrapper provides a nice way of “obtaining” Gradle when you need to build a project. It is a small script and library that you commit alongside your code to bootstrap the build process. See docs.gradle.org/current/userguide/gradle_wrapper.html for details. +``` + +More details on getting started with Spring Boot and Gradle can be found in the [Getting Started section](https://docs.spring.io/spring-boot/docs/2.6.4/gradle-plugin/reference/htmlsingle/#getting-started) of the Gradle plugin’s reference guide. + +### Installing the Spring Boot CLI + +The Spring Boot CLI (Command Line Interface) is a command line tool that you can use to quickly prototype with Spring. It lets you run [Groovy](https://groovy-lang.org/) scripts, which means that you have a familiar Java-like syntax without so much boilerplate code. + +You do not need to use the CLI to work with Spring Boot, but it is a quick way to get a Spring application off the ground without an IDE. 
+ +#### Manual Installation + +You can download the Spring CLI distribution from the Spring software repository: + +- [spring-boot-cli-2.6.4-bin.zip](https://repo.spring.io/release/org/springframework/boot/spring-boot-cli/2.6.4/spring-boot-cli-2.6.4-bin.zip) +- [spring-boot-cli-2.6.4-bin.tar.gz](https://repo.spring.io/release/org/springframework/boot/spring-boot-cli/2.6.4/spring-boot-cli-2.6.4-bin.tar.gz) + +Cutting edge [snapshot distributions](https://repo.spring.io/snapshot/org/springframework/boot/spring-boot-cli/) are also available. + +Once downloaded, follow the [INSTALL.txt](https://raw.githubusercontent.com/spring-projects/spring-boot/v2.6.4/spring-boot-project/spring-boot-cli/src/main/content/INSTALL.txt) instructions from the unpacked archive. In summary, there is a `spring` script (`spring.bat` for Windows) in a `bin/` directory in the `.zip` file. Alternatively, you can use `java -jar` with the `.jar` file (the script helps you to be sure that the classpath is set correctly). + +#### Installation with SDKMAN! + +SDKMAN! (The Software Development Kit Manager) can be used for managing multiple versions of various binary SDKs, including Groovy and the Spring Boot CLI. Get SDKMAN! from [sdkman.io](https://sdkman.io/) and install Spring Boot by using the following commands: + +``` +$ sdk install springboot +$ spring --version +Spring CLI v2.6.4 +``` + +If you develop features for the CLI and want access to the version you built, use the following commands: + +``` +$ sdk install springboot dev /path/to/spring-boot/spring-boot-cli/target/spring-boot-cli-2.6.4-bin/spring-2.6.4/ +$ sdk default springboot dev +$ spring --version +Spring CLI v2.6.4 +``` + +The preceding instructions install a local instance of `spring` called the `dev` instance. It points at your target build location, so every time you rebuild Spring Boot, `spring` is up-to-date. 
+ +You can see it by running the following command: + +``` +$ sdk ls springboot + +================================================================================ +Available Springboot Versions +================================================================================ +> + dev +* 2.6.4 + +================================================================================ ++ - local version +* - installed +> - currently in use +================================================================================ +``` + +#### OSX Homebrew Installation + +If you are on a Mac and use [Homebrew](https://brew.sh/), you can install the Spring Boot CLI by using the following commands: + +``` +$ brew tap spring-io/tap +$ brew install spring-boot +``` + +Homebrew installs `spring` to `/usr/local/bin`. + +Note: + +If you do not see the formula, your installation of brew might be out-of-date. In that case, run `brew update` and try again. + +#### MacPorts Installation + +If you are on a Mac and use [MacPorts](https://www.macports.org/), you can install the Spring Boot CLI by using the following command: + +``` +$ sudo port install spring-boot-cli +``` + +#### Command-line Completion + +The Spring Boot CLI includes scripts that provide command completion for the [BASH](https://en.wikipedia.org/wiki/Bash_(Unix_shell)) and [zsh](https://en.wikipedia.org/wiki/Z_shell) shells. You can `source` the script (also named `spring`) in any shell or put it in your personal or system-wide bash completion initialization. On a Debian system, the system-wide scripts are in `/shell-completion/bash` and all scripts in that directory are executed when a new shell starts. For example, to run the script manually if you have installed by using SDKMAN!, use the following commands: + +``` +$ . 
~/.sdkman/candidates/springboot/current/shell-completion/bash/spring +$ spring + grab help jar run test version +``` + +Note: + +If you install the Spring Boot CLI by using Homebrew or MacPorts, the command-line completion scripts are automatically registered with your shell. + +#### Windows Scoop Installation + +If you are on Windows and use [Scoop](https://scoop.sh/), you can install the Spring Boot CLI by using the following commands: + +``` +> scoop bucket add extras +> scoop install springboot +``` + +Scoop installs `spring` to `~/scoop/apps/springboot/current/bin`. + +Note: + +If you do not see the app manifest, your installation of scoop might be out-of-date. In that case, run `scoop update` and try again. + +#### Quick-start Spring CLI Example + +You can use the following web application to test your installation. To start, create a file called `app.groovy`, as follows: + +``` +@RestController +class ThisWillActuallyRun { + + @RequestMapping("/") + String home() { + "Hello World!" + } + +} +``` + +Then run it from a shell, as follows: + +``` +$ spring run app.groovy +``` + +Note: + +The first run of your application is slow, as dependencies are downloaded. Subsequent runs are much quicker. + +Open `localhost:8080` in your favorite web browser. You should see the following output: + +``` +Hello World! +``` + +原文链接: https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/getting-started.html#getting-started.installing diff --git a/docs/en/spring/intellij_idea.md b/docs/en/spring/intellij_idea.md new file mode 100644 index 0000000000000000000000000000000000000000..eda7c22a1a1ae14f4767ad24dccf7477dd6c4b61 --- /dev/null +++ b/docs/en/spring/intellij_idea.md @@ -0,0 +1,51 @@ +# Working a Getting Started guide with IntelliJ IDEA + +This guide walks you through using IntelliJ IDEA to build one of the Getting Started guides. + +## What you’ll build + +You’ll pick a Spring guide and import it into IntelliJ IDEA. 
Then you can read the guide, work on the code, and run the project. + +## What you’ll need + +- About 15 minutes +- [IntelliJ IDEA](https://www.jetbrains.com/idea/download/) +- [JDK 6](http://www.oracle.com/technetwork/java/javase/downloads/index.html) or later + +## Installing IntelliJ IDEA + +If you don’t have IntelliJ IDEA (Ultimate Edition) installed yet, visit the link up above. From there, you can download a copy for your platform. To install it simply unpack the downloaded archive. + +When you’re done, go ahead and launch IntelliJ IDEA. + +## Importing a Getting Started guide + +To import an existing project you need some code, so clone or copy one of the Getting Started guides, e.g. the [REST Service](https://spring.io/guides/gs/rest-service/) guide: + +``` +$ git clone https://github.com/spring-guides/gs-rest-service.git +``` + +With IntelliJ IDEA up and running, click **Import Project** on the **Welcome Screen**, or **File | Open** on the main menu: + +![spring_guide_welcome_import](./intellij_idea/spring_guide_welcome_import.png) + +In the pop-up dialog make sure to select either [Maven](https://spring.io/guides/gs/maven)'s **pom.xml** or [Gradle](https://spring.io/guides/gs/gradle)'s **build.gradle** file under the **complete** folder: + +![spring_guide_select_gradle_file](./intellij_idea/spring_guide_select_gradle_file.png) + +IntelliJ IDEA will create a project with all the code from the guide ready to run. 
+ +## Creating a Project from Scratch + +In case you’d like to start with an empty project and copy-and-paste your way through the guide, create a new **Maven** or **Gradle** project in the **Project Wizard**: + +![spring_guide_new_project](./intellij_idea/spring_guide_new_project.png) + +## See Also + +The following guide may also be helpful: + +- [Working a Getting Started guide with STS](https://spring.io/guides/gs/sts/) + +原文链接: https://spring.io/guides/gs/intellij-idea/ \ No newline at end of file diff --git a/docs/en/spring/intellij_idea/spring_guide_new_project.png b/docs/en/spring/intellij_idea/spring_guide_new_project.png new file mode 100644 index 0000000000000000000000000000000000000000..68579cd3537624d98a26d7c99681bd55729009b8 Binary files /dev/null and b/docs/en/spring/intellij_idea/spring_guide_new_project.png differ diff --git a/docs/en/spring/intellij_idea/spring_guide_select_gradle_file.png b/docs/en/spring/intellij_idea/spring_guide_select_gradle_file.png new file mode 100644 index 0000000000000000000000000000000000000000..154ca13ec624d957ff5a495cbf031aa6954e2d52 Binary files /dev/null and b/docs/en/spring/intellij_idea/spring_guide_select_gradle_file.png differ diff --git a/docs/en/spring/intellij_idea/spring_guide_welcome_import.png b/docs/en/spring/intellij_idea/spring_guide_welcome_import.png new file mode 100644 index 0000000000000000000000000000000000000000..88f5a586378ba1e3027dfb9bfaa92d9180b062e2 Binary files /dev/null and b/docs/en/spring/intellij_idea/spring_guide_welcome_import.png differ diff --git a/docs/en/spring/introducing-spring-boot.md b/docs/en/spring/introducing-spring-boot.md new file mode 100644 index 0000000000000000000000000000000000000000..c7610712a3f79cd60a28bea4e524ed8fcbc425ed --- /dev/null +++ b/docs/en/spring/introducing-spring-boot.md @@ -0,0 +1,14 @@ +## Introducing Spring Boot + +Spring Boot helps you to create stand-alone, production-grade Spring-based applications that you can run. 
We take an opinionated view of the Spring platform and third-party libraries, so that you can get started with minimum fuss. Most Spring Boot applications need very little Spring configuration. + +You can use Spring Boot to create Java applications that can be started by using `java -jar` or more traditional war deployments. We also provide a command line tool that runs “spring scripts”. + +Our primary goals are: + +- Provide a radically faster and widely accessible getting-started experience for all Spring development. +- Be opinionated out of the box but get out of the way quickly as requirements start to diverge from the defaults. +- Provide a range of non-functional features that are common to large classes of projects (such as embedded servers, security, metrics, health checks, and externalized configuration). +- Absolutely no code generation and no requirement for XML configuration. + +原文链接: https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/getting-started.html#getting-started.introducing-spring-boot \ No newline at end of file diff --git a/docs/en/spring/quickstart.md b/docs/en/spring/quickstart.md new file mode 100644 index 0000000000000000000000000000000000000000..26bfd1064d1d71f84bc9795b60acd60db2b2758a --- /dev/null +++ b/docs/en/spring/quickstart.md @@ -0,0 +1,86 @@ +# Spring Quickstart Guide + +## What you'll build + +You will build a classic “Hello World!” endpoint which any browser can connect to. You can even tell it your name, and it will respond in a more friendly way. + +## What you’ll need + +**An Integrated Developer Environment (IDE)** + +Popular choices include [IntelliJ IDEA](https://www.jetbrains.com/idea/), [Spring Tools](https://spring.io/tools), [Visual Studio Code](https://code.visualstudio.com/docs/languages/java), or [Eclipse](https://www.eclipse.org/downloads/packages/), and many more. + +**A Java™ Development Kit (JDK)** + +We recommend [BellSoft Liberica JDK](https://bell-sw.com/) version 8 or version 11. 
+ +## Step 1: Start a new Spring Boot project + +Use [start.spring.io](https://start.spring.io/) to create a “web” project. In the “Dependencies” dialog search for and add the “web” dependency as shown in the screenshot. Hit the “Generate” button, download the zip, and unpack it into a folder on your computer. + +![quick-img-1-12bfde9c5c280b1940d85dee3d81772d](./quickstart_img/quick-img-1-12bfde9c5c280b1940d85dee3d81772d.png) + +Projects created by [start.spring.io](https://start.spring.io/) contain [Spring Boot](https://spring.io/projects/spring-boot), a framework that makes Spring ready to work inside your app, but without much code or configuration required. Spring Boot is the quickest and most popular way to start Spring projects. + +## Step 2: Add your code + +Open up the project in your IDE and locate the `DemoApplication.java` file in the `src/main/java/com/example/demo` folder. Now change the contents of the file by adding the extra method and annotations shown in the code below. You can copy and paste the code or just type it. 
+ +``` + + package com.example.demo; + import org.springframework.boot.SpringApplication; + import org.springframework.boot.autoconfigure.SpringBootApplication; + import org.springframework.web.bind.annotation.GetMapping; + import org.springframework.web.bind.annotation.RequestParam; + import org.springframework.web.bind.annotation.RestController; + + @SpringBootApplication + @RestController + public class DemoApplication { + + + public static void main(String[] args) { + SpringApplication.run(DemoApplication.class, args); + } + + @GetMapping("/hello") + public String hello(@RequestParam(value = "name", defaultValue = "World") String name) { + return String.format("Hello %s!", name); + } + + } + +``` + +*This is all the code required to create a simple “Hello World” web service in Spring Boot.* + +The `hello()` method we’ve added is designed to take a String parameter called `name`, and then combine this parameter with the word `"Hello"` in the code. This means that if you set your name to `“Amy”` in the request, the response would be `“Hello Amy”`. + +The `@RestController` annotation tells Spring that this code describes an endpoint that should be made available over the web. The `@GetMapping(“/hello”)` tells Spring to use our `hello()` method to answer requests that get sent to the `http://localhost:8080/hello` address. Finally, the `@RequestParam` is telling Spring to expect a `name` value in the request, but if it’s not there, it will use the word “World” by default. + +## Step 3: Try it + +Let’s build and run the program. Open a command line (or terminal) and navigate to the folder where you have the project files. 
We can build and run the application by issuing the following command: + +**MacOS/Linux:** + +``` +./mvnw spring-boot:run +``` + +**Windows:** + +``` +mvnw spring-boot:run +``` + +You should see some output that looks very similar to this: + +![quick-img2-ac5ae88c60ffaa062234a580f9f1abc3](./quickstart_img/quick-img2-ac5ae88c60ffaa062234a580f9f1abc3.png) + +The last couple of lines here tell us that Spring has started. Spring Boot’s embedded Apache Tomcat server is acting as a webserver and is listening for requests on `localhost` port `8080`. Open your browser and in the address bar at the top enter http://localhost:8080/hello. You should get a nice friendly response like this: + +![quick-img3-afa0a1fe446db8e3c8c7a8d9ca532d23](./quickstart_img/quick-img3-afa0a1fe446db8e3c8c7a8d9ca532d23.png) + +原文链接: https://spring.io/quickstart \ No newline at end of file diff --git a/docs/en/spring/quickstart_img/quick-img-1-12bfde9c5c280b1940d85dee3d81772d.png b/docs/en/spring/quickstart_img/quick-img-1-12bfde9c5c280b1940d85dee3d81772d.png new file mode 100644 index 0000000000000000000000000000000000000000..6528d8fdf980b67ad6ef2e14b456e7c6c786a7b3 Binary files /dev/null and b/docs/en/spring/quickstart_img/quick-img-1-12bfde9c5c280b1940d85dee3d81772d.png differ diff --git a/docs/en/spring/quickstart_img/quick-img2-ac5ae88c60ffaa062234a580f9f1abc3.png b/docs/en/spring/quickstart_img/quick-img2-ac5ae88c60ffaa062234a580f9f1abc3.png new file mode 100644 index 0000000000000000000000000000000000000000..deb5d5cee8bbbf279177a93a6d71d2e6600dc217 Binary files /dev/null and b/docs/en/spring/quickstart_img/quick-img2-ac5ae88c60ffaa062234a580f9f1abc3.png differ diff --git a/docs/en/spring/quickstart_img/quick-img3-afa0a1fe446db8e3c8c7a8d9ca532d23.png b/docs/en/spring/quickstart_img/quick-img3-afa0a1fe446db8e3c8c7a8d9ca532d23.png new file mode 100644 index 0000000000000000000000000000000000000000..a08c827e6eeaec481b25e26f9e376daeeb56d846 Binary files /dev/null and 
b/docs/en/spring/quickstart_img/quick-img3-afa0a1fe446db8e3c8c7a8d9ca532d23.png differ diff --git a/docs/en/spring/rest-service.md b/docs/en/spring/rest-service.md new file mode 100644 index 0000000000000000000000000000000000000000..504e967c8f186af0f6a0f4857c4e2e8b22f64fa4 --- /dev/null +++ b/docs/en/spring/rest-service.md @@ -0,0 +1,207 @@ +# Building a RESTful Web Service + +This guide walks you through the process of creating a “Hello, World” RESTful web service with Spring. + +## What You Will Build + +You will build a service that will accept HTTP GET requests at `http://localhost:8080/greeting`. + +It will respond with a JSON representation of a greeting, as the following listing shows: + +``` +{"id":1,"content":"Hello, World!"} +``` + +You can customize the greeting with an optional `name` parameter in the query string, as the following listing shows: + +``` +http://localhost:8080/greeting?name=User +``` + +The `name` parameter value overrides the default value of `World` and is reflected in the response, as the following listing shows: + +``` +{"id":1,"content":"Hello, User!"} +``` + +## What You Need + +- About 15 minutes +- A favorite text editor or IDE +- [JDK 1.8](http://www.oracle.com/technetwork/java/javase/downloads/index.html) or later +- [Gradle 4+](http://www.gradle.org/downloads) or [Maven 3.2+](https://maven.apache.org/download.cgi) +- You can also import the code straight into your IDE: + - [Spring Tool Suite (STS)](https://spring.io/guides/gs/sts) + - [IntelliJ IDEA](https://spring.io/guides/gs/intellij-idea/) + +## How to complete this guide + +Like most Spring [Getting Started guides](https://spring.io/guides), you can start from scratch and complete each step or you can bypass basic setup steps that are already familiar to you. Either way, you end up with working code. + +To **start from scratch**, move on to [Starting with Spring Initializr](https://spring.io/guides/gs/rest-service/#scratch). 
+ +To **skip the basics**, do the following: + +- [Download](https://github.com/spring-guides/gs-rest-service/archive/main.zip) and unzip the source repository for this guide, or clone it using [Git](https://spring.io/understanding/Git): `git clone https://github.com/spring-guides/gs-rest-service.git` +- cd into `gs-rest-service/initial` +- Jump ahead to [Create a Resource Representation Class](https://spring.io/guides/gs/rest-service/#initial). + +**When you finish**, you can check your results against the code in `gs-rest-service/complete`. + +## Starting with Spring Initializr + +You can use this [pre-initialized project](https://start.spring.io/#!type=maven-project&language=java&platformVersion=2.5.5&packaging=jar&jvmVersion=11&groupId=com.example&artifactId=rest-service&name=rest-service&description=Demo project for Spring Boot&packageName=com.example.rest-service&dependencies=web) and click Generate to download a ZIP file. This project is configured to fit the examples in this tutorial. + +To manually initialize the project: + +1. Navigate to [https://start.spring.io](https://start.spring.io/). This service pulls in all the dependencies you need for an application and does most of the setup for you. +2. Choose either Gradle or Maven and the language you want to use. This guide assumes that you chose Java. +3. Click **Dependencies** and select **Spring Web**. +4. Click **Generate**. +5. Download the resulting ZIP file, which is an archive of a web application that is configured with your choices. + +``` +If your IDE has the Spring Initializr integration, you can complete this process from your IDE. +``` + +``` +You can also fork the project from Github and open it in your IDE or other editor. +``` + +## Create a Resource Representation Class + +Now that you have set up the project and build system, you can create your web service. + +Begin the process by thinking about service interactions. 
+ +The service will handle `GET` requests for `/greeting`, optionally with a `name` parameter in the query string. The `GET` request should return a `200 OK` response with JSON in the body that represents a greeting. It should resemble the following output: + +``` +{ + "id": 1, + "content": "Hello, World!" +} +``` + +The `id` field is a unique identifier for the greeting, and `content` is the textual representation of the greeting. + +To model the greeting representation, create a resource representation class. To do so, provide a plain old Java object with fields, constructors, and accessors for the `id` and `content` data, as the following listing (from `src/main/java/com/example/restservice/Greeting.java`) shows: + +``` +package com.example.restservice; + +public class Greeting { + + private final long id; + private final String content; + + public Greeting(long id, String content) { + this.id = id; + this.content = content; + } + + public long getId() { + return id; + } + + public String getContent() { + return content; + } +} +``` + + This application uses the [Jackson JSON](https://github.com/FasterXML/jackson) library to automatically marshal instances of type `Greeting` into JSON. Jackson is included by default by the web starter. + +## Create a Resource Controller + +In Spring’s approach to building RESTful web services, HTTP requests are handled by a controller. 
These components are identified by the [`@RestController`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/web/bind/annotation/RestController.html) annotation, and the `GreetingController` shown in the following listing (from `src/main/java/com/example/restservice/GreetingController.java`) handles `GET` requests for `/greeting` by returning a new instance of the `Greeting` class: + +``` +package com.example.restservice; + +import java.util.concurrent.atomic.AtomicLong; + +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class GreetingController { + + private static final String template = "Hello, %s!"; + private final AtomicLong counter = new AtomicLong(); + + @GetMapping("/greeting") + public Greeting greeting(@RequestParam(value = "name", defaultValue = "World") String name) { + return new Greeting(counter.incrementAndGet(), String.format(template, name)); + } +} +``` + +This controller is concise and simple, but there is plenty going on under the hood. We break it down step by step. + +The `@GetMapping` annotation ensures that HTTP GET requests to `/greeting` are mapped to the `greeting()` method. + + There are companion annotations for other HTTP verbs (e.g. `@PostMapping` for POST). There is also a `@RequestMapping` annotation that they all derive from, and can serve as a synonym (e.g. `@RequestMapping(method=GET)`). + +`@RequestParam` binds the value of the query string parameter `name` into the `name` parameter of the `greeting()` method. If the `name` parameter is absent in the request, the `defaultValue` of `World` is used. + +The implementation of the method body creates and returns a new `Greeting` object with `id` and `content` attributes based on the next value from the `counter` and formats the given `name` by using the greeting `template`. 
+ +A key difference between a traditional MVC controller and the RESTful web service controller shown earlier is the way that the HTTP response body is created. Rather than relying on a view technology to perform server-side rendering of the greeting data to HTML, this RESTful web service controller populates and returns a `Greeting` object. The object data will be written directly to the HTTP response as JSON. + +This code uses Spring [`@RestController`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/web/bind/annotation/RestController.html) annotation, which marks the class as a controller where every method returns a domain object instead of a view. It is shorthand for including both `@Controller` and `@ResponseBody`. + +The `Greeting` object must be converted to JSON. Thanks to Spring’s HTTP message converter support, you need not do this conversion manually. Because [Jackson 2](https://github.com/FasterXML/jackson) is on the classpath, Spring’s [`MappingJackson2HttpMessageConverter`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/http/converter/json/MappingJackson2HttpMessageConverter.html) is automatically chosen to convert the `Greeting` instance to JSON. + +`@SpringBootApplication` is a convenience annotation that adds all of the following: + +- `@Configuration`: Tags the class as a source of bean definitions for the application context. +- `@EnableAutoConfiguration`: Tells Spring Boot to start adding beans based on classpath settings, other beans, and various property settings. For example, if `spring-webmvc` is on the classpath, this annotation flags the application as a web application and activates key behaviors, such as setting up a `DispatcherServlet`. +- `@ComponentScan`: Tells Spring to look for other components, configurations, and services in the `com/example` package, letting it find the controllers. 
+ +The `main()` method uses Spring Boot’s `SpringApplication.run()` method to launch an application. Did you notice that there was not a single line of XML? There is no `web.xml` file, either. This web application is 100% pure Java and you did not have to deal with configuring any plumbing or infrastructure. + +### Build an executable JAR + +You can run the application from the command line with Gradle or Maven. You can also build a single executable JAR file that contains all the necessary dependencies, classes, and resources and run that. Building an executable jar makes it easy to ship, version, and deploy the service as an application throughout the development lifecycle, across different environments, and so forth. + +If you use Gradle, you can run the application by using `./gradlew bootRun`. Alternatively, you can build the JAR file by using `./gradlew build` and then run the JAR file, as follows: + +``` +java -jar build/libs/gs-rest-service-0.1.0.jar +``` + +If you use Maven, you can run the application by using `./mvnw spring-boot:run`. Alternatively, you can build the JAR file with `./mvnw clean package` and then run the JAR file, as follows: + +``` +java -jar target/gs-rest-service-0.1.0.jar +``` + +The steps described here create a runnable JAR. You can also [build a classic WAR file](https://spring.io/guides/gs/convert-jar-to-war/). + +Logging output is displayed. The service should be up and running within a few seconds. + +## Test the Service + +Now that the service is up, visit `http://localhost:8080/greeting`, where you should see: + +``` +{"id":1,"content":"Hello, World!"} +``` + +Provide a `name` query string parameter by visiting `http://localhost:8080/greeting?name=User`. 
Notice how the value of the `content` attribute changes from `Hello, World!` to `Hello, User!`, as the following listing shows: + +``` +{"id":2,"content":"Hello, User!"} +``` + +This change demonstrates that the `@RequestParam` arrangement in `GreetingController` is working as expected. The `name` parameter has been given a default value of `World` but can be explicitly overridden through the query string. + +Notice also how the `id` attribute has changed from `1` to `2`. This proves that you are working against the same `GreetingController` instance across multiple requests and that its `counter` field is being incremented on each call as expected. + +## Summary + +Congratulations! You have just developed a RESTful web service with Spring. + +原文链接: https://spring.io/guides/gs/rest-service/ \ No newline at end of file diff --git a/docs/en/spring/system-requirements.md b/docs/en/spring/system-requirements.md new file mode 100644 index 0000000000000000000000000000000000000000..8ff53087b1672a460b1d21963426d38033386931 --- /dev/null +++ b/docs/en/spring/system-requirements.md @@ -0,0 +1,25 @@ +## System Requirements + +Spring Boot 2.6.4 requires [Java 8](https://www.java.com/) and is compatible up to and including Java 17. [Spring Framework 5.3.16](https://docs.spring.io/spring-framework/docs/5.3.16/reference/html/) or above is also required. + +Explicit build support is provided for the following build tools: + +| Build Tool | Version | +| :--------- | :-------------------- | +| Maven | 3.5+ | +| Gradle | 6.8.x, 6.9.x, and 7.x | + +### Servlet Containers + +Spring Boot supports the following embedded servlet containers: + +| Name | Servlet Version | +| :----------- | :-------------- | +| Tomcat 9.0 | 4.0 | +| Jetty 9.4 | 3.1 | +| Jetty 10.0 | 4.0 | +| Undertow 2.0 | 4.0 | + +You can also deploy Spring Boot applications to any servlet 3.1+ compatible container. 
+ +原文链接: https://docs.spring.io/spring-boot/docs/2.6.4/reference/html/getting-started.html#getting-started.system-requirements \ No newline at end of file diff --git a/docs/en/spring/vscode_java.md b/docs/en/spring/vscode_java.md new file mode 100644 index 0000000000000000000000000000000000000000..6abba425faf5ca017818cab7728743bd1c66181f --- /dev/null +++ b/docs/en/spring/vscode_java.md @@ -0,0 +1,112 @@ +# Getting Started with Java in VS Code + +This tutorial shows you how to write and run Hello World program in Java with Visual Studio Code. It also covers a few advanced features, which you can explore by reading other documents in this section. + +For an overview of the features available for Java in VS Code, see [Java Language Overview](https://code.visualstudio.com/docs/languages/java). + +If you run into any issues when following this tutorial, you can contact us by entering an [issue](https://github.com/microsoft/vscode-java-pack/issues). + +## Setting up VS Code for Java development + +### Coding Pack for Java + +To help you set up quickly, you can install the **Coding Pack for Java**, which includes VS Code, the Java Development Kit (JDK), and essential Java extensions. The Coding Pack can be used as a clean installation, or to update or repair an existing development environment. + +[Install the Coding Pack for Java - Windows](https://aka.ms/vscode-java-installer-win) + +[Install the Coding Pack for Java - macOS](https://aka.ms/vscode-java-installer-mac) + +> **Note**: The Coding Pack for Java is only available for Windows and macOS. For other operating systems, you will need to manually install a JDK, VS Code, and Java extensions. 
+ +### Installing extensions + +If you are an existing VS Code user, you can also add Java support by installing the [Extension Pack for Java](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-java-pack), which includes these extensions: + +- [Language Support for Java™ by Red Hat](https://marketplace.visualstudio.com/items?itemName=redhat.java) +- [Debugger for Java](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-java-debug) +- [Test Runner for Java](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-java-test) +- [Maven for Java](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-maven) +- [Project Manager for Java](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-java-dependency) +- [Visual Studio IntelliCode](https://marketplace.visualstudio.com/items?itemName=VisualStudioExptTeam.vscodeintellicode) + +[Install the Extension Pack for Java](vscode:extension/vscjava.vscode-java-pack) + +The [Extension Pack for Java](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-java-pack) provides a Quick Start guide and tips for code editing and debugging. It also has a FAQ that answers some frequently asked questions. Use the command **Java: Tips for Beginners** from the Command Palette (Ctrl+Shift+P) to launch the guide. + +![getting-started](./vscode_java/getting-started.png) + +You can also install extensions separately. The **Extensions Guide** is provided to help you. You can launch the guide with the **Java: Extensions Guide** command. 
+ +For this tutorial, the only required extensions are: + +- [Language Support for Java™ by Red Hat](https://marketplace.visualstudio.com/items?itemName=redhat.java) +- [Debugger for Java](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-java-debug) + +## Installing and setting up a Java Development Kit (JDK) + +To use Java within Visual Studio Code, you need to install a Java Development Kit (JDK) on your local environment. JDK is a software development environment used for developing Java applications. + +### Supported Java versions + +The [Extension Pack for Java](https://marketplace.visualstudio.com/items?itemName=vscjava.vscode-java-pack) supports Java version 1.5 or above. + +> **Note**: To configure JDKs for your projects, see [Configure Runtime for Projects](https://code.visualstudio.com/docs/java/java-project#_configure-runtime-for-projects). To enable Java preview features, see [How can I use VS Code with new Java versions](https://code.visualstudio.com/docs/java/java-faq#_how-can-i-use-visual-studio-code-with-new-java-versions). + +### Installing a Java Development Kit (JDK) + +If you have never installed a JDK before and need to install one, we recommend you to choose from one of these sources: + +- [Amazon Corretto](https://aws.amazon.com/corretto) +- [Azul Zulu](https://www.azul.com/downloads/?package=jdk) +- [Eclipse Adoptium's Temurin](https://adoptium.net/) +- [Microsoft Build of OpenJDK](https://www.microsoft.com/openjdk) +- [Oracle Java SE](https://www.oracle.com/java/technologies/javase-downloads.html) +- [Red Hat build of OpenJDK](https://developers.redhat.com/products/openjdk/download) +- [SapMachine](https://sapmachine.io/) + +## Creating a source code file + +Create a folder for your Java program and open the folder with VS Code. Then in VS Code, create a new file and save it with the name `Hello.java`. 
When you open that file, the Java Language Server automatically starts loading, and you should see a loading icon on the right side of the Status Bar. After it finishes loading, you will see a thumbs-up icon. + + + +> **Note**: If you open a Java file in VS Code without opening its folder, the Java Language Server might not work properly. + +VS Code will also try to figure out the correct package for the new type and fill the new file from a template. See [Create new file](https://code.visualstudio.com/docs/java/java-editing#_create-new-file). + +You can also create a Java project using the **Java: Create Java Project** command. Bring up the **Command Palette** (Ctrl+Shift+P) and then type `java` to search for this command. After selecting the command, you will be prompted for the location and name of the project. You can also choose your build tool from this command. + + + +Visual Studio Code also supports more complex Java projects — see [Project Management](https://code.visualstudio.com/docs/java/java-project). + +## Editing source code + +You can use code snippets to scaffold your classes and methods. VS Code also provides IntelliSense for code completion, and various refactor methods. + + + +To learn more about editing Java, see [Java Editing](https://code.visualstudio.com/docs/java/java-editing). + +## Running and debugging your program + +To run and debug Java code, set a breakpoint, then either press F5 on your keyboard or use the **Run** > **Start Debugging** menu item. You can also use the **Run|Debug** CodeLens option in the editor. After the code compiles, you can see all your variables and threads in the Run view. + + + +The debugger also supports advanced features such as [Hot Code Replace](https://code.visualstudio.com/docs/java/java-debugging#_hot-code-replace) and conditional breakpoints. + +For more information, see [Java Debugging](https://code.visualstudio.com/docs/java/java-debugging). 
+ +## More features + +The editor also has many more capabilities to assist with your Java workload. + +- [Editing Java](https://code.visualstudio.com/docs/java/java-editing) explains how to navigate and edit Java in more details +- [Debugging](https://code.visualstudio.com/docs/java/java-debugging) illustrates all the key features of the Java Debugger +- [Testing](https://code.visualstudio.com/docs/java/java-testing) provides comprehensive support for JUnit and TestNG framework +- [Java Project Management](https://code.visualstudio.com/docs/java/java-project) shows you how to use a project view and work with Maven +- [Spring Boot](https://code.visualstudio.com/docs/java/java-spring-boot) and [Tomcat and Jetty](https://code.visualstudio.com/docs/java/java-tomcat-jetty) demonstrate great framework support +- [Java Web Apps](https://code.visualstudio.com/docs/java/java-webapp) shows how to work with Java Web App in VS Code + +原文链接: https://code.visualstudio.com/docs/java/java-tutorial \ No newline at end of file diff --git a/docs/en/spring/vscode_java/getting-started.png b/docs/en/spring/vscode_java/getting-started.png new file mode 100644 index 0000000000000000000000000000000000000000..032350eaa3b49715db96afcd5c0e38e17b954665 Binary files /dev/null and b/docs/en/spring/vscode_java/getting-started.png differ diff --git a/docs/en/spring/why-spring.md b/docs/en/spring/why-spring.md new file mode 100644 index 0000000000000000000000000000000000000000..7786d3fade76bc408bce14484aafa5cb30088ed4 --- /dev/null +++ b/docs/en/spring/why-spring.md @@ -0,0 +1,59 @@ +# Why Spring? + +Spring makes programming Java quicker, easier, and safer for everybody. Spring’s focus on speed, simplicity, and productivity has made it the [world's most popular](https://snyk.io/blog/jvm-ecosystem-report-2018-platform-application/) Java framework. + +## Spring is everywhere + +Spring’s flexible libraries are trusted by developers all over the world. 
Spring delivers delightful experiences to millions of end-users every day—whether that’s [streaming TV](https://medium.com/netflix-techblog/netflix-oss-and-spring-boot-coming-full-circle-4855947713a0), [online shopping](https://tech.target.com/2018/12/18/spring-feign.html), or countless other innovative solutions. Spring also has contributions from all the big names in tech, including Alibaba, Amazon, Google, Microsoft, and more. + +## Spring is flexible + +Spring’s flexible and comprehensive set of extensions and third-party libraries let developers build almost any application imaginable. At its core, Spring Framework’s [Inversion of Control (IoC)](https://en.wikipedia.org/wiki/Inversion_of_control) and [Dependency Injection (DI)](https://en.wikipedia.org/wiki/Dependency_injection) features provide the foundation for a wide-ranging set of features and functionality. Whether you’re building secure, reactive, cloud-based microservices for the web, or complex streaming data flows for the enterprise, Spring has the tools to help. + +## Spring is productive + +[Spring Boot](https://spring.io/guides/gs/spring-boot/) transforms how you approach Java programming tasks, radically streamlining your experience. Spring Boot combines necessities such as an application context and an auto-configured, embedded web server to make [microservice](https://spring.io/microservices) development a cinch. To go even faster, you can combine Spring Boot with Spring Cloud’s rich set of supporting libraries, servers, patterns, and templates, to safely deploy entire microservices-based architectures into the [cloud](https://spring.io/cloud), in record time. + +## Spring is fast + +Our engineers care deeply about performance. With Spring, you’ll notice fast startup, fast shutdown, and optimized execution, by default. Increasingly, Spring projects also support the [reactive](https://spring.io/reactive) (nonblocking) programming model for even greater efficiency. 
Developer productivity is Spring’s superpower. Spring Boot helps developers build applications with ease and with far less toil than other competing paradigms. Embedded web servers, auto-configuration, and “fat jars” help you get started quickly, and innovations like [LiveReload in Spring DevTools](https://docs.spring.io/spring-boot/docs/current/reference/html/using-spring-boot.html#using-boot-devtools-livereload) mean developers can iterate faster than ever before. You can even start a new Spring project in seconds, with the Spring Initializr at [start.spring.io](https://start.spring.io/). + +## Spring is secure + +Spring has a proven track record of dealing with security issues quickly and responsibly. The Spring committers work with security professionals to patch and test any reported vulnerabilities. Third-party dependencies are also monitored closely, and regular updates are issued to help keep your data and applications as safe as possible. In addition, [Spring Security](https://spring.io/projects/spring-security) makes it easier for you to integrate with industry-standard security schemes and deliver trustworthy solutions that are secure by default. + +## Spring is supportive + +The [Spring community](https://spring.io/community) is enormous, global, diverse, and spans folks of all ages and capabilities, from complete beginners to seasoned pros. No matter where you are on your journey, you can find the support and resources you need to get you to the next level: [quickstarts](https://spring.io/quickstart), [guides & tutorials](https://spring.io/guides), [videos](https://www.youtube.com/channel/UC7yfnfvEUlXUIfm8rGLwZdA), [meetups](https://spring.io/events), [support](https://spring.io/support), or even formal [training and certification](https://spring.io/training). + +# What can Spring do? + +## Microservices + +Quickly deliver production‑grade features with independently evolvable microservices. 
+ +## Reactive + +Spring's asynchronous, nonblocking architecture means you can get more from your computing resources. + +## Cloud + +Your code, any cloud—we’ve got you covered. Connect and scale your services, whatever your platform. + +## Web apps + +Frameworks for fast, secure, and responsive web applications connected to any data store. + +## Serverless + +The ultimate flexibility. Scale up on demand and scale to zero when there’s no demand. + +## Event Driven + +Integrate with your enterprise. React to business events. Act on your streaming data in realtime. + +## Batch + +Automated tasks. Offline processing of data at a time to suit you. + +原文链接: https://spring.io/why-spring \ No newline at end of file diff --git a/docs/spring-amqp/spring-amqp.md b/docs/spring-amqp/spring-amqp.md index ce8eca66fc30c246c4b033aadbf173741cfbfb9b..61bd42ea6d5e1ea0d08f5d577d0bb1457e090ea0 100644 --- a/docs/spring-amqp/spring-amqp.md +++ b/docs/spring-amqp/spring-amqp.md @@ -799,7 +799,7 @@ public interface ChannelListener { 还包括`cacheMode`属性(`CHANNEL`或`CONNECTION`)。 -![cacheStats](images/cacheStats.png) +![cacheStats](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/cacheStats.png) 图 1.JVisualVM 示例 @@ -4382,7 +4382,7 @@ public RabbitTransactionManager rabbitTransactionManager() { | | | | | | | | | -| (group) |这仅在使用名称空间时可用。
    当指定时,类型`Collection`的 Bean 被注册为该名称,并且每个
    元素的
    容器被添加到集合中。
    例如,这允许,通过迭代集合来启动和停止容器组。
    如果多个``元素具有相同的组值,集合形式中的容器
    是如此指定的所有容器的集合。|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| | +| (group) |这仅在使用名称空间时可用。
    当指定时,类型`Collection`的 Bean 被注册为该名称,并且每个
    元素的
    容器被添加到集合中。
    例如,这允许通过迭代集合来启动和停止容器组。<br/>
    如果多个``元素具有相同的组值,集合形式中的容器
    是如此指定的所有容器的集合。|![tickmark](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/tickmark.png)| | | | | | | | | | | | diff --git a/docs/spring-hateoas/spring-hateoas.md b/docs/spring-hateoas/spring-hateoas.md index f8ff1091fe65701d88060b9c79b321374e3502a7..7f6ef1162606ed141ef22139595dce33c414c5a8 100644 --- a/docs/spring-hateoas/spring-hateoas.md +++ b/docs/spring-hateoas/spring-hateoas.md @@ -159,7 +159,7 @@ Spring 为了方便地创建富含超媒体的表示,Hateoas 提供了一组 例 7.`RepresentationModel`类层次结构 -diagram classes +diagram classes 使用`RepresentationModel`的默认方法是创建它的一个子类,以包含表示应该包含的所有属性,创建该类的实例,填充属性并用链接丰富它。 diff --git a/docs/spring-integration/amqp.md b/docs/spring-integration/amqp.md index 6c1a4000bdc40e41dc62638dfe44115b90ce921c..ae0d23a17d1d8480443d1b7f861abf4c92355498 100644 --- a/docs/spring-integration/amqp.md +++ b/docs/spring-integration/amqp.md @@ -152,7 +152,7 @@ XML |**19**|指定底层`AbstractMessageListenerContainer`的恢复尝试之间的间隔(以毫秒为单位)。
    可选(默认为`5000`)。| |**20**|如果“true”且代理上没有队列可用,则容器在启动时抛出一个致命的异常,如果在容器运行时删除了队列(在进行了三次被动声明队列的尝试之后),则停止。,容器不抛出异常并进入恢复模式,尝试根据`recovery-interval`重新启动。
    可选(默认为`true`)。| |**21**|在底层`AbstractMessageListenerContainer`停止之后和强制关闭 AMQP 连接之前等待工作人员的时间(以毫秒为单位)。
    如果有工作人员在关闭信号出现时处于活动状态,只要他们能够在此超时范围内完成处理,他们就被允许完成处理。
    否则,连接已关闭,消息仍未确认(如果通道是事务性的)。
    可选(默认为`5000`)。| -|**22**|默认情况下,底层`AbstractMessageListenerContainer`使用`SimpleAsyncTaskExecutor`实现,该实现为每个任务启动一个新线程,并异步运行它,默认情况下,注意,此实现不重用线程。
    考虑使用线程池`TaskExecutor`实现作为替代。
    可选(默认为`SimpleAsyncTaskExecutor`)。| +|**22**|默认情况下,底层`AbstractMessageListenerContainer`使用`SimpleAsyncTaskExecutor`实现,该实现为每个任务启动一个新线程,并异步运行它,默认情况下,注意,此实现不重用线程。
    考虑使用线程池`TaskExecutor`实现作为替代。
    可选(默认为`SimpleAsyncTaskExecutor`)。| |**23**|默认情况下,底层`AbstractMessageListenerContainer`创建了`DefaultTransactionAttribute`的新实例(它采用 EJB 方法回滚运行时,但不检查异常)。
    可选(默认为`DefaultTransactionAttribute`)。| |**24**|设置对底层`AbstractMessageListenerContainer`上的外部`PlatformTransactionManager`的引用。
    事务管理器与`channel-transacted`属性一起工作。
    如果在框架发送或接收消息时已经有事务在进行中并且`channelTransacted`标志是`true`,消息传递事务的提交或回滚将被推迟到当前事务结束时。
    如果`channelTransacted`标志是`false`,则消息传递操作不应用事务语义(它是自动标记的)。
    以获取更多信息,见[Transactions with Spring AMQP](https://docs.spring.io/spring-amqp/reference/html/%255Freference.html#%5Ftransactions)。
    可选。| |**25**|告诉`SimpleMessageListenerContainer`在单个事务中要处理多少消息(如果通道是事务性的)。
    对于最佳结果,它应该小于或等于`prefetch-count`中设置的值。
    当设置了“customers-per-queue”时不允许。
    可选(默认为`1`)。| @@ -984,6 +984,6 @@ public IntegrationFlow flow(RabbitTemplate template) { 下面的图像说明了 Spring 在此示例中使用的集成组件的基本集合。 -![spring integration amqp sample graph](images/spring-integration-amqp-sample-graph.png) +![spring integration amqp sample graph](https://docs.spring.io/spring-integration/docs/current/reference/html/images/spring-integration-amqp-sample-graph.png) -图 1。AMQP 样本的 Spring 积分图 \ No newline at end of file +图1.AMQP 样本的 Spring 积分图 \ No newline at end of file diff --git a/docs/spring-integration/http.md b/docs/spring-integration/http.md index b1aec4d7096cd2d4c530d688dc0be1a379b2ece5..ce93d7eefdea2fe36ef386194b85f05ace28259f 100644 --- a/docs/spring-integration/http.md +++ b/docs/spring-integration/http.md @@ -131,7 +131,7 @@ Bean 这个定义通过将 HTTP 请求委托给`RestTemplate`来运行。然后 如果`transfer-cookies`是`false`,则接收到的任何`Set-Cookie`头在回复消息中保持为`Set-Cookie`,并在随后的发送中被删除。 -| |空响应主体

    http 是一个请求-响应协议。
    但是,响应可能没有主体,只有标题。
    在这种情况下,`Message`产生一个有效负载为`org.springframework.http.ResponseEntity`的回复`expected-response-type`,而不管是否提供了
    根据也有对相同 URL 的调用可能返回或不返回响应体的情况,
    例如,对 HTTP 资源的第一个请求返回内容,但是第二个不是(返回`304 Not Modified`)。但是,在所有情况下,
    ,`http_statusCode`消息头被填充。
    这可以在 HTTP 出站网关之后的某些路由逻辑中使用。
    你还可以使用`\`将带有`ResponseEntity`的消息路由到不同的流,而不是用于带有主体的响应。| +| |空响应主体

    http 是一个请求-响应协议。
    但是,响应可能没有主体,只有标题。
    在这种情况下,无论是否提供了`expected-response-type`,回复`Message`都会产生一个有效负载为`org.springframework.http.ResponseEntity`的实例。<br/>
    对相同 URL 的调用也可能出现有时返回响应体、有时不返回的情况,<br/>
    例如,对 HTTP 资源的第一个请求返回内容,但是第二个不是(返回`304 Not Modified`)。但是,在所有情况下,
    `http_statusCode`消息头都会被填充。<br/>
    这可以在 HTTP 出站网关之后的某些路由逻辑中使用。
    你还可以使用`\`将带有`ResponseEntity`的消息路由到不同的流,而不是用于带有主体的响应。| |---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | |expected-response-type

    除了前面提到的关于空响应主体的说明外,如果响应确实包含主体,则必须提供一个适当的`expected-response-type`属性,或者,同样,你收到的是一个没有正文的`ResponseEntity`。
    `expected-response-type`必须与(配置或默认的)`HttpMessageConverter`实例和响应中的`Content-Type`头兼容。
    这可以是一个抽象类,甚至是一个接口(当你使用 Java 序列化时,例如`java.io.Serializable`)和`Content-Type: application/x-java-serialized-object`)。| @@ -504,7 +504,7 @@ List nameValuePairs = 下面的示例展示了如何使用 Java 配置入站网关: -例 1。使用 Java 配置的入站网关 +例1.使用 Java 配置的入站网关 ``` @Bean @@ -528,7 +528,7 @@ public RequestMapping mapping() { 下面的示例展示了如何使用 Java DSL 配置入站网关: -例 2。使用 Java DSL 的入站网关 +例2.使用 Java DSL 的入站网关 ``` @Bean @@ -543,7 +543,7 @@ public IntegrationFlow inbound() { 下面的示例展示了如何使用 Java 配置出站网关: -例 3。使用 Java 配置的出站网关 +例3.使用 Java 配置的出站网关 ``` @ServiceActivator(inputChannel = "httpOutRequest") @@ -559,7 +559,7 @@ public HttpRequestExecutingMessageHandler outbound() { 下面的示例展示了如何使用 Java DSL 配置出站网关: -例 4。使用 Java DSL 的出站网关 +例4.使用 Java DSL 的出站网关 ``` @Bean @@ -582,15 +582,15 @@ public IntegrationFlow outbound() { 组件与消息通道交互,可以为其指定超时。例如,HTTP 入站网关将从连接的 HTTP 客户机接收的消息转发到消息通道(使用请求超时),因此 HTTP 入站网关从用于生成 HTTP 响应的应答通道(使用应答超时)接收应答消息。下面的插图提供了一个直观的解释: -![HTTP 入站网关](images/http-inbound-gateway.png) +![HTTP 入站网关](https://docs.spring.io/spring-integration/docs/current/reference/html/images/http-inbound-gateway.png) -图 1。超时设置如何应用到 HTTP 入站网关 +图1.超时设置如何应用到 HTTP 入站网关 对于出站端点,我们需要考虑与远程服务器交互时的计时工作方式。下图显示了这种情况: -![HTTP 出站网关](images/http-outbound-gateway.png) +![HTTP 出站网关](https://docs.spring.io/spring-integration/docs/current/reference/html/images/http-outbound-gateway.png) -图 2。超时设置如何应用到 HTTP 出站网关 +图2.超时设置如何应用到 HTTP 出站网关 在使用 HTTP 出站网关或 HTTP 出站通道适配器发出活动 HTTP 请求时,你可能希望配置与 HTTP 相关的超时行为。在这些实例中,这两个组件使用 Spring 的[`RestTemplate`](https://DOCS. Spring.io/ Spring/DOCS/current/javadoc-api/org/springframework/web/client/resttemplate.html)支持来执行 HTTP 请求。 @@ -602,7 +602,7 @@ public IntegrationFlow outbound() { 如果你没有显式地配置`request-factory`或`rest-template`属性,那么将实例化一个默认的`RestTemplate`(它使用`SimpleClientHttpRequestFactory`)。 -| |对于某些 JVM 实现,`URLConnection`类对超时的处理可能不一致。

    例如,来自 Java 平台,关于`setConnectTimeout`的标准版 6API 规范:

    >该方法的一些非标准实现可能会忽略指定的超时。
    要查看连接超时集,请调用 getConnectTimeout()。

    如果你有特定的需求,则应该测试你的超时。考虑使用,这反过来,使用[Apache HttpComponents HttpClient](https://hc.apache.org/httpcomponents-client-ga/),而不是依赖 JVM 提供的实现。| +| |对于某些 JVM 实现,`URLConnection`类对超时的处理可能不一致。

    例如,来自 Java 平台,关于`setConnectTimeout`的标准版 6API 规范:

    >该方法的一些非标准实现可能会忽略指定的超时。
    要查看连接超时集,请调用 getConnectTimeout()。

    如果你有特定的需求,则应该测试你的超时。考虑使用,这反过来,使用[Apache HttpComponents HttpClient](https://hc.apache.org/httpcomponents-client-ga/),而不是依赖 JVM 提供的实现。| |---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | |当你使用带有池连接管理器的 Apache HttpComponents HttpClient 时,你应该意识到,默认情况下,连接管理器为每个给定的路由创建的并发连接不超过两个,并且总共创建的连接不超过 20 个,对于许多现实世界中的应用程序,
    这些限制可能被证明是过于严格的。<br/>
    有关配置这个重要组件的信息,请参见[Apache 文档](https://hc.apache.org/httpcomponents-client-ga/)。| diff --git a/docs/spring-integration/ip.md b/docs/spring-integration/ip.md index 14630d23e60eccacfa17e61eafb24b9fb8b679c8..8354225ce3605e7238e110cf8a3e9fe14ced7701 100644 --- a/docs/spring-integration/ip.md +++ b/docs/spring-integration/ip.md @@ -1367,7 +1367,7 @@ public static class Config { 下面是使用 DSL 配置使用 DSL 的流的一些示例。 -例 1。服务器适配器流 +例1.服务器适配器流 ``` @Bean @@ -1383,7 +1383,7 @@ public IntegrationFlow server() { } ``` -例 2。客户端适配器流 +例2.客户端适配器流 ``` @Bean @@ -1393,7 +1393,7 @@ public IntegrationFlow client() { } ``` -例 3。服务器网关流 +例3.服务器网关流 ``` @Bean @@ -1410,7 +1410,7 @@ public IntegrationFlow server() { } ``` -例 4。客户端网关流程 +例4.客户端网关流程 ``` @Bean diff --git a/docs/spring-integration/message-routing.md b/docs/spring-integration/message-routing.md index fdd49e73de44cb8706fa8078c50bade87c623bc7..ed8ec56e25b9c440fa4ad8a4fd42d68017e2ca78 100644 --- a/docs/spring-integration/message-routing.md +++ b/docs/spring-integration/message-routing.md @@ -44,43 +44,43 @@ Spring 集成提供了以下路由器: | Attribute |路由器| header value router | xpath router | payload type router | recipient list route | exception type router | |----------------------|--------------------------------|--------------------------------|--------------------------------|--------------------------------|--------------------------------|--------------------------------| -| apply-sequence |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -|default-output-channel|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| resolution-required 
|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| ignore-send-failures |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| timeout |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| id |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| auto-startup |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| input-channel |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| order |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| method |![tickmark](images/tickmark.png)| | | | | | -| ref |![tickmark](images/tickmark.png)| | | | | | -| expression |![tickmark](images/tickmark.png)| | | | | | -| header-name | |![tickmark](images/tickmark.png)| | | | | -| evaluate-as-string | | |![tickmark](images/tickmark.png)| | | | -| xpath-expression-ref | | |![tickmark](images/tickmark.png)| | | | -| converter | | |![tickmark](images/tickmark.png)| | | | +| apply-sequence 
|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +|default-output-channel|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| resolution-required |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| ignore-send-failures 
|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| timeout |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| id |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| auto-startup 
|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| input-channel |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| order |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| method 
|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | | | +| ref |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | | | +| expression |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | | | +| header-name | |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | | +| evaluate-as-string | | |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | +| xpath-expression-ref | | |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | +| converter | | |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | 下表显示了链内路由器可用的配置参数: | Attribute |路由器| header value router | xpath router | payload type router | recipient list router | exception type router | |----------------------|--------------------------------|--------------------------------|--------------------------------|--------------------------------|--------------------------------|--------------------------------| -| apply-sequence |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -|default-output-channel|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| resolution-required |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| ignore-send-failures 
|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| -| timeout |![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| +| apply-sequence |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +|default-output-channel|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| resolution-required 
|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| ignore-send-failures |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| +| timeout |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | id | | | | | | | | auto-startup | | | | | | | | input-channel | | | | | | | | order | | | | | | | -| 
method |![tickmark](images/tickmark.png)| | | | | | -| ref |![tickmark](images/tickmark.png)| | | | | | -| expression |![tickmark](images/tickmark.png)| | | | | | -| header-name | |![tickmark](images/tickmark.png)| | | | | -| evaluate-as-string | | |![tickmark](images/tickmark.png)| | | | -| xpath-expression-ref | | |![tickmark](images/tickmark.png)| | | | -| converter | | |![tickmark](images/tickmark.png)| | | | +| method |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | | | +| ref |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | | | +| expression |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | | | +| header-name | |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | | +| evaluate-as-string | | |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | +| xpath-expression-ref | | |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | +| converter | | |![tickmark](https://docs.spring.io/spring-integration/docs/current/reference/html/images/tickmark.png)| | | | | |在 Spring Integration2.1 中,路由器参数已经在所有路由器实现中得到了更多的标准化。因此,一些小的更改可能会破坏较旧的 Spring 基于 Integration 的应用程序。,自 Spring Integration2.1 以来,将`ignore-channel-name-resolution-failures`属性删除,以利于将其行为与`resolution-required`属性合并,
    此外,`resolution-required`属性现在默认为`true`,
    在进行这些更改之前,`resolution-required`属性默认为`false`,当没有解析通道且没有设置`default-output-channel`时,导致消息被静默删除。
    新行为需要至少一个解析通道,并且默认情况下,如果没有确定通道(或者发送尝试未成功),则抛出`MessageDeliveryException`。
    如果你确实希望静默地删除消息,则可以设置`default-output-channel="nullChannel"`。| |---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| @@ -697,7 +697,7 @@ Spring 集成为常见的基于内容的路由用例提供了相当多的不同 任何属于`AbstractMappingMessageRouter`(包括大多数框架定义的路由器)的子类的路由器都是动态路由器,因为`channelMapping`是在`AbstractMappingMessageRouter`级别上定义的。该映射的 setter 方法与“setChannelMapping”和“removeChannelMapping”方法一起作为公共方法公开。只要有对路由器本身的引用,就可以在运行时更改、添加和删除路由器映射。这也意味着你可以通过 JMX(参见[JMX 支持](./jmx.html#jmx))或 Spring 集成控制总线(参见[控制总线](./control-bus.html#control-bus))功能公开这些相同的配置选项。 -| |返回到通道键,因为通道名是灵活和方便的,
    但是,如果你不信任消息创建者,恶意参与者(了解系统)可能会创建一条消息,并将其路由到一个意外的通道。,例如,因此,你可能希望禁用此功能(将`channelKeyFallback`属性设置为`false`),并在需要时更改映射。| +| |返回到通道键,因为通道名是灵活和方便的,
    但是,如果你不信任消息创建者,恶意参与者(了解系统)可能会创建一条消息,并将其路由到一个意外的通道。,例如,因此,你可能希望禁用此功能(将`channelKeyFallback`属性设置为`false`),并在需要时更改映射。| |---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ##### 使用控制总线管理路由器映射 @@ -1998,7 +1998,7 @@ public MessageHandler scatterGatherDistribution() { |**6** |用于接收来自每个供应商的聚合回复的通道。
    它在分散消息中用作`replyChannel`头。
    可选的。
    默认情况下,`FixedSubscriberChannel`被创建。| |**7** |当订阅了多个相同的处理程序`DirectChannel`(用于负载平衡目的)时,此组件的顺序相同。
    可选。| |**8** |指定应该启动和停止端点的阶段,
    启动顺序从最低到最高,关闭顺序从最高到最低,
    默认情况下,该值为`Integer.MAX_VALUE`,这意味着这个容器开始得越晚,停止得越快。
    可选的。| -|**9** |当向`output-channel`发送回复`Message`时等待的超时间隔。
    默认情况下,发送操作会阻塞一秒钟。
    只有在输出通道有一些“发送”限制的情况下才适用,例如,一个具有固定“容量”且已满的`QueueChannel`。
    在这种情况下,将抛出一个`MessageDeliveryException`。
    对于`send-timeout`实现,
    被忽略。
    对于`group-timeout(-expression)`,来自计划过期任务的`MessageDeliveryException`将导致该任务被重新安排。默认情况下,它将无限期地等待。如果回复超时,将返回
    `null`。
    可选。
    它默认为`-1`,这意味着无限期地等待。| |**11**|指定 Scatter-Gather 是否必须返回一个非空值。
    该值默认为`true`。
    因此,当底层聚合器在`gather-timeout`之后返回一个空值时,将抛出一个异常。
    注意,如果`null`是一种可能性,应该指定`gather-timeout`以避免无限期等待。| |**12**|``选项,
    与`scatter-channel`属性互斥。| diff --git a/docs/spring-integration/messaging-endpoints.md b/docs/spring-integration/messaging-endpoints.md index 310b0ca22e62aaaa101d2f7244df90bcc0310e4c..5d7b2803a41731d5186f3505e1a6740b859bd5c2 100644 --- a/docs/spring-integration/messaging-endpoints.md +++ b/docs/spring-integration/messaging-endpoints.md @@ -139,7 +139,7 @@ consumer.setTaskExecutor(taskExecutor); |**4** |固定延迟触发器在覆盖项下使用`PeriodicTrigger`。
    如果不使用`time-unit`属性,则指定的值以毫秒为单位表示。
    如果设置了此属性,则不能指定以下任何属性:`fixed-rate`、`trigger`、`cron`和`ref`。| |**5** |固定速率触发器在底层使用`PeriodicTrigger`。
    如果不使用`time-unit`属性,则指定的值以毫秒为单位表示。
    如果设置了该属性,则不能指定以下属性:`fixed-delay`,`trigger`,`cron`,以及`ref`。| |**6** |引用 poller 的底层 Bean-定义的 ID 类型为`org.springframework.integration.scheduling.PollerMetadata`。
    对于顶级 poller 元素,`id`属性是必需的,除非它是默认的 poller(`default="true"`)。| -|**7** |有关更多信息,请参见[配置入站通道适配器](./channel-adapter.html#channel-adapter-namespace-inbound)。
    如果未指定,默认值取决于上下文。
    如果使用`PollingConsumer`,则此属性默认为`-1`。
    但是,如果使用`max-messages-per-poll`,则`max-messages-per-poll`属性默认为`1`。如果未指定,它的默认值为 1000(毫秒)。
    可选。| |**9** |Bean 引用另一个顶级 poller。
    `ref`属性不能出现在顶级`poller`元素上。
    但是,如果设置了此属性,则不能指定以下属性:`fixed-rate`,`cron`,以及`fixed-delay`。| |**10**|提供引用自定义任务执行器的能力。
    有关更多信息,请参见[TaskExecutor 支持](#taskexecutor-support)。
    可选。| @@ -1773,7 +1773,7 @@ compile "org.springframework.integration:spring-integration-groovy:5.5.9" 在 Spring Integration2.1 中,Groovy 支持的配置名称空间是 Spring Integration 的脚本支持的扩展,并共享[脚本支持](./scripting.html#scripting)部分中详细描述的核心配置和行为。尽管 Groovy 脚本很好地得到了通用脚本支持,但 Groovy 支持提供了`Groovy`配置名称空间,它由 Spring 框架的`org.springframework.scripting.groovy.GroovyScriptFactory`和相关组件支持,为使用 Groovy 提供了扩展功能。下面的清单显示了两个示例配置: -例 1。过滤器 +例1.过滤器 ``` diff --git a/docs/spring-integration/overview.md b/docs/spring-integration/overview.md index 29f19113dd76309553be5bd60a0d570ba8a80e05..3cff0606db9fd20bf6ecfecdcb24bfb4f213f6db 100644 --- a/docs/spring-integration/overview.md +++ b/docs/spring-integration/overview.md @@ -40,17 +40,17 @@ Spring 一体化遵循以下原则: 在 Spring 集成中,消息是任何 Java 对象的通用包装器,它与框架在处理该对象时使用的元数据相结合。它由有效载荷和报头组成。有效负载可以是任何类型的,并且头包含通常需要的信息,例如 ID、时间戳、相关 ID 和返回地址。头也用于将值传递给连接的传输程序和从连接的传输程序中传递。例如,当从接收到的文件创建消息时,文件名可以存储在要由下游组件访问的头中。同样,如果消息的内容最终将由出站邮件适配器发送,那么各个属性(To、From、CC、Subject 和其他属性)可能会被上游组件配置为消息头值。开发人员还可以在头中存储任意的键值对。 -![Message](images/message.jpg) +![Message](https://docs.spring.io/spring-integration/docs/current/reference/html/images/message.jpg) -图 1。信息 +图1.信息 #### 消息通道 消息通道表示管道和过滤器体系结构的“管道”。生产者将消息发送到一个通道,消费者从一个通道接收消息。因此,消息通道使消息传递组件解耦,并且还为截获和监视消息提供了一个方便的点。 -![消息通道](images/channel.jpg) +![消息通道](https://docs.spring.io/spring-integration/docs/current/reference/html/images/channel.jpg) -图 2。消息通道 +图2.消息通道 消息通道可以遵循点对点语义或发布订阅语义。对于点对点信道,发送到该信道的每条消息的接收者不能超过一个。另一方面,发布-订阅频道试图将每条消息广播给频道上的所有订阅者。 Spring 集成支持这两种模型。 @@ -79,9 +79,9 @@ Spring 集成的主要目标之一是通过控制反转来简化 Enterprise 集 消息路由器负责决定下一个应该接收消息的一个或多个通道(如果有的话)。通常,决策是基于消息的内容或消息头中可用的元数据。消息路由器通常被用作服务激活器或其他能够发送回复消息的端点上静态配置的输出通道的动态替代方案。同样,正如前面所描述的,消息路由器为多个订阅者使用的反应性消息过滤器提供了一种主动的替代方案。 -![Router](images/router.jpg) +![Router](https://docs.spring.io/spring-integration/docs/current/reference/html/images/router.jpg) -图 3。消息路由器 +图3.消息路由器 #### splitter @@ -102,9 +102,9 @@ Spring 集成的主要目标之一是通过控制反转来简化 Enterprise 集 请求-应答服务激活端点将目标对象的方法连接到输入和输出消息通道。 
-![处理程序端点](images/handler-endpoint.jpg) +![处理程序端点](https://docs.spring.io/spring-integration/docs/current/reference/html/images/handler-endpoint.jpg) -图 4。服务激活器 +图4.服务激活器 | |正如前面所讨论的,在[消息通道](#overview-components-channel)中,通道可以是可校对的或可下标的。
    在前面的图中,这是由“时钟”符号和实心箭头(轮询)和虚线箭头(订阅)来描述的。| |---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| @@ -113,16 +113,16 @@ Spring 集成的主要目标之一是通过控制反转来简化 Enterprise 集 通道适配器是将消息通道连接到其他系统或传输的端点。通道适配器可以是入站的,也可以是出站的。通常,通道适配器在消息和从另一个系统接收或发送到另一个系统的任何对象或资源(文件、HTTP 请求、JMS 消息等)之间进行一些映射。根据传输方式的不同,通道适配器还可以填充或提取消息头值。 Spring 集成提供了许多通道适配器,这在接下来的章节中进行了描述。 -![源端点](images/source-endpoint.jpg) +![源端点](https://docs.spring.io/spring-integration/docs/current/reference/html/images/source-endpoint.jpg) -图 5。入站通道适配器端点将源系统连接到`MessageChannel`。 +图5.入站通道适配器端点将源系统连接到`MessageChannel`。 | |消息源可以是可选的(例如,POP3)或消息驱动的(例如,IMAP IDLE)。
    在前面的图表中,这是由“时钟”符号、实心箭头和虚线箭头(消息驱动)来描述的。| |---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -![目标端点](images/target-endpoint.jpg) +![目标端点](https://docs.spring.io/spring-integration/docs/current/reference/html/images/target-endpoint.jpg) -图 6。出站通道适配器端点将`MessageChannel`连接到目标系统。 +图6.出站通道适配器端点将`MessageChannel`连接到目标系统。 | |正如前面[消息通道](#overview-components-channel)中所讨论的,通道可以是可选的或可下标的。
    在前面的图表中,这是由“时钟”符号和实心箭头(轮询)和虚线箭头(订阅)来描述的。| |---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| @@ -336,7 +336,7 @@ Spring 通过使用 Spring 框架的`SpringFactories`机制来加载几个`Integ 即使不使用 Spring boot,也可以使用 boot 提供的工具,通过为上述文件添加 Transformers 来增强 shade 插件。下面的示例展示了如何配置插件: -例 1。 POM.xml +例1. POM.xml ``` ... diff --git a/docs/spring-integration/samples.md b/docs/spring-integration/samples.md index 5eeec0a260ab55d00e2797bd4e44112f6eeb7b3c..8da7a6e4fde8cf363305a3dc76c6f14d64cb2c9f 100644 --- a/docs/spring-integration/samples.md +++ b/docs/spring-integration/samples.md @@ -81,9 +81,9 @@ GitHub 是用于社交编码的:如果你想向 Spring 集成示例项目提 下图显示了整个过程: -![贷款经纪人 EIP](images/loan-broker-eip.png) +![贷款经纪人 EIP](https://docs.spring.io/spring-integration/docs/current/reference/html/images/loan-broker-eip.png) -图 1。贷款经纪人样本 +图1.贷款经纪人样本 EIP 架构的核心是非常简单但功能强大的管道、过滤器概念,当然还有:消息。端点(过滤器)通过通道(管道)相互连接。产生端点将消息发送到通道,并且使用端点检索消息。该体系结构旨在定义各种机制,这些机制描述了端点之间如何交换信息,而不需要了解这些端点是什么或它们正在交换什么信息。因此,它提供了一个非常松散耦合和灵活的协作模型,同时还将集成关注点与业务关注点分离开来。EIP 通过进一步定义以下内容扩展了该体系结构: @@ -120,9 +120,9 @@ EIP 书的第 9 章很好地描述了这个用例的细节和变化,但这里 下图显示了消息传递网关的表示: -![gateway](images/gateway.jpg) +![gateway](https://docs.spring.io/spring-integration/docs/current/reference/html/images/gateway.jpg) -图 2。消息传递网关 +图2.消息传递网关 消息传递网关模式提供了一种访问消息传递系统(包括我们的贷款代理)的简单机制。在 Spring 集成中,可以将网关定义为普通的旧 Java 接口(不需要提供实现),使用 XML``元素或 Java 注释对其进行配置,并像使用其他任何方法一样使用它 Spring Bean。 Spring 集成通过生成消息(将有效负载映射到方法的输入参数)并将其发送到指定的信道,负责将方法调用委派和映射到消息传递基础设施。下面的示例展示了如何使用 XML 定义这样的网关: @@ -146,17 +146,17 @@ EIP 书的第 9 章很好地描述了这个用例的细节和变化,但这里 下图显示了链状图案: -![chain](images/chain.png) +![chain](https://docs.spring.io/spring-integration/docs/current/reference/html/images/chain.png) -图 3。链条 +图3.链条 前面的图片显示,我们有一个带有内部 header-enricher 元素的链,该元素通过`CREDIT_SCORE`header 和值(由对信用服务的调用决定)进一步丰富了消息的内容——一个简单的 POJO Spring Bean 
由“CreditBureau”名称标识)。然后,它将委托给消息路由器。 下图显示了消息路由器模式: -![银行路由器](images/bank-router.jpg) +![银行路由器](https://docs.spring.io/spring-integration/docs/current/reference/html/images/bank-router.jpg) -图 4。消息路由器 +图4.消息路由器 Spring 集成提供了消息路由模式的几种实现方式。在这种情况下,我们使用一个路由器,该路由器根据表达式的求值来确定一个通道列表(在 Spring 表达式语言中)它查看信用评分(在上一步中确定),并根据信用评分的值从`Map` Bean 中选择具有`id`的`banks`的通道列表,其值为`premier`或`secondary`。一旦通道列表被选中,消息将被路由到这些通道。 @@ -164,9 +164,9 @@ Spring 集成提供了消息路由模式的几种实现方式。在这种情况 下图显示了消息聚合器模式: -![报价聚合器](images/quotes-aggregator.jpg) +![报价聚合器](https://docs.spring.io/spring-integration/docs/current/reference/html/images/quotes-aggregator.jpg) -图 5。消息聚合器 +图5.消息聚合器 聚合器模式描述将相关消息分组为单个消息的端点。可以提供标准和规则来确定聚合和相关策略。 Spring 集成提供了聚合器模式的几种实现方式以及方便的基于名称空间的配置。 @@ -196,9 +196,9 @@ Spring 集成提供了消息路由模式的几种实现方式。在这种情况 这个域是一个咖啡馆的域,下图描述了基本流程: -![cafe eip](images/cafe-eip.png) +![cafe eip](https://docs.spring.io/spring-integration/docs/current/reference/html/images/cafe-eip.png) -图 6。咖啡厅样本 +图6.咖啡厅样本 `Order`对象可以包含多个`OrderItems`。一旦下了订单,拆分器就会将复合订单消息分解为针对每种饮料的单个消息。然后,每一种都由路由器处理,该路由器确定饮料是热的还是冷的(通过检查`OrderItem`对象的“isiced”属性)。`Barista`为每一种饮料做准备,但冷热饮料的准备工作有两种不同的方法:“准备好的饮料”和“准备好的冷饮”。然后将准备好的饮料发送到`Waiter`,在那里它们被聚集到`Delivery`对象中。 diff --git a/docs/spring-integration/sftp.md b/docs/spring-integration/sftp.md index 8474db469b56425cd5ba7f1abe788dcb38ef5daa..687245d3f6e81169a049175b220b8e833168995c 100644 --- a/docs/spring-integration/sftp.md +++ b/docs/spring-integration/sftp.md @@ -1058,7 +1058,7 @@ root/ |- zoo.txt ``` -如果异常发生在`file3.txt`上,则网关抛出的`PartialSuccessException`具有`derivedInput`of`file1.txt`,`subdir`,以及`zoo.txt`和`partialResults`of`file1.txt`。它的`cause`是另一个`PartialSuccessException`与`derivedInput`之`file2.txt`和`file3.txt`之`partialResults`之`file2.txt`之 @@ -281,7 +281,7 @@ Spring 集成通过以下方式提供对消息存储模式的支持: ``` -例 2。聚合器 +例2.聚合器 ``` @@ -297,7 +297,7 @@ Spring 集成通过以下方式提供对消息存储模式的支持: * [Gemfire 消息存储](./gemfire.html#gemfire-message-store):使用 Gemfire 分布式缓存存储消息 -| |但是,在使用`MessageStore`的持久实现时要注意一些限制。

    消息数据(有效载荷和报头)通过使用不同的序列化策略进行序列化和反序列化,这取决于`MessageStore`的实现方式。
    例如,当使用`JdbcMessageStore`时,默认情况下,只有`Serializable`数据是持久的。
    在这种情况下,在进行序列化之前,要删除不可序列化的标头。
    此外,要注意传输适配器(例如 FTP、HTTP、JMS 和其他)注入的特定于协议的标头。
    例如,``将 HTTP 标头映射到消息标头,其中之一是一个`ArrayList`的不可序列化`org.springframework.http.MediaType`实例。你可以将你自己的`Serializer`和`Deserializer`策略接口的实现注入到一些`MessageStore`实现中(例如`JdbcMessageStore`),以改变序列化和反序列化的行为。

    特别注意表示某些类型数据的头。
    例如,如果其中一个头包含某个 Spring Bean 的实例,那么在反序列化时,你可能会得到另一个 Bean 的实例,这将直接影响框架创建的一些隐式头(例如`REPLY_CHANNEL`或`ERROR_CHANNEL`)。,
    目前,它们不能序列化,但是,即使它们是,反序列化的通道也不会表示预期的实例。

    从 Spring Integration3.0 开始,你可以使用配置为在用`HeaderChannelRegistry`注册通道后用名称替换这些头的 header 来解决此问题,

    同样,考虑一下,当你按照以下方式配置消息流时会发生什么:网关队列通道(由持久性消息存储支持)服务激活器,
    该网关创建了一个临时回复通道,当服务激活器的 poller 从队列中读取消息时,该通道会丢失。,再次,
    有关更多信息,请参见[页眉 Enricher](./content-enrichment.html#header-enricher)。| +| |但是,在使用`MessageStore`的持久实现时要注意一些限制。

    消息数据(有效载荷和报头)通过使用不同的序列化策略进行序列化和反序列化,这取决于`MessageStore`的实现方式。
    例如,当使用`JdbcMessageStore`时,默认情况下,只有`Serializable`数据是持久的。
    在这种情况下,在进行序列化之前,要删除不可序列化的标头。
    此外,要注意传输适配器(例如 FTP、HTTP、JMS 和其他)注入的特定于协议的标头。
    例如,``将 HTTP 标头映射到消息标头,其中之一是一个`ArrayList`的不可序列化`org.springframework.http.MediaType`实例。你可以将你自己的`Serializer`和`Deserializer`策略接口的实现注入到一些`MessageStore`实现中(例如`JdbcMessageStore`),以改变序列化和反序列化的行为。

    特别注意表示某些类型数据的头。
    例如,如果其中一个头包含某个 Spring Bean 的实例,那么在反序列化时,你可能会得到另一个 Bean 的实例,这将直接影响框架创建的一些隐式头(例如`REPLY_CHANNEL`或`ERROR_CHANNEL`)。,
    目前,它们不能序列化,但是,即使它们是,反序列化的通道也不会表示预期的实例。

    从 Spring Integration3.0 开始,你可以使用配置为在用`HeaderChannelRegistry`注册通道后用名称替换这些头的 header 来解决此问题,

    同样,考虑一下,当你按照以下方式配置消息流时会发生什么:网关队列通道(由持久性消息存储支持)服务激活器,
    该网关创建了一个临时回复通道,当服务激活器的 poller 从队列中读取消息时,该通道会丢失。,再次,
    有关更多信息,请参见[页眉 Enricher](./content-enrichment.html#header-enricher)。| |---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| Spring Integration4.0 引入了两个新的接口: diff --git a/package.json b/package.json index 2c92b3d8da27247ff5e92b5124d5b2764055f2f9..e8f3d8e94560819ad7b43310b889f3b0b4e62411 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,9 @@ { "scripts": { - "docs:dev": "vuepress dev docs --no-cache", - "docs:build": "vuepress build docs && cp ByteDanceVerify.html shenma-site-verification.txt sogousiteverification.txt google73113bc465d06f6b.html docs/.vuepress/dist" + "docs:dev": "yarn readme:copy && vuepress dev docs --no-cache", + "docs:build": "yarn readme:copy && vuepress build docs && yarn seo:copy", + "seo:copy": "cp ByteDanceVerify.html shenma-site-verification.txt sogousiteverification.txt google73113bc465d06f6b.html docs/.vuepress/dist", + "readme:copy": "cp -r README.md readme docs/ && cp -r README.md readme docs/en/" }, "devDependencies": { "vuepress": "^1.9.7",